blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25e14a195a87e2bdb57f80b014f9f4a6aeef8f8e
|
fd6170e5986d2575061174ea3be6a2c5d1b614c9
|
/cachematrix.R
|
20c738172d2b0a0c2de66364153d35e438cff1ff
|
[] |
no_license
|
cdiako16/ProgrammingAssignment2
|
2e092b0134b842bd288d11bcfe719de3334708a7
|
1cf6181a131983fb899592cac7a40e9747209087
|
refs/heads/master
| 2020-03-30T04:47:33.038076
| 2016-07-24T22:21:28
| 2016-07-24T22:21:28
| 64,082,483
| 0
| 0
| null | 2016-07-24T19:40:17
| 2016-07-24T19:40:16
| null |
UTF-8
|
R
| false
| false
| 2,113
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
#__________________________________________________
# This assignment has two functions. The first one creates a "special" matrix
# object and can cache its inverse. The second one uses the output of the first
# function and computes the inverse if there is no prior inverse that can be cached
## Write a short comment describing this function
##__________________________________________________
## This function (makeCacheMatrix) is able to create a matrix object and cache its inverse
## by setting the matrix, getting the matrix, setting the inverse and getting the inverse
## Create a "cache matrix": a list of accessor functions that wrap a matrix
## 'x' together with a cached copy of its inverse.
##   set(y)          - replace the stored matrix and invalidate the cache
##   get()           - return the stored matrix
##   setinverse(inv) - store a computed inverse in the cache
##   getinverse()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  my_inv <- NULL
  set <- function(y) {
    # <<- assigns into the enclosing environment so state is shared
    # between all four accessor functions.
    x <<- y
    my_inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) my_inv <<- inverse
  getinverse <- function() my_inv
  # Expose the accessors as a named list.
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
##_________________________________________________
## This function computes the inverse of the matrix created by the first function above
## Return the inverse of the special "cache matrix" 'x' created by
## makeCacheMatrix(). If the inverse was already computed it is returned
## from the cache; otherwise it is computed with solve(), stored in the
## cache, and returned. Extra arguments '...' are passed on to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  my_inv <- x$getinverse()
  if (!is.null(my_inv)) {
    message("Getting cached inverse")
    return(my_inv)  # early exit: cache hit
  }
  # Cache miss: compute the inverse, store it, and return it.
  data <- x$get()
  my_inv <- solve(data, ...)
  x$setinverse(my_inv)
  my_inv
}
|
f2749ea8e3bdadcaa971cdf2fa2793bde29b2318
|
5cdc28ca46d98557a4199a4664308454c860751d
|
/R/name_updater.R
|
0bc3dbfd17d5d318bf64968266051765fba34e1d
|
[] |
no_license
|
AndyTan524/baseball_backend
|
e96902192981b3528d111c5aafcf06a7f24b0059
|
f8de8733e83a1306e75c9d42b18b34ad1dfcdf55
|
refs/heads/master
| 2020-05-16T18:51:18.653678
| 2019-04-24T14:15:22
| 2019-04-24T14:15:22
| 183,242,198
| 0
| 1
| null | 2019-05-10T09:34:15
| 2019-04-24T14:08:25
|
R
|
UTF-8
|
R
| false
| false
| 1,099
|
r
|
name_updater.R
|
# Daily update of the master player list: load yesterday's batting and
# pitching files, find players whose MLBId is not yet in the master list,
# normalise their names, and append them to the master CSV.
# NOTE(review): assumes lubridate (today) and dplyr (select) are attached
# by the calling environment -- confirm before running standalone.
master <- read.csv("BIS/2016_player_list.csv")
# Yesterday's date, formatted to match the daily file names (YYYYMMDD).
today <- today() - 1
today <- format(today, "%Y%m%d")
batting <- read.csv(paste("BIS/Batting_", today, ".csv", sep = ""))
pitching <- read.csv(paste("BIS/Pitching_", today, ".csv", sep = ""))
# Drop rows without a GameId (summary/placeholder rows).
batting <- batting[!(batting$GameId %in% NA), ]
pitching <- pitching[!(pitching$GameId %in% NA), ]
batting <- select(batting, LastName, FirstName, MLBId, PlayerName, Team)
pitching <- select(pitching, LastName, FirstName, MLBId, PlayerName, Team)
master2 <- rbind(batting, pitching)
master2 <- unique(master2)
# Keep only players not already present in the master list.
master2 <- master2[!(master2$MLBId %in% master$MLBId), ]
# Factors -> character so the name manipulation below works on strings.
for (j in 1:5) {
  master2[, j] <- as.character(master2[, j])
}
# Derive FirstName by stripping "LastName " from PlayerName.
# BUG FIX: seq_len() instead of 1:nrow(master2) -- the old form iterated
# over c(1, 0) and failed when there were no new players.
# NOTE(review): sub() treats LastName as a regular expression; a last name
# containing regex metacharacters would misbehave -- consider fixed = TRUE.
for (k in seq_len(nrow(master2))) {
  master2$FirstName[k] <- sub(paste(master2$LastName[k], " ", sep = ""), "", master2$PlayerName[k])
}
master2$PlayerName <- paste(master2$FirstName, " ", master2$LastName, sep = "")
master <- rbind(master, master2[!(master2$MLBId %in% master$MLBId), ])
write.csv(master, "BIS/2016_player_list.csv", row.names = FALSE)
|
7a8d9ddefb262ada791c86a45f33a51fc8afd4cd
|
0171da74586a079e97269ba9b7a8c4146c204cd0
|
/man/analyzeChr.Rd
|
17253a87749bb2ad3fe119ba777c1635ff4f1f24
|
[] |
no_license
|
jtleek/derfinder-1
|
bfda042e772224abbc911d94e0ba2c66fe5e9d08
|
a88996a426a899a5d319c628e3b9411237145caa
|
refs/heads/master
| 2021-01-22T13:42:50.235009
| 2013-11-08T18:19:41
| 2013-11-08T18:19:41
| 14,240,696
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,879
|
rd
|
analyzeChr.Rd
|
\name{analyzeChr}
\alias{analyzeChr}
\title{Run the derfinder analysis on a chromosome}
\usage{
analyzeChr(chrnum, coverageInfo, models, cutoffPre = 5,
colsubset = NULL, scalefac = 32, chunksize = NULL,
adjustF = 0, cutoffFstat = 1e-08,
cutoffType = "theoretical", nPermute = 1,
seeds = as.integer(gsub("-", "", Sys.Date())) + seq_len(nPermute),
maxRegionGap = 0L, maxClusterGap = 300L, groupInfo,
subject = "hg19", mc.cores = getOption("mc.cores", 2L),
writeOutput = TRUE, returnOutput = FALSE,
runAnnotation = TRUE, verbose = TRUE)
}
\arguments{
\item{chrnum}{Used for naming the output files when
\code{writeOutput=TRUE} and for
\link[bumphunter]{annotateNearest}. Use '21' instead of
'chr21'.}
\item{coverageInfo}{The output from \link{loadCoverage}.}
\item{models}{The output from \link{makeModels}.}
\item{cutoffPre}{This argument is passed to
\link{preprocessCoverage} (\code{cutoff}).}
\item{colsubset}{This argument is passed to
\link{preprocessCoverage}.}
\item{scalefac}{This argument is passed to
\link{preprocessCoverage}.}
\item{chunksize}{This argument is passed to
\link{preprocessCoverage}.}
\item{adjustF}{A single value to adjust that is added in
the denominator of the F-stat calculation. Useful when
the Residual Sum of Squares of the alternative model is
very small.}
\item{cutoffFstat}{This is used to determine the cutoff
argument of \link{calculatePvalues} and it's behaviour is
determined by \code{cutoffType}.}
\item{cutoffType}{If set to \code{empirical}, the
\code{cutoffFstat} (example: 0.99) quantile is used via
\link{quantile}. If set to \code{theoretical}, the
theoretical \code{cutoffFstats} (example: 1e-08) is
calculated via \link{qf}. If set to \code{manual},
\code{cutoffFstats} is passed to \link{calculatePvalues}
without any other calculation.}
\item{nPermute}{This argument is passed to
\link{calculatePvalues}.}
\item{seeds}{This argument is passed to
\link{calculatePvalues}.}
\item{maxRegionGap}{This argument is passed to
\link{calculatePvalues}.}
\item{maxClusterGap}{This argument is passed to
\link{calculatePvalues}.}
\item{groupInfo}{A factor specifying the group membership
of each sample that can later be used with
\code{plotRegion}.}
\item{subject}{This argument is passed to
\link[bumphunter]{annotateNearest}. Note that only
\code{hg19} works right now.}
\item{mc.cores}{This argument is passed to
\link{preprocessCoverage} (useful if
\code{chunksize=NULL}), \link{calculateStats} and
\link{calculatePvalues}.}
\item{writeOutput}{If \code{TRUE}, output Rdata files are
created at each step inside a directory with the
chromosome name (example: 'chr21' if \code{chrnum="21"}).
One Rdata files is created for each component described
in the return section.}
\item{returnOutput}{If \code{TRUE}, it returns a list
with the results from each step. Otherwise, it returns
\code{NULL}.}
\item{runAnnotation}{If \code{TRUE}
\link[bumphunter]{annotateNearest} is run. Otherwise this
step is skipped.}
\item{verbose}{If \code{TRUE} basic status updates will
be printed along the way.}
}
\value{
If \code{returnOutput=TRUE}, a list with six components:
\describe{ \item{timeinfo }{ The wallclock timing
information for each step.} \item{optionsStats }{ The
main options used when running this function.}
\item{coveragePrep }{ The output from
\link{preprocessCoverage}.} \item{fstats}{ The output
from \link{calculateStats}.} \item{regions}{ The output
from \link{calculatePvalues}.} \item{annotation}{ The
output from \link[bumphunter]{annotateNearest}.} } These
are the same components that are written to Rdata files
if \code{writeOutput=TRUE}.
}
\description{
This is a major wrapper for running several key functions
from this package. It is meant to be used after
\link{loadCoverage} has been used for a specific
chromosome. The steps run include \link{makeModels},
\link{preprocessCoverage}, \link{calculateStats},
\link{calculatePvalues} and
\link[bumphunter]{annotateNearest}.
}
\examples{
## Calculate library size adjustments
sampleDepths <- sampleDepth(list(genomeData$coverage), prob=0.5, nonzero=TRUE, center=TRUE, verbose=TRUE)
sampleDepths
## Build the models
group <- genomeInfo$pop
adjustvars <- data.frame(genomeInfo$gender)
models <- makeModels(sampleDepths, testvars=group, adjustvars=adjustvars)
## Analyze the chromosome
results <- analyzeChr(chrnum="21", coverageInfo=genomeData, models=models, cutoffFstat=1, cutoffType="manual", groupInfo=group, mc.cores=1, writeOutput=FALSE, returnOutput=TRUE)
names(results)
}
\author{
Leonardo Collado-Torres
}
\seealso{
\link{makeModels}, \link{preprocessCoverage},
\link{calculateStats}, \link{calculatePvalues},
\link[bumphunter]{annotateNearest}
}
|
c1dd381ff09a74d3ea6c70115585adbba921c4fd
|
ea805d721a3cdc2db7a75e38a9b212e4e1885778
|
/ribiosExpression/man/writeEset.Rd
|
d1fca2e5f370879f3124e330696b6c0cfd67e4cd
|
[] |
no_license
|
grst/ribios
|
28c02c1f89180f79f71f21a00ba8ad8c22be3251
|
430056c85f3365e1bcb5e565153a68489c1dc7b3
|
refs/heads/master
| 2023-06-01T04:48:20.792749
| 2017-04-10T14:28:23
| 2017-04-10T14:28:23
| 68,606,477
| 0
| 0
| null | 2016-09-19T13:04:00
| 2016-09-19T13:04:00
| null |
UTF-8
|
R
| false
| false
| 4,039
|
rd
|
writeEset.Rd
|
\name{writeEset}
\alias{readEset}
\alias{writeEset}
\title{
Import and export an ExpressionSet object as tab-delimited files
}
\description{
Two functions, \code{writeEset} and \code{readEset}, import and export
an \code{ExpressionSet} object as tab-delimited files
respectively. See details below for advantages and limitations.
}
\usage{
writeEset(eset, exprs.file, fData.file, pData.file)
readEset(exprs.file, fData.file, pData.file)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{eset}{Required for \code{writeEset}, an \code{ExpressionSet}
object to be exported.}
\item{exprs.file}{Required, character string, full name of the file containing the expression
matrix.}
\item{fData.file}{Optional, character string, full name of the file containing feature
annotations. \code{NULL} is handled specially: it will cause no
reading or writing of the feature annotation data.}
\item{pData.file}{Optional, character string, full name of the file
containing sample annotations. \code{NULL} is handled specially: it
will cause no reading or writing of the sample annotation data.}
}
\details{
\code{readEset} and \code{writeEset} provide a lightweight mechanism
to import/export essential information from/to plain text files. They
can use up to three tab-delimited files to store information of an
\code{ExpressionSet} object: a file holding the expression matrix as
returned by the \code{\link{exprs}} function (\code{exprs.file}), a
file containing feature annotations as returned by the \code{\link{fData}}
function (\code{fData.file}), and finally a file containing sample
annotations, as returned by \code{pData} (\code{pData.file}).
All three files are saved as tab-delimited, quoted plain files with
both row and column names. They can be readily read in by the
\code{read.table} function with default parameters.
In both functions, \code{fData.file} and \code{pData.file} are
optional. Leaving them missing or setting their values to \code{NULL}
will prevent exporting/importing annotations.
One limitation of these functions is that they only support the
export/import of \strong{one} expression matrix from one
\code{ExpressionSet}. Although an \code{ExpressionSet} can hold more
than one matrices other than the one known as \code{exprs}, they are
not handled now by \code{writeEset} or \code{readEset}. If such an
\code{ExpressionSet} object is first written in plain files, and then
read back as an \code{ExpressionSet}, matrices other than the one
accessible by \code{exprs} will be discarded.
Similarly, other pieces of information saved in an \code{ExpressionSet}, e.g. annotations or
experimental data, are lost as well after a cycle of exporting and
subsequent importing. If keeping these information is important for
you, other functions should be considered instead of \code{readEset}
and \code{writeEset}, for instance to save an image in a binary file
with the \code{\link{save}} function.
}
\value{
\code{readEset} returns an \code{ExpressionSet} object from plain
files.
\code{writeEset} is used for its side effects (writing files).
}
\author{
Jitao David Zhang <jitao_david.zhang@roche.com>
}
\note{
\code{readEset} will stop if the fData.file or pData.file does not
look like a valid annotation file, by checking they have the same
dimension as suggested by the expression matrix, and matching the
feature/sample names with those stored in the expression matrix file.
}
\seealso{
See \code{\link{readGctCls}} and \code{\link{writeGctCls}} for
importing/exporting functions for files in gct/cls formats.
}
\examples{
sysdir <- system.file("extdata", package="ribiosExpression")
sysexp <- file.path(sysdir, "sample_eset_exprs.txt")
sysfd <- file.path(sysdir, "sample_eset_fdata.txt")
syspd <- file.path(sysdir, "sample_eset_pdata.txt")
sys.eset <- readEset(exprs.file=sysexp,
fData.file=sysfd,
pData.file=syspd)
sys.eset
}
|
4d0193488551c947d4ee64734f8b81e5a4609cef
|
cf6d6b48a353b9d4807176064c1ba3db0bab3ee0
|
/man/install_h2o.Rd
|
78fa6013bebcb2491515b2e513516dabe24fa00d
|
[
"Apache-2.0"
] |
permissive
|
javierluraschi/rsparkling
|
05673ee602032dbab77713bd9bb74fc2a13fa660
|
4acb98ce989978718a8d93ddf75d6f9cb065d6bc
|
refs/heads/master
| 2021-01-25T07:07:10.139239
| 2017-01-31T22:39:52
| 2017-01-31T22:39:52
| 80,698,186
| 0
| 0
| null | 2017-02-02T06:02:45
| 2017-02-02T06:02:44
| null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
install_h2o.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install_h2o.R
\name{install_h2o}
\alias{install_h2o}
\title{An easy installation of the H2O R package}
\usage{
install_h2o(release_name = "rel-tverberg", release_number = "1")
}
\arguments{
\item{release_name}{Object of type character that specifies the release name of the H2O package}
\item{release_number}{Object of type character that specifies the release number of the H2O package}
}
\description{
An easy installation of the H2O R package
}
\examples{
\donttest{
#Install the latest release of H2O on 1/30/16 (relv-tverberg-1)
install_h2o(release_name = "rel-tverberg", release_number = "1")
}
}
|
f82805f2b0c212734ffaf6ee3262392d7ca7dfe7
|
a0520c024186c353624365a97fbe71407bde7687
|
/R/plotServer.R
|
d58577fea54d6cfb2b461cb5d7124257e4e0cf84
|
[] |
no_license
|
epurdom/clusterExperimentShiny
|
ae36464f1debc6b1e9186dd82b1289a1d2dd8f56
|
0b776088174ee6f4e6bed1fadbb71df566905e83
|
refs/heads/master
| 2021-05-01T03:49:07.314819
| 2016-11-28T00:42:51
| 2016-11-28T00:42:51
| 64,497,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,358
|
r
|
plotServer.R
|
#' @rdname InternalModules
#' @export
# Renders (fileName = NULL) or saves as a png (fileName given) the plot
# produced by evaluating 'code', sizing the device according to the plot type.
plotClustersServer <- function(code, fileName = NULL, recordCode = FALSE,
    type = c("plotClusters", "plotCoClustering", "plotHeatmap", "plotDendrogram", "mergeClusters")) {
  type <- match.arg(type)
  cE <- get("cE", envir = appGlobal)
  # Assumes whichClusters is always the last argument in the plotClusters command.
  tmp <- strsplit(code, "whichClusters=")
  if (length(tmp[[1]]) > 1) {
    # Strip the c(...) wrapper. BUG FIX: fixed = TRUE is required -- "c(" is
    # not a valid regular expression (unbalanced parenthesis) and gsub()
    # would error without it.
    whichClusters <- gsub("c(", "", tmp[[1]][2], fixed = TRUE)
    whichClusters <- gsub(")", "", whichClusters, fixed = TRUE)
    whichClusters <- strsplit(whichClusters, ",")[[1]]
    # BUG FIX: was 'whichCusters' (undefined variable -> runtime error).
    nclustersPlot <- sum(clusterTypes(cE) %in% whichClusters)
  } else {
    # Use default which is "workflow".
    nclustersPlot <- ncol(workflowClusters(cE))
  }
  # Device geometry: plotClusters scales with the number of clusters shown;
  # every other plot type uses a fixed 480px square.
  heightR <- if (type == "plotClusters") max((40/3) * nclustersPlot, 480) else "auto"
  heightFixed <- if (type == "plotClusters") heightR else 480
  # BUG FIX: was type == "plotCluster", which never matches any allowed
  # 'type' value, so the doubled width was never applied.
  widthFixed <- if (type == "plotClusters") 2 * 480 else 480
  plotCMar <- if (type == "plotClusters") c(.25 * 1.1, 3 * 8.1, .25 * 4.1, 3 * 1.1) else c(5.1, 4.1, 4.1, 2.1)
  if (is.null(fileName)) { # not a download: render interactively
    if (recordCode) {
      recordTag <- switch(type,
        "plotClusters" = "Plot Clusters",
        "plotCoClustering" = "Plot Co-Clustering",
        "plotHeatmap" = "Plot Heatmap",
        "plotDendrogram" = "Plot Dendrogram",
        "mergeClusters" = "Dendrogram from MergeClusters Plot")
      recordCodeFun(code, tag = recordTag)
    }
    renderPlot({
      par(mar = plotCMar)
      eval(parse(text = code))
    }, height = heightR)
  } else {
    # Download path: write the plot to a png file.
    png(fileName, height = heightFixed, width = widthFixed)
    par(mar = plotCMar)
    eval(parse(text = code))
    dev.off()
  }
}
|
4dc95ef24ad105f7f9014d6e0e2ce8507b03d840
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/Rpdb/R/coords.R
|
81496890c85796f2961269e47a558e7810c47f4a
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,503
|
r
|
coords.R
|
# Get or set the coordinates (either Cartesian or fractional atoms) of an object.
# S3 generic: extract the coordinates of an object (dispatch on class).
coords <- function(...)
UseMethod("coords")
# S3 replacement generic: set the coordinates of an object.
'coords<-' <- function(x, value)
UseMethod("coords<-", x)
coords.default <- function(x1, x2, x3, basis = "xyz", ...)
{
  # Build a 'coords' object from three coordinate vectors. The basis is
  # either "xyz" (Cartesian) or "abc" (fractional) and is stored as an
  # attribute of the returned data.frame.
  if (!basis %in% c("xyz", "abc"))
    stop("Unrecognized basis")
  out <- data.frame(x1, x2, x3)
  attr(out, which = "basis") <- basis
  class(out) <- c("coords", "data.frame")
  out
}
coords.data.frame <- function(x, basis = NULL, ...)
{
  # Coerce a data.frame with x/y/z (Cartesian) or a/b/c (fractional)
  # columns -- or any three-column data.frame with an explicit basis --
  # into a 'coords' object.
  if (!is.data.frame(x)) stop("'x' must be a 'data.frame'")
  if (is.null(basis)) {
    # Guess the basis from the column names.
    if (all(c("x", "y", "z") %in% names(x))) {
      x <- x[, c("x", "y", "z")]
      basis <- "xyz"
    } else if (all(c("a", "b", "c") %in% names(x))) {
      x <- x[, c("a", "b", "c")]
      basis <- "abc"
    } else {
      stop("Can not convert this 'data.frame' into 'coords': Coordinates not found")
    }
  } else if (!basis %in% c("xyz", "abc")) {
    stop("Unrecognized 'basis'")
  }
  if (ncol(x) != 3L) stop("'x' must be a three-columns data.frame")
  coords.default(x[, 1], x[, 2], x[, 3], basis = basis)
}
coords.matrix <- function(x, basis = NULL, ...){
  # Convert a three-column matrix into 'coords' by delegating to the
  # data.frame method.
  if (!is.matrix(x)) stop("'x' must be a 'matrix'")
  coords.data.frame(as.data.frame(x), basis = basis, ...)
}
coords.atoms <- function(x, ...)
{
  # Extract the coordinates of an 'atoms' object, preserving its basis.
  if (!is.atoms(x)) stop("'x' must be an object of class 'atoms'")
  coords(x$x1, x$x2, x$x3, basis.default(x))
}
'coords<-.atoms' <- function(x, value)
{
  # Replace the coordinates of an 'atoms' object with those in 'value'
  # and propagate the basis of 'value' onto 'x'.
  if (!is.atoms(x)) stop("'x' must be an object of class 'atoms'")
  if (!is.coords(value)) stop("'value' must be an object of class 'coords'")
  if (nrow(x) != nrow(value))
    stop(paste("arguments imply different number of rows: ", nrow(x), ", ", nrow(value), sep = ""))
  x[c("x1", "x2", "x3")] <- value
  basis(x) <- basis.default(value)
  x
}
coords.pdb <- function(x, ...)
{
  # The coordinates of a 'pdb' object are those of its 'atoms' component.
  if (!is.pdb(x)) stop("'x' must be an object of class 'pdb'")
  coords.atoms(x$atoms)
}
'coords<-.pdb' <- function(x, value)
{
  # Replace the coordinates stored in the 'atoms' component of a 'pdb' object.
  if (!is.pdb(x)) stop("'x' must be an object of class 'pdb'")
  if (!is.coords(value)) stop("'value' must be an object of class 'coords'")
  if (nrow(x$atoms) != nrow(value))
    stop(paste("arguments imply different number of rows: ", nrow(x$atoms), ", ", nrow(value), sep = ""))
  coords(x$atoms) <- value
  x
}
is.coords <- function(x)
{
  # TRUE when 'x' is (or inherits from) class 'coords'.
  # inherits() is the idiomatic class test; any(class(x) == "coords")
  # is equivalent here but non-idiomatic.
  inherits(x, "coords")
}
|
345c09f97f15e6b3896f8ce48eafb2dce4f1928b
|
7c53c14329aabdcb5a461728759246e3b65fef44
|
/R/twosampleKtest.R
|
d51316937658dadc0c9e290083e932a96ccb53d9
|
[] |
no_license
|
ute/hidden2statspat
|
4ff1267ef1864d3c4c5cae291739219746089f0a
|
4930164936199b7637736d7992ff7d394bce81c4
|
refs/heads/master
| 2021-01-20T05:08:33.341676
| 2015-06-07T11:50:47
| 2015-06-07T11:50:47
| 12,253,533
| 1
| 0
| null | 2013-10-09T18:39:59
| 2013-08-20T20:33:55
|
R
|
UTF-8
|
R
| false
| false
| 6,386
|
r
|
twosampleKtest.R
|
#'@title Two sample K-permutation-test
#'@description Perform a permutation test to compare K-functions estimated on
#'two point pattern samples
#' Returns an object of class \code{sostest}
#'that also can be plotted, see the details.
#'@param x,y the point pattern samples to be compared, objects of class \code{ppsample}
#'@param rmax numeric, the upper integration limit, see Details,
#'@param rlen optional, number of steps for numerical integration, defaults to 256,
#see Details,
#'@param Kfun optional \code{function}, the \eqn{K}-function to be used,
#' either \code{\link{K.est}} (default) or \code{\link{DeltaKdir.est}},
#'@param ... optional parameters for the function \code{Kfun}. To speed up
#'calculations, it is recommended to give an explicit \code{correction} argument.
#'@param use.tbar logical, if true, a modified test statistic is used, see Details,
#'@param nperm number of random permutations, see Details. Defaults to 1000.
#@param noTest optional logical, if \code{TRUE}, no test is run, only the point
#pattern subsamples and \eqn{K}-function estimates are returned.
#'
#'@details The function tests if the \eqn{K}-functions estimated on the
#'point pattern samples have the same mean.
#'\subsection{What the test does, and details on the parameters}{
#'The \eqn{K}-function, or \eqn{\Delta K_{dir}}, is estimated on all patterns in
#'the two samples, and the resulting two samples of estimated K-functions are
#'compared by a permutation test.
#'The test statistic is the integral over a squared Welch-t-statistic,
#'\deqn{T=\int_0^{r_max}\frac{(K_1(r)-K_2(r))^2}{s_1^2(r)/m_1 +
#' s_2^2(r)/m_2} d r}{T = integral [ (K_1(r)-K_2(r))^2 / (s_1^2(r)/m_1 +
#' s_2^2(r)/m_2)],}
#'where \eqn{K_1(r)} and \eqn{K_2(r)} are the group means, and
#'\eqn{s_1^2(r), s_2^2(r)} are within group variances at a fixed argument \eqn{r}.
#'The integral spans an interval \eqn{[0, r_{max}]}. It is approximated by the mean
#'of the integrand over all \code{rlen} values of \eqn{r}, multiplied by the length
#'of the integral, i.e., by \code{rmax}.
#'
#'A variant of the test statistic, \eqn{\bar T}{Tbar}, uses the variance stabilized
#'function \eqn{K(r)/r} instead of \eqn{K(r)} and replaces the denominator in
#'the integrand with \eqn{mean (s_1^2(r)/m_1 + s_2^2(r)/m_2)}. To use this variant
#'instead of the original statistic \eqn{T}, let \code{use.tbar = TRUE}
#'
#'The \emph{p}-value is obtained by permutation across the groups; the number of
#'permutations is specified by \code{nperm}. If \code{nperm = NULL},
#'the exact test with all permutations is used (combinations, for symmetry reasons).
#'This may cause memory or computing time issues.
#'If \code{nperm} is given as an integer, the permutations are sampled randomly,
#'unless \code{nperm} is larger than the number of disjoint combinations.
#'In that case, the exact version is applied, see \code{\link{tL2.permtest}}.
#'
#'To test against differences in local anisotropy, H\&J (2013) propose to use
#'the \eqn{\Delta K_{dir}}-function
#'instead of the isotropic \eqn{K}-function.
#'For this variant of the test, let \code{Kfun = DeltaKdir.est}.
#'
#'A list of quadrats as required for argument \code{qsets} can be obtained by
#'function \code{\link{twoquadsets}}.
#'}
#'\subsection{Details on the return value}{
#'The test returns an object belonging to classes \code{sostest} and \code{htest},
#'a list containing the following components:
#'\tabular{ll}{
#' \cr\code{statistic}\tab{the value of the test statistic,}
#' \cr\code{p.value}\tab{the p-value of the test,}
#' \cr\code{alternative}\tab{a character string describing the alternative hypothesis,}
#' \cr\code{method}\tab{a character string indicating what type of test was performed,}
#' \cr\code{data.name}\tab{a character string giving the name(s) of the data.}
#' \cr\code{Ksamples}\tab{a list of \code{\link{fdsample}}-objects, with elements
#' \code{x}, \code{y} and \code{theo}}
#' }
#'}
#'
#'@export
#'@author Ute Hahn, \email{ute@@imf.au.dk}
#' Hahn, U. (2012) A studentized permutation test for the comparison of spatial point patterns.
#' \emph{Journal of the American Statistical Association} \strong{107} (498), 754--764.
#'
#' Hahn, U. and Jensen, E. B. V. (2013)
#' Inhomogeneous spatial point processes with hidden second-order stationarity.
#' \emph{CSGB preprint} 2013-7.
#' \url{http://data.imf.au.dk/publications/csgb/2013/math-csgb-2013-07.pdf}
#'@seealso function \code{\link{tL2.permtest}} from package {fdnonpar} is used
#'as test engine, \code{\link{twoquadsets}} is used for setting up quadrat samples.
twosample.K.test <- function (x, y,
Kfun = K.est,
rmax, rlen = 256,
...,
use.tbar = FALSE,
nperm = 1000)
{
# Validate input: both arguments must be point pattern samples.
if(!is.ppsample(x) || !is.ppsample(y))
stop("expect point pattern samples as arguments")
# Directional variant is requested by passing Kfun = DeltaKdir.est.
AnisTest <- identical(Kfun, DeltaKdir.est)
# Capture the callers' expressions for the data.name of the htest result.
dataname <- paste("point pattern samples", deparse(substitute(x)),
"and", deparse(substitute(y)))
# Grid of r-values on [0, rmax] used for the numerical integration.
rr <- seq(0, rmax, length.out=rlen)
# Estimate the K-function on every pattern in both samples.
Kfunx <- lapply(x, Kfun, r = rr, ...)
Ktheo <- extract.fdsample(Kfunx[1], "theo")
Kx <- extract.fdsample(Kfunx)
Ky <- extract.fdsample(lapply(y, Kfun, r = rr, ...))
if (use.tbar) {
# Variance-stabilized statistic Tbar: use K(r)/r, dropping r = 0
# to avoid division by zero.
Kxr <- fdsample(rr[rr>0], (Kx$fvals / rr)[rr>0, ])
Kyr <- fdsample(rr[rr>0], (Ky$fvals / rr)[rr>0, ])
testerg <-tL2.permtest(Kxr, Kyr, nperm = nperm, use.tbar = TRUE)
} else {
testerg <-tL2.permtest(Kx, Ky, nperm = nperm, use.tbar = FALSE)
}
# Assemble a human-readable description of the test that was run.
method <- c(paste("Two sample studentized permutation test of identical K-functions,"),
ifelse(AnisTest, "directional version, using Delta K_dir",
"isotropic version, using K_0"),
paste("test statistic: ", if(use.tbar) "Tbar,"
else "T,", "upper integration bound:",rmax),
testerg$method[2])
alternative <- c(paste("not the same K-function"),
if(AnisTest) ",\nbut different kinds of anisotropy")
# Decorate the permutation-test result with the sostest fields.
testerg$method <- method
testerg$alternative <- alternative
testerg$data.name <- dataname
testerg$Ksamples <- list(theo = Ktheo, x = Kx, y = Ky)
# don't change this - theo is assumed to be the first to be plotted, otherwise
# it will lie above the interesting things
firstclass(testerg) <- "sostest"
return(testerg)
}
|
2adce34f9668c46573cb5f67495fe451bd707711
|
e648d4e3cca5c60bab6160f9df1deb9463b8da8a
|
/08_grad_project/Deliverables/biOps/man/imgFFT.Rd
|
83aef85fafe8be8d9930c1ab4c3269e3fb7fe9b6
|
[
"MIT"
] |
permissive
|
blairg23/pattern-recognition
|
9c65edda419671625eafe6711964391033768b50
|
e00d8b67f7c73a81fac02f0cff8db95f88151121
|
refs/heads/master
| 2021-01-10T14:18:20.438260
| 2015-12-15T08:58:03
| 2015-12-15T08:58:03
| 48,028,773
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 571
|
rd
|
imgFFT.Rd
|
\name{imgFFT}
\alias{imgFFT}
\title{Fast Fourier Transformation of an image}
\description{
This function applies a Fast Fourier Transformation to image data.
}
\usage{imgFFT(imgdata, shift = TRUE)}
\arguments{
\item{imgdata}{The image}
\item{shift}{If TRUE (default), the transformation origin is centered}
}
\value{
Returns a complex matrix.
}
\examples{
\dontrun{
x <- readJpeg(system.file("samples", "violet.jpg", package="biOps"))
t <- imgFFT(x)
}
}
\seealso{
\code{\link{imgFFTInv}}
\code{\link{imgFFTShift}}
\code{\link{imgFFTiShift}}
}
\keyword{math}
|
bb9b6c98519da95e39d8c889a2e9a5d49589165a
|
3124eae2c2cc624306b83f945f0f0730841798ce
|
/demo/seewave.R
|
dc8d825d01de05b699d8d8e3ae0d629fa54af459
|
[] |
no_license
|
cran/seewave
|
7841703a7f1cf237ce67e84f0f5b1dba877b1dff
|
1f0b3d5688151141129368c17f826ccedcb4ad96
|
refs/heads/master
| 2023-08-08T11:45:55.093227
| 2023-07-16T05:50:02
| 2023-07-16T06:34:59
| 17,699,546
| 18
| 12
| null | 2023-01-27T12:57:14
| 2014-03-13T06:15:47
|
R
|
UTF-8
|
R
| false
| false
| 2,300
|
r
|
seewave.R
|
# Demo script for the seewave package: oscillograms, envelopes, spectra,
# spectrograms and sound synthesis on the bundled example recordings
# (tico: sparrow, orni: cicada, pellucens: cricket).
require(seewave); data(tico); data(orni); data(pellucens)
op1<-par(ask=TRUE)
# different oscillograms of a tropical sparrow song
oscillo(tico,f=22050)
oscillo(tico,f=22050,k=2,j=2,byrow=TRUE)
op<-par(bg="grey")
oscillo(tico,f=22050,k=4,j=1,title=TRUE,colwave="black",
coltitle="yellow",collab="red",colline="white",
colaxis="blue",coly0="grey50")
par(op)
# overplot of oscillographic and envelope representation
oscillo(tico,f=22050)
par(new=TRUE)
env(tico,f=22050,colwave=2)
# temporal automatic measurements
timer(orni,f=22050,threshold=5,msmooth=c(40,0),
bty="l",colval="blue")
title(main="Timer() for automatic time measurements",col="blue")
# instantaneous frequency
ifreq(tico,f=22050,threshold=5)
title(main="Instantaneous frequency using Hilbert transform")
# comparison of a full spectrum and a mean spectrum of a cicada song
op<-par(mfrow=c(2,1))
spec(orni,f=22050,type="l")
title("spec()")
meanspec(orni,f=22050,wl=512,type="l")
title("meanspec()")
par(op)
# basic 2D spectrogram of a bird song
op <- par(op)
spectro(tico,f=22050,wl=512,ovlp=50,zp=16,collevels=seq(-40,0,0.5))
par(op)
# spectrogram and dominant frequency overlaid of a bird song
op <- par(op)
spectro(tico, f=22050, ovlp=50, palette=reverse.gray.colors.2, scale=FALSE)
par(new=T)
dfreq(tico, f=22050, ovlp=50, threshold=6, col="red", ann=FALSE, xaxs="i", yaxs="i")
par(op)
# 2D spectrogram of a cricket song with colour modifications
op <- par(op)
pellu2<-cutw(pellucens,f=22050,from=1,plot=FALSE)
spectro(pellu2,f=22050,wl=512,ovlp=85,collevels=seq(-25,0,1),osc=TRUE,palette=reverse.heat.colors,
colgrid="white", colwave="white",colaxis="white",collab="white", colbg="black")
par(op)
# sound synthesis: four AM/FM harmonics added into one sound
op <- par(op)
F1<-synth(f=22050,am=c(50,10),cf=2000,d=1,fm=c(500,5,0),plot=FALSE)
F2<-synth(f=22050,a=0.8,cf=4000,am=c(50,10),d=1,fm=c(500,5,0),plot=FALSE)
F3<-synth(f=22050,a=0.6,cf=6000,am=c(50,10),d=1,fm=c(500,5,2000),plot=FALSE)
F4<-synth(f=22050,a=0.4,cf=8000,am=c(50,10),d=1,fm=c(500,5,2000),plot=FALSE)
final1<-F1+F2+F3+F4
spectro(final1,f=22050,wl=512,ovlp=75,osc=TRUE)
title(main="synthesis of a AM/FM sound")
par(op)
# 3D spectrogram of a tropical sparrow song
spectro3D(tico,f=22050,wl=512,ovlp=75,zp=16,maga=2)
|
7e72ce62b0a3a1255f815a8bc13302ff21671de5
|
7e1b087503d5d65ff7a0d2da840ebd5ad6171768
|
/run_analysis.R
|
749cf11246e1477bb33902f187f1b781a587c095
|
[] |
no_license
|
lmalonso/GetData
|
a16d1e5be278dd5730f43758901f68f34b1953c4
|
38c69ab24bc6e1f7d6cbbd7d60305bec6a83a5a1
|
refs/heads/master
| 2020-04-06T04:30:17.382276
| 2015-06-21T19:36:55
| 2015-06-21T19:36:55
| 37,822,680
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,173
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: build a tidy summary of the UCI HAR
# ("Samsung") data set.  Assumes the working directory is the data set root.
# We set our working directory and assign these data sets
actlabels<-read.table("activity_labels.txt",header=FALSE)
features<-read.table("features.txt",header=FALSE)
xtest<-read.table("test/X_test.txt",header=FALSE,sep="")
subtest<-read.table("test/subject_test.txt",header=FALSE)
ytest<-read.table("test/y_test.txt",header=FALSE)
xtrain<-read.table("train/X_train.txt",header=FALSE,sep="")
subtrain<-read.table("train/subject_train.txt",header=FALSE,sep="")
ytrain<-read.table("train/y_train.txt",header=FALSE,sep="")
# Step 1: merge the test and training measurement sets (row-wise)
mergedat<-rbind(xtest,xtrain)
# Step 2: keep only the columns whose feature name contains mean() or std()
colmean<-grep("mean()",as.vector(features[,2]),fixed=TRUE)
colsd<-grep("std()",as.vector(features[,2]),fixed=TRUE)
matdata<-mergedat[,c(colmean,colsd)]
# Step 3: translate the numeric activity codes into descriptive labels
ynumtot<-rbind(ytest,ytrain)
yacttot<-vector()
# NOTE(review): this nested loop grows yacttot one element at a time
# (O(n*m)); a vectorised lookup such as actlabels[match(...), 2] would be
# an equivalent, far faster replacement.
for(i in 1:length(ynumtot[,1])){
for(j in 1:length(actlabels[,1])){
if(ynumtot[i,1]==actlabels[j,1]){
yacttot[i]=as.vector(actlabels[j,2])
}
}
}
library(data.table)
matdata<-data.table(matdata)
matdata<-matdata[,Activity:=yacttot]  # append the activity label column
subtot<-rbind(subtest,subtrain)
matdata<-matdata[,Subject:=subtot]  # append the subject id column
# Step 4: label the kept columns with their original feature names
colmean2<-grep("mean()",as.vector(features[,2]),fixed=TRUE,value=TRUE)
colsd2<-grep("std()",as.vector(features[,2]),fixed=TRUE,value=TRUE)
matdata<-data.frame(matdata)
colnames(matdata)<-c(colmean2,colsd2,"Activity","Subject")
# Step 5: average of every variable for each activity/subject pair
# (6 activities x 30 subjects = 180 rows; 2 id columns + 66 features).
newestdata<-matrix(nrow=180,ncol=68)
meann<-vector()
for(i in 1:6){
act<-c("WALKING","STANDING","SITTING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS","LAYING")
for(j in 1:30){
Subset<-matdata[matdata$Activity==act[i],]
Subset2<-Subset[Subset$Subject==j,]
Subset22<-data.frame(Subset2)
for(k in 1:66){
meann[k+2]<-mean(as.numeric(Subset22[,k]))
meann[2]<-act[i]
meann[1]<-j
}
val<-30*(i-1)+j
newestdata[val,]<-meann
}
}
newestdata<-data.frame(newestdata)
# Prefix the 66 averaged feature columns with "Average "
avvec<-c("","",rep("Average ",times=66))
namess<-c("Subject","Activity",colmean2,colsd2)
newnames<-paste(avvec,namess,sep="")
colnames(newestdata)<-newnames
write.table(newestdata,file="run_analysis.txt",row.names=TRUE,quote=TRUE)
|
8017bfbcecfca93d7f375dd121563f4e03c1d62b
|
147f0bf99987b9acbf8c55491f49b2a14a0b3b1b
|
/statistics/project COURSERA/project.R
|
696f1bb7cd37ca03590c907fce4189d4effad131
|
[] |
no_license
|
Ashutosh-vyas/r-codes
|
ec5a78ab2ca89c74e42805ffa0e0eb3c0c49909d
|
40f591283af2f10ca4a2e9bed89cd1a1ada556d4
|
refs/heads/master
| 2021-01-20T06:57:22.710268
| 2015-06-17T09:54:57
| 2015-06-17T09:54:57
| 31,583,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,392
|
r
|
project.R
|
# PROBLEM STATEMENT
# -----------------------------------------------------------------------------
#In this project you will investigate the exponential distribution in R and
#compare it with the Central Limit Theorem. The exponential distribution can
# be simulated in R with rexp(n, lambda) where lambda is the rate parameter.
# The mean of exponential distribution is 1/lambda and the standard deviation
# is also 1/lambda. Set lambda = 0.2 for all of the simulations. You will investigate
# the distribution of averages of 40 exponentials. Note that you will need to do
# a thousand simulations.
# Illustrate via simulation and associated explanatory text the properties of the
# distribution of the mean of 40 exponentials. You should
# 1. Show the sample mean and compare it to the theoretical mean of the distribution.
# 2. Show how variable the sample is (via variance) and compare it to the theoretical
# variance of the distribution.
# 3. Show that the distribution is approximately normal.
# In point 3, focus on the difference between the distribution of a large collection
# of random exponentials and the distribution of a large collection of averages of 40
# exponentials.
# As a motivating example, compare the distribution of 1000 random uniforms
# hist(runif(1000))
# and the distribution of 1000 averages of 40 random uniforms
# mns = NULL
# for (i in 1 : 1000) mns = c(mns, mean(runif(40)))
# hist(mns)
# This distribution looks far more Gaussian than the original uniform distribution!
#-------------------------------------------------------------------------------------
# SOLUTION :
#-------------------------------------------------------------------------------------
# Lets try to analyse the given code and then proceed to problem
test_fun <- function()
{
  # Two panels side by side: raw uniform draws on the left, means of
  # 40 uniform draws (1000 replicates) on the right -- the second
  # histogram looks far more Gaussian (central limit theorem).
  par(mfrow = c(1, 2))
  hist(runif(1000))
  sample_means <- replicate(1000, mean(runif(40)))
  hist(sample_means)
}
# In the above code we can easily observe the central limit theorem's
# effect.  The first plot shows 1000 numbers randomly selected from a
# uniform distribution in the range 0 to 1.
# In the second plot,
# 40 random variables are drawn from the uniform distribution 1000 times
# and the means are stored in an array; finally a histogram of the means
# is plotted.  That plot reveals Gaussian properties and shows that a mean
# of 0.5 occurs most often, which is the theoretical mean of the uniform
# distribution with min = 0 and max = 1.
# Now we use another distribution, whose mean = 1/lambda = standard deviation
# Simulate the central limit theorem for the exponential distribution.
#
# Draws `simulation` samples of size `n` from Exp(lambda), prints how far the
# empirical mean and sd of the sample means fall from 1/lambda, and plots the
# theoretical density next to a histogram of the sample means.
#
# Args:
#   n          size of each sample (e.g. 40)
#   lambda     rate parameter of the exponential distribution
#   simulation number of replicates (default 1000).  BUG FIX: previously this
#              argument was accepted but ignored -- the loop was hard-coded
#              to 1000 iterations.
#
# NOTE(review): comparing 1/lambda with sd(mns) mirrors the original script;
# the theoretical sd of the *sample mean* is (1/lambda)/sqrt(n).
exp_clt <- function(n, lambda, simulation = 1000)
{
  # Theoretical Exp(lambda) density curve on [0, 100].
  x <- seq(0, 100, length.out = 1000)
  y <- dexp(x, rate = lambda)
  # Means of `simulation` independent samples of size n.  replicate()
  # preallocates, replacing the original grow-by-c() while loop.
  mns <- replicate(simulation, mean(rexp(n, lambda)))
  # --------------------- part 1 --------------------------------------------------
  print(paste("difference in mean :", ((1 / lambda) - mean(mns))))
  # --------------------- part 2 --------------------------------------------------
  print(paste("difference in standard deviation :", ((1 / lambda) - sd(mns))))
  # --------------------- part 3 --------------------------------------------------
  par(mfrow = c(1, 2))
  plot(x, y, type = "l", lwd = 2, col = "red",
       main = "plot of exp distribution:green line is mean")
  abline(v = (1 / lambda), col = "green")
  hist(mns, main = "green line is mean")
  abline(v = mean(mns))
}
|
ae99bcbce9e0ee7908506232f21241a2ed190e37
|
a706b6857ad346a17fc3ef89c401d3439998295e
|
/scripts/summary_info.R
|
1bd37e53ec2ebfcbd82cfd913abc2a0e6324d03a
|
[] |
no_license
|
AoxuanWang/group_3_midpoint_report
|
8b491f8469897d87478fc6f5a76f8af1d8002ef1
|
03a52cbcda3aa120bf637141ad4c585904d16a9c
|
refs/heads/master
| 2023-01-15T23:51:59.259543
| 2020-11-30T08:52:00
| 2020-11-30T08:52:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15
|
r
|
summary_info.R
|
## summary info
|
a72a96b403cf71e460afd69ffe65d372eef27b14
|
4f32160fe93781beaeb784e7751084dc7197dd10
|
/cachematrix.R
|
458ba2b22451492254bc704f4136b2113dece8dc
|
[] |
no_license
|
Madhanrd/ProgrammingAssignment2
|
fb992430669ba322c03961cd7d4c8896b645c2b3
|
c4b6e271260f421f48819725bc715b0a6288052a
|
refs/heads/master
| 2020-12-30T18:57:28.094600
| 2015-03-22T21:04:38
| 2015-03-22T21:04:38
| 32,648,709
| 0
| 0
| null | 2015-03-21T20:24:08
| 2015-03-21T20:24:08
| null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
cachematrix.R
|
## Create a caching wrapper around a matrix.  The returned object is a list
## of four accessor functions that share the enclosing environment:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(special = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    special <<- y
    # A new matrix invalidates any previously cached inverse.
    cached_inverse <<- NULL
  }
  get <- function() {
    special
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## When the inverse is already cached it is returned immediately (after a
## "getting data from cache" message); otherwise it is computed with solve(),
## stored via setinverse() for future calls, and returned.
cacheSolve <- function(mat, ...) {
  cached <- mat$getinverse()
  # Cache hit: skip the computation entirely.
  if (!is.null(cached)) {
    message("getting data from cache")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  result <- solve(mat$get(), ...)
  mat$setinverse(result)
  message("setting data to cache")
  result
}
|
928dee6078fdfd59966e4fa08a2dc4625ce61522
|
d593aafcabf6c2f69875ffbaace3b819996ecd11
|
/man/all_county_fips.Rd
|
4e9c13a6e8aee9f7b12da2386cbead5876dde420
|
[] |
no_license
|
GL-Li/ggtiger
|
6ba6347720262df7ad1db0fac425a99adac8d54a
|
a37cf0b8cce81a3af9ccdd9bc23d91210f4ca91c
|
refs/heads/master
| 2021-04-27T00:22:31.361743
| 2020-08-02T13:40:05
| 2020-08-02T13:40:05
| 123,801,296
| 14
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 438
|
rd
|
all_county_fips.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{all_county_fips}
\alias{all_county_fips}
\title{fips of each county}
\format{A data.table with 3109 rows and 3 variables
\describe{
\item{fips}{fips codes of state}
\item{state}{lower case state names}
\item{county}{lower case county names}
}}
\usage{
all_county_fips
}
\description{
fips of each county
}
\keyword{datasets}
|
bedd79950084318ba74aac9251441777b51b9ed6
|
03289c60f03e570b34ea923702c79f030d8fa676
|
/splitData.R
|
983b8ec6e7dda14cccab71e30898d10ca49d2c54
|
[] |
no_license
|
koehnden/is-seminar
|
289a8612e44c5a6d3073300bcfe9f703eff03186
|
612263ae8d215d939e529666e565a2c0b5a74434
|
refs/heads/master
| 2016-08-10T09:36:31.327207
| 2016-02-20T15:17:04
| 2016-02-20T15:17:04
| 50,588,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
splitData.R
|
################### Split the Data Sets #####################
#
# Partition a data set into training and test subsets, stratified on the
# `target` column via caret::createDataPartition().  Default ratio is
# 60% training / 40% test.  Returns list(trainData = ..., testData = ...).
splitData <- function(data, ratio = 0.6) {
  # Fixed seed so every call produces the same split.
  set.seed(12345)
  # Row indices selected for the training partition (matrix, not list).
  train_rows <- createDataPartition(data$target, p = ratio, list = FALSE)
  list(
    trainData = data[train_rows, ],
    testData  = data[-train_rows, ]
  )
}
# Apply splitData() to each benchmark data set; *.tr / *.test hold the
# training and test partitions.  The data objects (corral, xor_100, ...)
# are assumed to be defined by earlier scripts.
# Corral
corral.split <- splitData(data=corral)
corral.tr <- corral.split$trainData
corral.test <- corral.split$testData
# Corral-100
corral_100.split <- splitData(data=corral_100)
corral_100.tr <- corral_100.split$trainData
corral_100.test <- corral_100.split$testData
# XOR-100
xor_100.split <- splitData(data=xor_100)
xor_100.tr <- xor_100.split$trainData
xor_100.test <- xor_100.split$testData
# parity 5+5
parity.split <- splitData(data=parity)
parity.tr <- parity.split$trainData
parity.test <- parity.split$testData
# Toy data set 1
toy1.split <- splitData(data=toy.data1)
toy1.tr <- toy1.split$trainData
toy1.test <- toy1.split$testData
# Toy data set 2
toy2.split <- splitData(data=toy.data2)
toy2.tr <- toy2.split$trainData
toy2.test <- toy2.split$testData
# Toy data set 3
toy3.split <- splitData(data=toy.data3)
toy3.tr <- toy3.split$trainData
toy3.test <- toy3.split$testData
## Monk is already split; we do not change that split because the noise is
# only in the training set
|
7efd607596fbb4f3e6df59c0a05daa48bc3e18fb
|
d06c45000e06134211c99add0beea81ee0452244
|
/03.DS-Probability/S3-Section 3/3.3 Section 3 Assessment/Assessment.R
|
0756802d684c5301606926051801427201a1efa2
|
[] |
no_license
|
dgpaniagua/data-science-assessments
|
24eb2d2c0f50a5a5f8b76db86e4f6fad0d803022
|
5c9af8ce74701f7df6431744128885e124cc7fe0
|
refs/heads/main
| 2023-09-05T14:01:52.179350
| 2021-11-22T04:40:59
| 2021-11-22T04:40:59
| 376,401,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,385
|
r
|
Assessment.R
|
# Data Science: Probability -- Section 3 assessment.  Each question models
# guessing on an exam or a roulette bet as a sum of i.i.d. draws and compares
# exact expectations/SEs with normal approximations and Monte Carlo runs.

########## Q1: 44 questions, 5 choices, -0.25 penalty per wrong guess ##########
n_question <- 44
choices <- 5
penalty_points <- -0.25
correct_points <- 1
p_correct <- 1/choices
p_wrong <- 1 - p_correct
## Q1a: probability of guessing one question correctly
p_correct
## Q1b: expected points from guessing one question
mu <- correct_points*p_correct + penalty_points*p_wrong
## Q1c: E[sum of n draws] = n * mu
E <- n_question * mu
## Q1d: SE[sum of n draws] = sqrt(n) * |b - a| * sqrt(p * (1 - p))
SE <- sqrt(n_question) * abs(penalty_points - correct_points) * sqrt(p_correct*p_wrong)
## Q1e: P(score > 8) under the normal approximation
1 - pnorm(8, E, SE)
## Q1f: same probability by Monte Carlo simulation
set.seed(21, sample.kind = "Rounding")
B <- 10000
S <- replicate(B, sum(sample(c(correct_points, penalty_points),
                             44, replace = TRUE,
                             prob = c(p_correct, p_wrong))))
mean(S>8)
########## Q2: 4 choices, no penalty for a wrong guess ##########
choices <- 4
penalty_points <- 0
p_correct <- 1/choices
p_wrong <- 1 - p_correct
## Q2a: expected score when guessing every question
mu <- (correct_points*p_correct + penalty_points*p_wrong)
E <- n_question * mu
## Q2b: P(score > 35) across a range of per-question skill levels
p_correct <- seq(0.25, 0.95, 0.05)
p_wrong <- 1 - p_correct
mu <- correct_points*p_correct + penalty_points*p_wrong
E <- n_question * mu
SE <- sqrt(n_question) * abs(penalty_points - correct_points) * sqrt(p_correct*p_wrong)
## BUG FIX: the original called plot(p, ...) but no object `p` exists;
## the x-axis variable is p_correct.
plot(p_correct, 1 - pnorm(35, E, SE))
########## Q3: roulette "house special" -- win 6 with p = 5/38, else lose 1 ##########
p_win <- 5/38
p_loss <- 1 - p_win
win <- 6
loss <- -1
## Q3a: expected payout of a single bet
mu <- p_win*win + p_loss*loss
## Q3b: standard error of a single bet
sigma <- abs(loss - win) * sqrt(p_win*p_loss)
## Q3c: expected value of the *average* of 500 bets equals mu
mu
## Q3d: standard error of the average of 500 bets
sigma/sqrt(500)
## Q3e: expected value of the *sum* of 500 bets
E <- 500*mu
## Q3f: standard error of the sum of 500 bets
SE <- sqrt(500)*sigma
## Q3g: P(losing money) = P(sum <= 0) via the normal approximation
pnorm(0, E, SE)
|
325be09c7b2a2915c078ca20feb920727aab70dc
|
03112612a44cc95b08b982bcc2937210929520f9
|
/AddonSkeleton/prefs_info.r
|
a43398a6092597de61dfc625627b7ee271f4b9d0
|
[] |
no_license
|
HaikuArchives/ScannerBe
|
703b5875d471380d432790f0d1c7117f608d1d5b
|
c4f889efda4839f48eca92bf669a0ff54e0cdc80
|
refs/heads/master
| 2021-01-23T05:45:21.581579
| 2007-04-01T19:21:38
| 2007-04-01T19:21:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82
|
r
|
prefs_info.r
|
// Rez resource script (BeOS): defines the 'info' resource (ID 0, name
// "ScannerBe Info") for the ScannerBe preferences add-on; the string below
// is the info text carried by the resource.
resource( 'info', 0, "ScannerBe Info" )
{
"My Scanner, Version 0.9.0 1/22/98."
}
|
4e205906e4a3aa304cc6166ea11142a3f992731c
|
c54cdc3354a193ad9c0a8f1f3bce7312e93389c7
|
/Plot2.R
|
23aee39a442dda03259e0303987af4db60743c30
|
[] |
no_license
|
MayHsu/ExploratoryDataAnalysis_Project2
|
08fbd9e2aa9cca9e8981ea1f0942bcd4be08492e
|
1677941810fad574933982531e8b6e1f7a6b66c8
|
refs/heads/master
| 2020-05-04T11:49:47.894644
| 2019-04-02T16:31:45
| 2019-04-02T16:31:45
| 179,116,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 936
|
r
|
Plot2.R
|
# Exploratory Data Analysis project, plot 2: total PM2.5 emissions per year
# in Baltimore City (fips 24510), from the NEI summary data set.
##declare url and zip file name
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
zipFile <- "exdata_data_NEI_data.zip"
##download file (skipped when the archive already exists)
if(!file.exists(zipFile)){
download.file(url,zipFile,method = "curl")
}
##unzip file (skipped when already extracted)
dataPath <- "exdata_data_NEI_data"
if(!file.exists(dataPath)){
unzip(zipFile)
}
##read NEI summary data; exists() caches the big object across reruns in an
##interactive session
if(!exists("NEI")){
NEI <- readRDS("summarySCC_PM25.rds")
}
#Filter Baltimore data (fips "24510" = Baltimore City, MD)
NEI_Baltimore <- subset(NEI,fips=="24510")
##prepare summary data: total emissions per year
aggregatedTotalByYear <- aggregate(Emissions ~ year, NEI_Baltimore, sum)
##create plot on a PNG device
png("plot2.png")
plot(aggregatedTotalByYear$year, aggregatedTotalByYear$Emissions, type = "l", lwd = 2,
xlab = "Year",
ylab = expression("Total Tons of PM2.5 Emissions"),
main = expression("Total Tons of PM2.5 Emissions in Baltimore City-US from 1999-2008"))
dev.off()
|
1d79463217aa877ebeccfbe2a22548b1628d739f
|
06f65e3dd02dfdda5c8e7d3415892592675bf9aa
|
/man/bn_tab_cap.Rd
|
852e5017fab65798f7914c20bb0b57a3f40ca89d
|
[] |
no_license
|
renlund/bokner
|
9af406a257ba350f8e67cd4a4024ee85d0d6decd
|
ade0a9ecd6bcd20f3c1cd6cbda3e4844b6003dd3
|
refs/heads/master
| 2023-04-07T00:01:56.807979
| 2023-03-23T13:54:38
| 2023-03-23T13:54:38
| 287,030,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 686
|
rd
|
bn_tab_cap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_caption.r
\name{bn_tab_cap}
\alias{bn_tab_cap}
\alias{bn_tc}
\title{table caption}
\usage{
bn_tab_cap(caption, label = NULL)
bn_tc(caption, label = NULL)
}
\arguments{
\item{caption}{character; the caption}
\item{label}{character; the label. By default, this will be the chunk label,
most of the time you do not want to change this}
}
\description{
get bookdown enumerable table caption with label from the chunk label as
default. The caption provided will be inserted after "Table:
(\\#tab:<chunk label>)"
}
\section{Functions}{
\itemize{
\item \code{bn_tc}: alias for \code{bn_tab_cap}
}}
|
4417646ae6a5f259b4772217a24183e0beb2470d
|
a1a41884c000415189f5b5f0b14c1dc33acadd87
|
/mapper_gff_vs_genome_hierarchical_nostrand.R
|
5a682946944cff4bb2ec45a2a3771e3cc6182a3e
|
[] |
no_license
|
silverkey/HALO
|
b2755c262190e3556158d3c2083b5a03adc01865
|
1e6146bea41b16d8e991a74501845ddf3d9cf817
|
refs/heads/master
| 2021-01-10T22:07:40.635833
| 2014-12-07T16:49:33
| 2014-12-07T16:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,282
|
r
|
mapper_gff_vs_genome_hierarchical_nostrand.R
|
# JACK THE MAPPER!!!
#
# VERSION gff_vs_genome_hierarchical_nostrand
#
# Takes a GFF file from a blast of 2 genomes and calculates the overlap
# between the features and a given genome.
#
# The GFF should refer to the genome from which the script will
# load the annotations used to calculate the overlap.
# It considers a hierarchy of overlaps defined in the variable overlap.order.
#
# Therefore it returns only the annotation that ranks first in the
# hierarchy, i.e. if a match overlaps both a CDS and an intron the script
# reports only one of the associations, as defined in the order variable.
# ----------------- #
# RUNTIME VARS #
# ----------------- #
# The directory and the GFF file inside it to annotate
sel.dir = '/Users/remo/ANALYSIS/HALO/ANALYSIS/BLASTRES'
gff.file = 'HR_vs_CI_rm_top_CI.gff'
# Info about the GFF to analyze: file name and the attribute keys expected
# in column 9 of the GFF
gff.filename = 'HR_vs_CI_rm_top_CI.gff'
att.list = c('ID','Target','E','O')
# The directory containing the transcript database, or where it will be created
dbdir = '/Users/remo/ANALYSIS/HALO/ANALYSIS/BLASTRES'
organism = 'cintestinalis'
download = 'F'  # 'T' = rebuild the TranscriptDb from Biomart; 'F' = load cached copy
# Promoter window: s.prox bases upstream, e.prox bases downstream of the TSS
s.prox = 1000
e.prox = 0
# Completely noncoding transcripts will overlap exons but not CDS nor UTR.
# We use a hierarchical order: every time a range overlaps a feature it is
# associated to that feature and cut out of the remaining ranges, so it
# cannot also overlap a feature lower in the order.
# Order used to test overlap in this script (defined in overlap.order):
# 1) cds
# 2) utr
# 3) exon
# 4) promoter
# 5) intron
# Define the order to test the overlap:
overlap.order = c('cds','utr5','utr3','exon','promoter','intron')
# ----------------- #
# FUNCTIONS #
# ----------------- #
# Build promoter windows for each transcript: [start - s.prox, start + e.prox]
# on the + strand and [end - e.prox, end + s.prox] on the - strand, so the
# window sits upstream of the transcription start.  tx_name and gene_id
# metadata are carried over.  Requires GenomicRanges/GenomicFeatures classes.
get.promoters = function(transcripts,s.prox,e.prox) {
pos = strand(transcripts) == '+'
# Window boundaries flip with strand; as.vector() presumably flattens the
# S4 vector wrappers returned by ifelse() here -- TODO confirm.
promoter.start = as.vector(ifelse(pos,start(transcripts)-s.prox,end(transcripts)-e.prox))
promoter.end = as.vector(ifelse(pos,start(transcripts)+e.prox,end(transcripts)+s.prox))
promoter.strand = ifelse(pos,'+','-')
promoter.chr = seqnames(transcripts)
promoter.tx_name = unlist(mcols(transcripts[,'tx_name'])[,1])
promoter.gene_id = unlist(mcols(transcripts[,'gene_id'])[,1])
promoters = GRanges(seqnames=promoter.chr,
ranges=IRanges(start=promoter.start,end=promoter.end),
strand=promoter.strand,
tx_name=promoter.tx_name,gene_id=promoter.gene_id)
promoters
}
# Used for transcripts and promoters (plain GRanges features).
# Strand is ignored when finding overlaps.  Returns a list:
#   res    -- unique rows of (range ID, feature name, gene, transcript,
#             strand, dir) for every query/feature overlap
#   no.res -- the query ranges that overlapped nothing, to be passed on to
#             the next feature class in the hierarchy
calculate.range.overlap = function(ranges,features,name,dir) {
overlap = findOverlaps(ranges,features,ignore.strand=T)
overlap = as.data.frame(overlap)
id = as.character(mcols(ranges[overlap$queryHits])$ID)
gene = as.character(mcols(features[overlap$subjectHits])$gene_id)
transcript = as.character(mcols(features[overlap$subjectHits])$tx_name)
strand = as.character(strand(features[overlap$subjectHits]))
res = as.data.frame(unique(cbind(id,name,gene,transcript,strand,dir)))
# When there are no hits, cbind() yields fewer than 6 columns, so all input
# ranges survive to the next round; otherwise overlapping ranges are removed.
if(ncol(res)<6) no.res = ranges
if(ncol(res)==6) no.res = ranges[-overlap$queryHits]
list(res=res,no.res=no.res)
}
# Used for promoters, exons, introns, CDS and UTRs (GRangesList features,
# grouped by transcript).  Gene id / transcript id / strand are looked up in
# the txid.gid table by the list element names.  Same return shape as
# calculate.range.overlap(): list(res = overlap table, no.res = leftover ranges).
calculate.rangelist.overlap = function(ranges,features,name,txid.gid,dir) {
overlap = findOverlaps(ranges,features,ignore.strand=T)
overlap = as.data.frame(overlap)
id = as.character(mcols(ranges[overlap$queryHits])$ID)
gene = as.character(txid.gid[names(features[overlap$subjectHits]),2])
transcript = as.character(txid.gid[names(features[overlap$subjectHits]),1])
strand = as.character(txid.gid[names(features[overlap$subjectHits]),3])
res = as.data.frame(unique(cbind(id,name,gene,transcript,strand,dir)))
# Empty-overlap guard: see calculate.range.overlap().
if(ncol(res)<6) no.res = ranges
if(ncol(res)==6) no.res = ranges[-overlap$queryHits]
list(res=res,no.res=no.res)
}
# Read a GFF file into a GRanges object.  The attribute keys listed in
# att.list are parsed out of column 9 ("key=value;" pairs) and stored,
# together with the score column, as metadata columns on the ranges.
get.ranges.from.gff = function(gff.filename,att.list) {
gff = read.table(file=gff.filename,sep='\t',comment.char='',quote='')
colnames(gff) = c('seqid','source','type','start','end','score','strand','phase','attribute')
att.df = c()
# For each requested key, extract its value from the semicolon-separated
# attribute string of every row.
for(i in 1:length(att.list)) {
a = att.list[i]
a = paste(a,'=',sep='')
val = unlist(lapply(as.character(gff$attribute),function(x)sub(a,'',unlist(strsplit(x,';'))[grep(a,unlist(strsplit(x,';')))])))
att.df = cbind(att.df,val)
}
att.df = as.data.frame(att.df)
colnames(att.df) = att.list
att.df$score = gff$score
ranges = GRanges(seqnames=gff$seqid,
ranges=IRanges(start=gff$start,end=gff$end),
strand=gff$strand)
mcols(ranges) = att.df
ranges
}
# ----------------- #
#      SCRIPT       #
# ----------------- #
library("GenomicFeatures")
# Build (or reload) the transcript database for the target organism.
if(download == 'T') {
transdb = makeTranscriptDbFromBiomart(biomart="ensembl",dataset=paste(organism,"gene_ensembl",sep='_'))
saveDb(transdb,file=paste(organism,"sqlite",sep='.'))
} else {
transdb = loadDb(file=paste(organism,"sqlite",sep='.'))
}
setwd(sel.dir)
# Build Features
transcripts = transcripts(transdb,columns=c("tx_id","tx_name","gene_id")) # range
cds = cdsBy(transdb,by='tx',use.names=T) # rangelist
exon = exonsBy(transdb,by='tx',use.names=T) # rangelist
intron = intronsByTranscript(transdb,use.names=T) # rangelist
utr5 = fiveUTRsByTranscript(transdb,use.names=T) # rangelist
utr3 = threeUTRsByTranscript(transdb,use.names=T) # rangelist
promoter = get.promoters(transcripts,s.prox,e.prox) # range
# Build a lookup table associating transcripts -> genes -> strands
# (rownames are transcript names for direct indexing).
txid.gid = as.data.frame(cbind(
unlist(mcols(transcripts[,'tx_name'])[,1]),
unlist(mcols(transcripts[,'gene_id'])[,1]),
as.character(strand(transcripts))))
colnames(txid.gid) = c('tx_id','gene_id','strand')
rownames(txid.gid)=txid.gid$tx_id
# Build table for results
features.res.cn = c('ID','overlap','gene','trans','strand','dir')
features.res = matrix(ncol=length(features.res.cn),nrow=0)
# Build the ranges of the fragments you want to test the overlap with features
ranges = get.ranges.from.gff(gff.filename,att.list)
rtab = as.data.frame(ranges)
# Start the overlap analysis: walk the hierarchy; each round removes the
# ranges that matched so lower-priority features never see them.
for(i in 1:length(overlap.order)) {
fname = overlap.order[i]
features = get(fname)
if(class(features)=='GRanges') {
lres = calculate.range.overlap(ranges,features,fname,'NA')
ranges = lres$no.res
if(ncol(lres$res)==ncol(features.res)) features.res = rbind(features.res,lres$res)
}
if(class(features)=='GRangesList') {
lres = calculate.rangelist.overlap(ranges,features,fname,txid.gid,'NA')
ranges = lres$no.res
if(ncol(lres$res)==ncol(features.res)) features.res = rbind(features.res,lres$res)
}
}
colnames(features.res) = features.res.cn
# Whatever never matched any feature class is labelled intergenic.
intergenic = as.data.frame(lres$no.res)
if(nrow(intergenic)>=1) {
intergenic$overlap='intergenic'
intergenic$gene=NA
intergenic$trans=NA
intergenic$strand=NA
intergenic$dir=NA
features.res = rbind(features.res,intergenic[,features.res.cn])
}
# Join the annotation back onto the original GFF table and write it out.
map = unique(features.res[,c('ID','overlap')])
final = merge(rtab,map,by.x='ID',by.y='ID',sort=F)
write.table(final,file='RESULTS_FEATURE_ANNOTATED.xls',sep="\t",row.names=F,quote=F)
|
1db9da93db8d5f4a470598f493bc8e5448d6aac7
|
0eac6f72fc988546ee57127b5741e3d12e2379a5
|
/tests/testthat/testMultinomCI.R
|
44efe0965dc28ccac749923fabc27373f21e9b48
|
[
"MIT"
] |
permissive
|
spedygiorgio/markovchain
|
4e70064a749f55d52bcdfffb7559e7027b161cc1
|
4eb1ec1b67f9231c129db5da3cc2ba51bd5f4121
|
refs/heads/master
| 2023-06-09T15:48:30.895373
| 2023-05-16T21:25:26
| 2023-05-16T21:25:26
| 31,481,152
| 111
| 58
|
NOASSERTION
| 2023-05-18T22:00:52
| 2015-02-28T23:54:38
|
R
|
UTF-8
|
R
| false
| false
| 1,681
|
r
|
testMultinomCI.R
|
#library(markovchain)
# Fit a Markov chain to a short two-state sequence and build simultaneous
# multinomial confidence intervals for its transition probabilities.
# NOTE(review): .multinomialCIRcpp is an internal (non-exported) markovchain
# routine; this test exercises it directly.
seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a")
mcfit<-markovchainFit(data=seq,byrow=TRUE)
# print(mcfit)
seqmat<-createSequenceMatrix(seq)
seqmat
mCI <- .multinomialCIRcpp(mcfit$estimate@transitionMatrix, seqmat, 0.95)
# print(mCI)
####end of creating multinomialCI
context("Multinomial confidence interval")
# NOTE(review): "statisfay" is a typo in the test description (a runtime
# string), kept unchanged in this documentation-only pass.
test_that("multinomial CI statisfay", {
# expect_equal(mCI$lowerEndpointMatrix, matrix(c(0.2222222,0.3333333,
# 0.5714286,0.1428571),nrow=2, byrow=TRUE, dimnames=list(c("a","b"),
# c("a","b"))
# ))
# expect_equal(mCI$upperEndpointMatrix, matrix(c(0.8111456,0.9222567,
# 1,0.6839473),nrow=2, byrow=TRUE, dimnames=list(c("a","b"),
# c("a","b"))
# ))
expect_equal(mCI$upperEndpointMatrix[2,1],1)
})
# Multinomial distribution with 3 classes, from which 79 samples
# were drawn: 23 of them belong to the first class, 12 to the
# second class and 44 to the third class. Punctual estimations
# of the probabilities from this sample would be 23/79, 12/79
# and 44/79 but we want to build 95% simultaneous confidence intervals
# for the true probabilities
# m = multinomialCI(c(23,12,44), 0.05)
# print(paste("First class: [", m[1,1], m[1,2], "]"))
# print(paste("Second class: [", m[2,1], m[2,2], "]"))
# print(paste("Third class: [", m[3,1], m[3,2], "]"))
# seq<-c(4, 5)
# m = multinomialCI(seq, 0.05)
# m
|
74f81301d15ec255cfd61450179af1025689afe7
|
7e7eec5a2d662e7d86fffb921ddf04c74c81cd68
|
/R/sci-notation.r
|
ae95d4c0efd3da0df5149c5205e52cf4fde023ad
|
[] |
no_license
|
ricschuster/Marxan_vs_ILP_paper
|
69a46ce3c86e6334cfaaf2eb236a47c819f38527
|
c87debff19238432119038a3c07dfbadab72a711
|
refs/heads/master
| 2023-03-11T20:46:21.999381
| 2020-11-05T21:09:18
| 2020-11-05T21:09:18
| 153,321,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 545
|
r
|
sci-notation.r
|
# Format numbers as plotmath scientific-notation expressions.
#
# Vector input is handled recursively, one element at a time.  Per element:
#   * NA returns 0 and 0 returns NA (behaviour preserved from the original
#     implementation -- callers appear to rely on this encoding),
#   * a value whose rounded mantissa is 1 renders as 10^exponent,
#   * anything else renders as base %*% 10^exponent (plotmath "times" sign).
sci_notation <- function(x, digits = 1) {
  # Vector case: format the head, then append the formatted tail.
  # (The recursive call keeps the default `digits`, as the original did.)
  if (length(x) > 1) {
    return(append(sci_notation(x[1]), sci_notation(x[-1])))
  }
  # Scalar special cases -- the NA test must run before the 0 comparison.
  if (is.na(x)) {
    return(0)
  }
  if (x == 0) {
    return(NA)
  }
  exponent <- floor(log10(x))
  base <- round(x / 10^exponent, digits)
  if (base == 1) {
    # A bare power of ten: drop the redundant "1 x" prefix.
    as.expression(substitute(10^exponent, list(exponent = exponent)))
  } else {
    as.expression(substitute(base %*% 10^exponent,
                             list(base = base, exponent = exponent)))
  }
}
|
45c8fcd5c34a78b2dc0f5a37622eeeff416eee39
|
7e0f4777f4e06b0ac72b90422ac0d9c765767755
|
/veghf/veghf-wetlands.R
|
9ba57844bb117e7f31a8e25cec54cb7e6a8eb357
|
[] |
no_license
|
psolymos/abmianalytics
|
edd6a040082260f85afbf4fc25c4f2726b369392
|
9e801c2c564be155124109b4888d29c80bd1340d
|
refs/heads/master
| 2023-01-30T05:00:32.776882
| 2023-01-21T05:36:23
| 2023-01-21T05:36:23
| 34,713,422
| 0
| 7
| null | 2017-01-20T19:07:59
| 2015-04-28T06:39:37
|
R
|
UTF-8
|
R
| false
| false
| 5,660
|
r
|
veghf-wetlands.R
|
HF_VERSION <- "2014_coarse" # load 2014 defs
source("~/repos/abmianalytics/veghf/veghf-setup.R")
load(file.path(ROOT, VER, "data", "analysis", "ages-by-nsr.Rdata"))
## wetland zones ----------------------------------------------------
## Buffers
f <- file.path(ROOT, VER, "data", "raw", "veghf",
"wetland", "VegV6VerifiedHF_summaryOnBufRings_allYear.csv")
dw250m <- read.csv(f)
## site label issue from 2016
levels(dw250m$Pin_Wetland_ID)[levels(dw250m$Pin_Wetland_ID) == "W-936"] <- "W-956"
dw250m$Site_YEAR <- with(dw250m,
interaction(Pin_Wetland_ID, survey_Year, sep="_", drop=TRUE))
head(dw250m)
table(dw250m$BUFF_DIST)
setdiff(levels(dw250m$FEATURE_TY), levels(hftypes$FEATURE_TY))
dw20m <- dw250m[dw250m$BUFF_DIST <= 20,]
dw100m <- dw250m[dw250m$BUFF_DIST <= 100,]
ddw20m <- make_vegHF_wide_v6(dw20m,
col.label="Site_YEAR",
col.year="survey_Year",
col.HFyear="YEAR_1",
sparse=TRUE, HF_fine=FALSE) # don't use refined classes
ddw20m$scale <- "0-20 m buffer around wetlands"
dx <- nonDuplicated(dw20m, Site_YEAR, TRUE)[rownames(ddw20m[[1]]),]
ddw20m <- fill_in_0ages_v6(ddw20m, dx$NSRNAME, ages_list)
ddw100m <- make_vegHF_wide_v6(dw100m,
col.label="Site_YEAR",
col.year="survey_Year",
col.HFyear="YEAR_1",
sparse=TRUE, HF_fine=FALSE) # don't use refined classes
ddw100m$scale <- "0-100 m buffer around wetlands"
dx <- nonDuplicated(dw100m, Site_YEAR, TRUE)[rownames(ddw100m[[1]]),]
ddw100m <- fill_in_0ages_v6(ddw100m, dx$NSRNAME, ages_list)
ddw250m <- make_vegHF_wide_v6(dw250m,
col.label="Site_YEAR",
col.year="survey_Year",
col.HFyear="YEAR_1",
sparse=TRUE, HF_fine=FALSE) # don't use refined classes
ddw250m$scale <- "0-250 m buffer around wetlands"
dx <- nonDuplicated(dw250m, Site_YEAR, TRUE)[rownames(ddw250m[[1]]),]
ddw250m <- fill_in_0ages_v6(ddw250m, dx$NSRNAME, ages_list)
sites <- droplevels(dx[,c("Site_YEAR","Pin_Wetland_ID","survey_Year",
"LUF_NAME", "BASIN", "NRNAME", "NSRNAME")])
## catchment
f <- file.path(ROOT, VER, "data", "raw", "veghf",
"wetland", "VegV6VerifiedHF_summaryOnCatchment_allYear.csv")
dwCat <- read.csv(f)
## site label issue from 2016 -- already cleaned up
#levels(dwCat$Pin_Wetland_ID)[levels(dwCat$Pin_Wetland_ID) == "W-936"] <- "W-956"
dwCat$Site_YEAR <- with(dwCat,
interaction(Pin_Wetland_ID, year, sep="_", drop=TRUE))
setdiff(levels(dwCat$FEATURE_TY), levels(hftypes$FEATURE_TY))
ddwCat <- make_vegHF_wide_v6(dwCat,
col.label="Site_YEAR",
col.year="year",
col.HFyear="YEAR_1",
sparse=TRUE, HF_fine=FALSE) # don't use refined classes
ddwCat$scale <- "Catchment around wetlands"
dx <- nonDuplicated(dwCat, Site_YEAR, TRUE)[rownames(ddwCat[[1]]),]
ddwCat <- fill_in_0ages_v6(ddwCat, dx$NSRNAME, ages_list)
all(rownames(ddw20m[[1]]) == rownames(ddw100m[[1]]))
all(rownames(ddw20m[[1]]) == rownames(ddw250m[[1]]))
compare_sets(rownames(ddw20m[[1]]), rownames(ddwCat[[1]]))
save(ddw20m, ddw100m, ddw250m, ddwCat, sites,
file=file.path(ROOT, VER, "data", "analysis", "wetland",
"veg-hf_wetland_v6-fixage0.Rdata"))
## xy/clim/etc
# Climate covariates at wetland pins: pre-2015 file plus 2015 additions.
climWet <- read.csv(file.path(ROOT, VER, "data", "climate",
    "climates_on_wetlandPin.csv"))
climWet2 <- read.csv(file.path(ROOT, VER, "data", "veghf", "wetlands",
    "wetlandSite2015_climates_Luf_NatReg.csv"))
climWet$ABMISite <- NULL
colnames(climWet)[colnames(climWet) == "Pin_Wetland_ID"] <- "Wetland_ID"
compare_sets(colnames(climWet), colnames(climWet2))
climWet <- rbind(climWet, climWet2[,colnames(climWet)])
levels(climWet$Wetland_ID) <- toupper(levels(climWet$Wetland_ID))
# Harmonize column names with the rest of the analysis pipeline.
colnames(climWet)[colnames(climWet) == "Eref"] <- "PET"
colnames(climWet)[colnames(climWet) ==
    "Populus_tremuloides_brtpred_nofp"] <- "pAspen"
climWet$OBJECTID <- NULL
climWet$Site_YEAR <- with(climWet,
    interaction(Wetland_ID, year, sep="_", drop=TRUE))
# Check that climate rows line up with the veg-HF summary rows.
compare_sets(rownames(ddw250m[[1]]), levels(climWet$Site_YEAR))
setdiff(rownames(ddw250m[[1]]), levels(climWet$Site_YEAR))
setdiff(levels(climWet$Site_YEAR), rownames(ddw250m[[1]]))
source("~/repos/abmianalytics/species/00globalvars_wetland.R")
sort(REJECT)
#totalA <- read.csv(file.path(ROOT, VER, "data/veghf/wetlands",
#    "BufferRings_all_year_July14_2015.csv"))
# Keep only site-years present in both tables.
ii <- intersect(levels(climWet$Site_YEAR), rownames(ddw250m[[1]]))
ii <- ii[ii != "W-213_2013"] # outside of AB bound
# Subset all four components of each buffer summary to the common rows.
for (i in 1:4) {
    ddw20m[[i]] <- ddw20m[[i]][ii,]
    ddw100m[[i]] <- ddw100m[[i]][ii,]
    ddw250m[[i]] <- ddw250m[[i]][ii,]
}
rownames(climWet) <- climWet$Site_YEAR
climWet <- droplevels(climWet[ii,])
# Sketch-intersection buffer rings: merged pre-2015 plus 2015-only files.
fsw <- file.path(ROOT, VER, "data", "veghf", "wetlands",
    "sketch_inter_BufRings_allYearMerged.csv")
dsw <- read.csv(fsw)
fsw2 <- file.path(ROOT, VER, "data", "veghf", "wetlands",
    "sketch_inter_BufRings_Year2015.csv")
dsw2 <- read.csv(fsw2)
dsw$LinkID <- NULL
# Column agreement checks before stacking the two files (expect empty diffs).
compare_sets(colnames(dsw), colnames(dsw2))
setdiff(colnames(dsw), colnames(dsw2))
setdiff(colnames(dsw2), colnames(dsw))
dsw <- rbind(dsw, dsw2)
dsw$Site_YEAR <- with(dsw,
    interaction(Pin_Wetland_ID, Year_, sep="_", drop=TRUE))
## fix age 0 in saved files -----------------------------
# Replace age-0 stand area with natural-subregion average age profiles;
# the sum() calls before/after show how much age-0 area was redistributed.
load(file.path(ROOT, VER, "out", "kgrid", "veg-hf_avgages_fix-fire.Rdata"))
sum(ddw20m[[1]][,Target0])
ddw20m <- fill_in_0ages(ddw20m, climWet$NSRNAME)
sum(ddw20m[[1]][,Target0])
sum(ddw100m[[1]][,Target0])
ddw100m <- fill_in_0ages(ddw100m, climWet$NSRNAME)
sum(ddw100m[[1]][,Target0])
sum(ddw250m[[1]][,Target0])
ddw250m <- fill_in_0ages(ddw250m, climWet$NSRNAME)
sum(ddw250m[[1]][,Target0])
# SAVE is a global toggle defined upstream of this chunk.
if (SAVE)
    save(ddw20m, ddw100m, ddw250m, climWet,
        file=file.path(ROOT, VER, "out", "wetlands",
            "veg-hf_wetlands_fix-fire_fix-age0.Rdata"))
|
2bacfb6c52c80cfadebd9670e1dca1bf976038d4
|
0a5d2193a1a0b1f1cd2113d029c847761860ce97
|
/H2O RF.R
|
a27a1fcd279fae36fef7770cda5155370867c0d1
|
[] |
no_license
|
Kinshuk86/BenchMarking-Predictive-Tools
|
1f10c24371ea7cfa686df11aa76eabdc10d3ed42
|
d0989015280eb9139c275a5d47ea769be63241d1
|
refs/heads/master
| 2021-01-23T13:29:45.468093
| 2017-09-06T22:36:59
| 2017-09-06T22:36:59
| 102,663,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,328
|
r
|
H2O RF.R
|
# Install any dependency that is not already present before loading h2o.
# Replaces eight copy-pasted `if (...) install.packages(...)` lines with a
# single data-driven loop; the same checks and installs happen in the same
# order as before.
required_pkgs <- c("methods", "statmod", "stats", "graphics",
                   "RCurl", "jsonlite", "tools", "utils")
for (pkg in required_pkgs) {
  if (!(pkg %in% rownames(installed.packages()))) {
    install.packages(pkg)
  }
}
library(h2o)
# Start a local H2O cluster and run the bundled prostate demo as a smoke test.
h2o.init()
prosPath <- system.file("extdata", "prostate.csv", package="h2o")
# Imports data set
prostate.hex = h2o.importFile(path = prosPath, destination_frame="prostate.hex")
# Converts current data frame (prostate data set) to an R data frame
prostate.R <- as.data.frame(prostate.hex)
# Displays a summary of data frame where the summary was executed in R
summary(prostate.R)
# Re-initialize against a remote cluster node (overrides the local cluster).
h2o.init(ip = "datanoded01.dev.bigdata.jcpcloud2.net", port = 54321)
# NOTE(review): the block below is an abandoned/experimental attempt at
# benchmarking randomForest on the airline-delay data -- presumably kept for
# reference; confirm before deleting.
#dx_train1 <- h2o.importFolder(path = "datanoded01.dev.bigdata.jcpcloud2.net/tmp", destination_frame = "train-0.01m.csv")
#pathToData <- "datanoded01.dev.bigdata.jcpcloud2.net/tmp/train-0.01m.csv"
#airlines.hex = h2o.importFile(H2OServer, path = pathToData, key = "train-0.01m.csv.hex")
#prosPath = system.file("/tmp/", "train-0.01m.csv", package = "H2OServer")
#dx.hex <- as.data.frame(H2OServer, path = "/tmp/train-0.01m.csv", destination_frame = "dx.hex")
#dx_train <- as.data.frame("/tmp/train-0.01m.csv")
#dx_test1 <- as.data.frame("/tmp/test.csv")
#train.h2o <- as.h2o("/tmp/train-0.01m.csv", header = T, sep = ",")
#Xnames <- names(dx_train)[which(names(dx_train)!="dep_delayed_15min")]
#system.time({
#  md <- h2o.randomForest(x = Xnames, y = "dep_delayed_15min", training_frame = train.h2o, ntrees = 500)
#})
#f <- h2o.uploadFile("train-1m.csv")
#t <- h2o.uploadFile("test.csv")
#Xnames <- names(f)[which(names(f)!="dep_delayed_15min")]
#system.time({
#  md <- h2o.randomForest(x = Xnames, y = "dep_delayed_15min", training_frame = f, ntrees = 500)
#})
#system.time({
#  print(h2o.auc(h2o.performance(md, t)))
#})
install.packages("RPostgreSQL")
install.packages("RJDBC")
library(rJava)
library(RJDBC)
library(RPostgreSQL)
library(dplyr)
library(plyr)
library(ggplot2)
# SECURITY NOTE(review): database host and password are hard-coded in plain
# text and committed to source control. These credentials should be rotated
# and read from environment variables (Sys.getenv) or a config file instead.
myRedshift <- src_postgres('omnianalyticsproddb01',
                           host = 'analyticstest02.c85v4o6majw1.us-east-1.redshift.amazonaws.com',
                           port = "5439",
                           user = "readonly",
                           password = "Legacy6501",
                           options="-c search_path=analytics")
# Lazily reference the remote orderxml table and inspect its structure.
orderxml <- tbl(myRedshift, "orderxml")
glimpse(orderxml)
# Clear every frame from the H2O cluster before the glm demo.
h2o.removeAll()
path = system.file("extdata", "prostate.csv", package
                   = "h2o")
h2o_df = h2o.importFile(path)
# Gaussian GLM on the prostate demo data.
gaussian.fit = h2o.glm(y = "VOL", x = c("AGE", "RACE",
                                        "PSA", "GLEASON"), training_frame = h2o_df,
                       family = "gaussian")
# NOTE(review): `data`, `x` and `y` below are not defined anywhere in this
# script -- this line will fail unless they are created elsewhere; confirm.
m1 = h2o.glm(training_frame = data$Train, validation_frame = data$Valid, x = x, y = y,family='multinomial',solver='L_BFGS')
h2o.confusionMatrix(m1, valid=TRUE)
|
4d839d25e8b2f60fbe3b4e86379de14df4161b6a
|
6ad04ba4897e75f8940b0a6caadb03177c75da7e
|
/R/imu.R
|
dd439a43152f5dde0e4194ff99c904bf88673f08
|
[] |
no_license
|
munsheet/simts
|
62da9689060e9f1f739e1e7a8cbbf9275ce752e3
|
0e8c7c16a6392e00c2dab67b79c38ef701c49018
|
refs/heads/master
| 2021-01-10T00:06:57.143932
| 2017-06-23T18:21:05
| 2017-06-23T18:21:05
| 93,349,929
| 0
| 0
| null | 2017-06-05T00:24:51
| 2017-06-05T00:24:51
| null |
UTF-8
|
R
| false
| false
| 12,375
|
r
|
imu.R
|
# Copyright (C) 2014 - 2017 James Balamuta, Stephane Guerrier, Roberto Molinari
#
# This file is part of simts R Methods Package
#
# The `simts` R package is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The `simts` R package is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' @title Create an IMU Object
#' @description Builds an IMU object that provides the program with gyroscope, accelerometer, and axis information per column in the dataset.
#' @param data A \code{vector} which contains data, or a \code{matrix} or \code{data.frame} which contains the data in each column.
#' @param gyros A \code{vector} that contains the index of columns where gyroscope data (such as Gyro. X, Gyro. Y and Gyro. Z) is placed.
#' @param accels A \code{vector} that contains the index of columns where accelerometer data (such as Accel. X, Accel. Y and Accel. Z) is placed.
#' @param axis A \code{vector} that indicates the axises, such as 'X', 'Y', 'Z'. Please supply the axises for gyroscope data before that for accelerometer data, if gyroscope data exists.
#' @param freq An \code{integer} that provides the frequency for the data.
#' @param unit A \code{string} that contains the unit expression of the frequency. Default value is \code{NULL}.
#' @param name A \code{string} that provides an identifier to the data. Default value is \code{NULL}.
#' @return An \code{imu} object in the following attributes:
#' \describe{
#' \item{sensor}{A \code{vector} that indicates whether data contains gyroscope sensor, accelerometer sensor, or both.}
#' \item{num.sensor}{A \code{vector} that indicates how many columns of data are for gyroscope sensor and accelerometer sensor.}
#' \item{axis}{Axis value such as 'X', 'Y', 'Z'.}
#' \item{freq}{Observations per second.}
#' \item{unit}{String representation of the unit.}
#' \item{name}{Name of the dataset.}
#' }
#' @details
#' \code{data} can be a numeric vector, matrix or data frame.
#'
#' \code{gyros} and \code{accels} cannot be \code{NULL} at the same time, but it will be fine if one of them is \code{NULL}.
#' In the new implementation, the length of \code{gyros} and \code{accels} do not need to be equal.
#'
#' In \code{axis}, duplicate elements are not allowed for each sensor. In the new implementation, please specify the axis for each column of data.
#' \code{axis} will be automatically generated if there are less than or equal to 3 axises for each sensor.
#'
#' @author James Balamuta and Wenchao Yang
#' @examples
#' \dontrun{
#' if(!require("imudata")){
#' install_imudata()
#' library("imudata")
#' }
#'
#' data(imu6)
#'
#' # Example 1 - Only gyros
#' test1 = imu(imu6, gyros = 1:3, axis = c('X', 'Y', 'Z'), freq = 100)
#' df1 = wvar.imu(test1)
#' plot(df1)
#'
#' # Example 2 - One gyro and one accelerometer
#' test2 = imu(imu6, gyros = 1, accels = 4, freq = 100)
#' df2 = wvar.imu(test2)
#' plot(df2)
#'
#' # Example 3 - 3 gyros and 3 accelerometers
#' test3 = imu(imu6, gyros = 1:3, accels = 4:6, axis =
#' c('X', 'Y', 'Z', 'X', 'Y', 'Z'), freq = 100)
#' df3 = wvar.imu(test3)
#' plot(df3)
#'
#' # Example 4 - Custom axis
#' test4 = imu(imu6, gyros = 1:2, accels = 4:6, axis =
#' c('X', 'Y', 'X', 'Y', 'Z'), freq = 100)
#' df4 = wvar.imu(test4)
#' plot(df4)
#' }
imu = function(data, gyros = NULL, accels = NULL, axis = NULL, freq = NULL, unit = NULL, name = NULL){
  # Validates the inputs and builds an `imu` object via create_imu().
  # Only user-facing error-message typos were fixed ("must a" -> "must be",
  # "At lease" -> "At least", "Paramater" -> "Parameters"); the validation
  # logic is unchanged.
  # 1. Check object: must be a numeric vector, data.frame or matrix;
  #    normalise everything to a bare (unnamed-column) matrix.
  if(is.null(data) || !(is.numeric(data)||is.data.frame(data)||is.matrix(data)) ) {
    stop('Data must be a numeric vector, data frame, or matrix.')
  }
  if(is.numeric(data)){
    data = as.matrix(data)
  }
  if(is.data.frame(data)){
    data = as.matrix(data)
  }
  colnames(data) = NULL
  # 2. Check gyro and acce column indices: at least one sensor group must be
  #    given, indices must be whole numbers within the column range.
  gyro = gyros
  acce = accels
  ngyros = length(gyro)
  nacces = length(acce)
  if(is.null(gyro) && is.null(acce)){
    stop("At least one of parameters ('gyros' or 'accels') must be not NULL.")
  }
  # Merge indices (gyros first, then accels -- create_imu relies on this order)
  index = c(gyro, acce)
  if(!is.whole(index)){
    stop("Parameters 'gyros' and 'accels' must be vectors of integers.")
  }
  if(any(gyro > ncol(data)) || any(gyro < 1)){
    stop('Index for gyroscope is out of bound.')
  }
  if(any(acce > ncol(data)) || any(acce < 1)){
    stop('Index for accelerometer is out of bound.')
  }
  # 3. Check 'axis': if the user supplies the axis, check input to make sure it is 'good'.
  if(!is.null(axis)){
    # A half-length axis vector is recycled across both sensor groups.
    if(length(axis)==((ngyros + nacces)/2) && ngyros!=0 && nacces!=0){
      axis = rep(axis, times = 2)
    }else if (length(axis) != (ngyros + nacces)){
      stop('Please specify the axis for each column of data.')
    }
    # Duplicated axes are only forbidden within a single sensor group.
    if (ngyros == 0||nacces == 0){
      if( anyDuplicated(axis) ){
        stop('`axis` cannot have duplicated elements.')
      }
    }else if (anyDuplicated(axis[1:ngyros]) || anyDuplicated(axis[(ngyros+1):length(axis)])){
      stop('For each sensor, `axis` cannot have duplicated elements.')
    }
  }else{
    # if the user doesn't supply the axis, guess number of sensors;
    # auto-generation only works for 1-3 axes per sensor group.
    if(ngyros > 0 && nacces > 0){
      naxis = if(ngyros == nacces) ngyros else 0
    }else{
      naxis = if(ngyros != 0) ngyros else nacces
    }
    axis = switch(as.character(naxis),
                  '1' = 'X',
                  '2' = c('X','Y'),
                  '3' = c('X','Y','Z'),
                  stop('axis cannot be automatically generated. Please supply it by specifying "axis = ...".')
    )
    if(ngyros == nacces){
      axis = rep(axis, times = 2)
    }
  }
  # 4. Check freq: default to 100 Hz with a warning, then validate.
  if(is.null(freq)){
    freq = 100
    warning("`freq` has not been specified. Setting `imu` data's frequency to 100. \n Please recreate the object if the frequency is incorrect.")
  }
  if(!is(freq,"numeric") || length(freq) != 1){ stop("'freq' must be one numeric number.") }
  if(freq <= 0) { stop("'freq' must be larger than 0.") }
  # 5. do not need 'start' and 'end'
  # 6. unit: optional, restricted to the supported time-unit strings.
  if(!is.null(unit)){
    if(!unit %in% c('ns', 'ms', 'sec', 'second', 'min', 'minute', 'hour', 'day', 'mon', 'month', 'year')){
      stop('The supported units are "ns", "ms", "sec", "min", "hour", "day", "month", "year". ')
    }
  }
  # Reorder columns gyros-first and delegate construction.
  create_imu(data[,index, drop = FALSE], ngyros, nacces, axis, freq, unit = unit, name = name)
}
#' @title Internal IMU Object Construction
#' @description Internal quick build for imu object.
#' @param data A \code{matrix} with dimensions N x length(index)
#' @param ngyros An \code{integer} containing the number of gyroscopes
#' @param naccess An \code{integer} containing the number of accelerometers
#' @param axis A \code{vector} unique representation of elements e.g. x,y,z or x,y or x.
#' @param freq An \code{integer} that provides the frequency for the data.
#' @param unit A \code{string} that contains the unit expression of the frequency. Default value is \code{NULL}.
#' @param name A \code{string} that provides an identifier to the data. Default value is \code{NULL}.
#' @param stype A \code{string} that describes the sensor type. Default value is \code{NULL}.
#' @return An \code{imu} object class.
#' @author James Balamuta
#' @keywords internal
create_imu = function(data, ngyros, nacces, axis, freq, unit = NULL, name = NULL, stype = NULL){
  # Internal constructor: labels the columns "Gyro. <axis>" / "Accel. <axis>"
  # (gyroscopes first) and attaches the imu metadata attributes.
  #
  # BUG FIX: the original function body ended on the assignment
  # `out = structure(...)`, so the object was returned *invisibly*
  # (an assignment's value is returned invisibly in R). The object is now
  # returned explicitly so the constructor behaves like a normal function.
  if(!is.null(ncol(data))){
    if(ngyros>0 && nacces>0){
      colnames(data) = paste( c(rep('Gyro.', times = ngyros), rep('Accel.', times = nacces)), axis)
    }else if (ngyros > 0){
      colnames(data) = c(paste(rep('Gyro.', times = ngyros), axis))
    }else{
      colnames(data) = c(paste(rep('Accel.', times = nacces), axis))
    }
  }
  out = structure(data,
                  sensor = c(rep("Gyroscope",ngyros), rep("Accelerometer",nacces)),
                  num.sensor = c(ngyros, nacces),
                  axis = axis,
                  freq = freq,
                  unit = unit,
                  name = name,
                  stype = stype,
                  class = c("imu","matrix"))
  out
}
#' Subset an IMU Object
#'
#' Enables the IMU object to be subsettable. That is, you can load all the data in and then select certain properties.
#' @export
#' @param x A \code{imu} object
#' @param i A \code{integer vector} that specifies the rows to subset. If blank, all rows are selected.
#' @param j A \code{integer vector} that specifies the columns to subset. Special rules apply see details.
#' @param drop A \code{boolean} indicating whether the structure should be preserved or simplified.
#' @return An \code{imu} object class.
#' @details
#' When using the subset operator, note that all the Gyroscopes are placed at the front of object
#' and, then, the Accelerometers are placed.
#'
#' @examples
#' \dontrun{
#' if(!require("imudata")){
#' install_imudata()
#' library("imudata")
#' }
#'
#' data(imu6)
#'
#' # Create an IMU Object that is full.
#' ex = imu(imu6, gyros = 1:3, accels = 4:6, axis = c('X', 'Y', 'Z', 'X', 'Y', 'Z'), freq = 100)
#'
#' # Create an IMU object that has only gyros.
#' ex.gyro = ex[,1:3]
#' ex.gyro2 = ex[,c("Gyro. X","Gyro. Y","Gyro. Z")]
#'
#' # Create an IMU object that has only accels.
#' ex.accel = ex[,4:6]
#' ex.accel2 = ex[,c("Accel. X","Accel. Y","Accel. Z")]
#'
#' # Create an IMU object with both gyros and accels on axis X and Y
#' ex.b = ex[,c(1,2,4,5)]
#' ex.b2 = ex[,c("Gyro. X","Gyro. Y","Accel. X","Accel. Y")]
#'
#' }
#'
`[.imu` = function(x, i, j, drop = FALSE){
  # Subset operator that keeps the result a valid `imu` object: capture the
  # sensor metadata first, adjust it if columns are selected, then rebuild.
  axis = attr(x,"axis")
  sensor = attr(x,"sensor")
  num.sensor = attr(x,"num.sensor")
  # If j is missing, then it is a subset by row (not column!)
  if(!missing(j)){
    # Select column names picked by user
    if(is(j, "character")){
      nc = j
    }else{
      # Otherwise, use j as a numeric.
      nc = colnames(x)[j]
    }
    # TO DO:
    # Rewrite the selection using indices now that
    # we are no longer bound by naming schemes.
    # Remove structure to get Gyros/Accels
    # (column names look like "Gyro. X" / "Accel. Y"; strip at the ".")
    g = gsub("\\..*","",nc)
    ng = table(g)
    # Remove structure to get at X,Y,Z axis.
    g2 = gsub(".* ","",nc)
    axis = g2
    # Recount how many gyro / accel columns survive the selection.
    num.sensor = c({if(!is.na(ng["Gyro"])) ng["Gyro"] else 0}, {if(!is.na(ng["Accel"])) ng["Accel"] else 0})
  }
  # Delegate the actual subsetting to the matrix method, then rebuild the
  # imu object around the result with the (possibly updated) metadata.
  create_imu(NextMethod("[", drop = drop),
             num.sensor[1], num.sensor[2], axis, attr(x,"freq"), attr(x,"unit"), attr(x,"name"), attr(x,"stype"))
}
#' @title Read an IMU Binary File into R
#'
#' @description
#' Processes binary files produced by supported IMU sensors into an \code{imu} object.
#'
#' @param file A \code{string} containing file names or paths.
#' @param type A \code{string} that contains a supported IMU type given below.
#' @param unit A \code{string} that contains the unit expression of the frequency. Default value is \code{NULL}.
#' @param name A \code{string} that provides an identifier to the data. Default value is \code{NULL}.
#' @details
#' Currently supports the following IMUs:
#' \itemize{
#' \item IMAR
#' \item LN200
#' \item LN200IG
#' \item IXSEA
#' \item NAVCHIP_INT
#' \item NAVCHIP_FLT
#' }
#' @author James Balamuta
#' @note We hope to soon be able to support delimited files.
#' @return An \code{imu} object that contains 3 gyroscopes and 3 accelerometers in that order.
#' @references
#' Thanks goes to Philipp Clausen of Labo TOPO, EPFL, Switzerland, topo.epfl.ch, Tel:+41(0)21 693 27 55
#' for providing a matlab function that reads in IMUs.
#' This function is a heavily modified port of MATLAB code into Armadillo/C++.
#' @examples
#' \dontrun{
#' # Relative
#' setwd("F:/")
#'
#' a = read.imu(file = "Documents/James/short_test_data.imu", type = "IXSEA")
#'
#' # Fixed path
#' b = read.imu(file = "F:/Desktop/short_test_data.imu", type = "IXSEA")
#' }
read.imu = function(file, type, unit = NULL, name = NULL){
  # Parse the binary file via the compiled reader; d[[1]] is the data matrix
  # and d[[2]][1] the sampling frequency reported by the reader.
  d = read_imu(file_path = file, imu_type = type)
  # Column 1 of d[[1]] holds timestamps (used as row names below); the
  # remaining columns are passed as 3 gyroscope + 3 accelerometer channels
  # (X, Y, Z each) -- assumes all supported IMU formats follow this layout.
  obj = create_imu(d[[1]][,-1], 3, 3, c('X','Y','Z','X','Y','Z'), d[[2]][1], unit = unit, name = name, stype = type)
  rownames(obj) = d[[1]][,1]
  obj
}
|
4566dde52b979d784bbe98ba2e27c3111ce64b02
|
6db4bf0d2e83f5cdea196658a5eebfe875f321ae
|
/ass/Ques1/rnuif10.r
|
3365fdf01a9e971f22f81618377f747efd0a1f6e
|
[] |
no_license
|
akar5h/CS251-252
|
f40c252461fcc65d6b4a6bca9ab3a97926a8e96c
|
2da6d9240948e123487edc4588b17915d632c469
|
refs/heads/master
| 2020-03-09T12:55:42.172281
| 2018-04-09T15:59:12
| 2018-04-09T15:59:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26
|
r
|
rnuif10.r
|
# Draw 100 samples from the standard uniform distribution and print them.
u10 <- runif(100, min = 0.0, max = 1.0)
u10
|
1e4f394bda25d7a98386e49c636e4b9b1c62bf1e
|
2f22faf604765348216fc9332d6611e01a6dd5f2
|
/WKRDB-EST2/subGroup6/icesRDBES/data-raw/getTablesInHierarchies.r
|
3b84318203a538bdb6c8d1d12d205eee511b8d31
|
[
"MIT"
] |
permissive
|
ices-eg/WK_RDBES
|
6d54339f8bbc075f6ac05d65bbba7cdbeebc4233
|
bda1fadf7f964b2f2578630621d6c2f89341a848
|
refs/heads/master
| 2022-10-21T23:12:20.395789
| 2022-10-07T14:45:53
| 2022-10-07T14:45:53
| 211,826,167
| 4
| 6
| null | 2021-09-27T08:05:03
| 2019-09-30T09:39:29
|
R
|
UTF-8
|
R
| false
| false
| 2,634
|
r
|
getTablesInHierarchies.r
|
# Use the RDBES xsd files to determine which tables are
# required in the different RDBES hierarchies

# Set to TRUE if you want to download the
# latest xsd files from GitHub
downloadFromGitHub <- TRUE
gitHubFileLocation <-
  "https://api.github.com/repos/ices-tools-dev/RDBES/contents/XSD-files"
# The folder to read the files from. If you are
# downloading from GitHub a copy of the latest files will be saved here
fileLocation <- "data-raw/"

# STEP 1) Get the BaseTypes file (if required)
if (downloadFromGitHub) {
  # Query the GitHub contents API for the XSD folder listing and collect
  # (fileName, downloadURL) pairs into a data frame.
  myHierarchyFiles <- NULL
  myResponse <- httr::GET(gitHubFileLocation)
  filesOnGitHub <- httr::content(myResponse)
  for (myFile in filesOnGitHub) {
    myGitHubFile <- data.frame(fileName = myFile$name,
                               downloadURL = myFile$download_url)
    if (is.null(myHierarchyFiles)) {
      myHierarchyFiles <- myGitHubFile
    } else {
      myHierarchyFiles <- rbind(myHierarchyFiles, myGitHubFile)
    }
  }
  # Sub-set to the files we are interested in (hierarchy files: H*.xsd)
  myHierarchyFiles <- myHierarchyFiles[grepl("^H.*xsd$",
                                             myHierarchyFiles$fileName), ]
  print(paste("Downloading ", nrow(myHierarchyFiles),
              " files from GitHub", sep = ""))
  # Download our files and save a local copy of each.
  # BUG FIX: the original loop header was `for (i in 1:seq_len(myHierarchyFiles))`,
  # which is invalid -- seq_len() requires a single non-negative integer, not a
  # data frame. Iterate over the row indices instead.
  for (i in seq_len(nrow(myHierarchyFiles))) {
    anHierarchyFile <- RCurl::getURL(myHierarchyFiles[i, "downloadURL"])
    # save the file locally
    writeLines(anHierarchyFile,
               paste(fileLocation,
                     myHierarchyFiles[i, "fileName"], sep = "")
    )
  }
}
# Read all the H.*xsd files
filesToRead <- list.files(path = fileLocation,
                          pattern = "^H.*xsd$",
                          recursive = FALSE,
                          full.names = FALSE)
myHierarchyTables <- list()
for (fileToParse in filesToRead) {
  fileToParse <- paste(fileLocation, fileToParse, sep = "")
  # STEP 2) Parse the XML
  doc <- XML::xmlTreeParse(fileToParse, useInternal = TRUE)
  myXML <- XML::xmlToList(doc)
  myResults <- NULL
  hierachyName <- NULL
  # Walk the complexType nodes: an attribute starting with "H" names the
  # hierarchy itself; two-character attributes are the table codes it uses.
  for (myElement in myXML[names(myXML) == "complexType"]) {
    myAttr <- myElement$.attrs
    names(myAttr) <- NULL
    if (grepl("^H.*", myAttr)) {
      hierachyName <- myAttr
    }
    # NOTE(review): elementwise `&` is used on what should be a scalar
    # condition; it works while myAttr is length-1, but `&&` would be the
    # conventional choice here.
    if (nchar(myAttr) == 2 & !grepl("^H.*", myAttr)) {
      if (is.null(myResults)) {
        myResults <- c(myAttr)
      } else {
        myResults <- c(myResults, myAttr)
      }
    }
  }
  # Add to our list of results, keyed by hierarchy name (e.g. "H1").
  myHierarchyTables[[hierachyName]] <- myResults
}
# Export the mapping as package data.
tablesInRDBESHierarchies <- myHierarchyTables
usethis::use_data(tablesInRDBESHierarchies, overwrite = TRUE)
|
847c947a184d3ce9e58e74b90e17e84559803ebb
|
cd32d300faed890d520e27399e93662b47388926
|
/PraceDomowe/PD7/Michal_Pastuszka_PD7/dane.R
|
1b32c60acab609290b88389f133c31229450b93f
|
[] |
no_license
|
Siemashko/TechnikiWizualizacjiDanych2018
|
f01a7b3b513cf07d31261d176e56fb7b99583df6
|
c6072805f3a9b3c1f0d7db34c2728cb4e20d562f
|
refs/heads/master
| 2020-03-30T16:08:56.922922
| 2019-01-16T11:08:26
| 2019-01-16T11:08:26
| 151,395,331
| 0
| 1
| null | 2019-01-09T11:39:22
| 2018-10-03T10:24:06
|
HTML
|
UTF-8
|
R
| false
| false
| 1,288
|
r
|
dane.R
|
library(readxl)
library(data.table)
library(ggplot2)
library(ggmap)
# Read the 2002 baseline PM10 / PM2.5 daily (24h) measurement sheets.
pm10 <- data.table(read_excel("dane/to/2002_PM10_24g.xlsx"))
pm25 <- data.table(read_excel("dane/to/2002_PM2.5_24g.xlsx"))
# Append the remaining years; fill=TRUE because the set of station columns
# differs between years. Loop bound derived from `lata` instead of the
# hard-coded 1:15 so the range only needs changing in one place.
lata <- seq(2003, 2017)
for (i in seq_along(lata)) {
  x = data.table(read_excel(paste("dane/to/", lata[i], "_PM10_24g.xlsx", sep="")))
  y = data.table(read_excel(paste("dane/to/", lata[i], "_PM2.5_24g.xlsx", sep="")))
  pm10 <- rbind(pm10, x, fill=TRUE)
  pm25 <- rbind(pm25, y, fill=TRUE)
}
# Station code lookup table, keyed by the new station code.
nazwy <- data.table(read_excel("Kopia Kody_stacji_pomiarowych.xlsx"))
setkey(nazwy, `KOD NOWY`)
# First column of each measurement table is the observation date.
colnames(pm10)[1] <- "czas"
colnames(pm25)[1] <- "czas"
pm10$czas <- as.Date(pm10$czas)
pm25$czas <- as.Date(pm25$czas)
# Station metadata; columns 15/16 hold the coordinates.
meta <- data.table(read_excel("Metadane_wer20180829.xlsx"))
colnames(meta)[c(15,16)] <- c("lat", "lng")
setkey(meta, `Kod stacji`)
# Join coordinates onto the station lookup and keep only the needed columns.
nazwy <- meta[nazwy,]
nazwy <- nazwy[,c("Kod stacji", "Nazwa stacji", "lat", "lng")]
pm <- list(pm10, pm25)
setkey(nazwy, `Kod stacji`)
# Per-pollutant station tables, keyed by station name for display lookups.
nazwy25 <- nazwy[nazwy$`Kod stacji`%in%colnames(pm25),]
setkey(nazwy25, `Nazwa stacji`)
nazwy10 <- nazwy[nazwy$`Kod stacji`%in%colnames(pm10),]
setkey(nazwy10, `Nazwa stacji`)
# NOTE(review): colorNumeric() comes from the leaflet package, which is not
# loaded in this script -- confirm leaflet is attached before running.
pal <- colorNumeric(
  palette = c("green", "red", "darkorchid4"),
  domain = 0:350)
save(pm, nazwy, nazwy25, nazwy10, pal, file="dane.RData")
|
4bbbeb8cbbb015d0f29994dcf2cbd5813b006433
|
786e89c7959029f9286abf8d133a8e7c930a5216
|
/Functions/AdaCLV/get_iterative_threshold.R
|
9042d8f04227a671e74b18ee0588e0cd1605baa1
|
[] |
no_license
|
rebeccamarion/AdaCLV
|
61fe3f4d5ebe82ec438feb5cd698162fad217e06
|
e123f9eeb8650dbaa90a159bd8a94aeb0a8496d3
|
refs/heads/main
| 2023-02-03T21:37:56.529721
| 2020-12-21T12:36:21
| 2020-12-21T12:36:21
| 322,311,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,061
|
r
|
get_iterative_threshold.R
|
GetIterativeThresh <- function(x, eta = 3){
  # Iteratively flags "signal" entries whose magnitude exceeds
  # mean + eta * sd of the values not yet flagged, then derives a threshold
  # from the largest remaining ("noise") value.
  #
  # Args:
  #   x:   numeric vector of values to threshold.
  #   eta: number of standard deviations defining an outlier (default 3).
  #
  # Returns:
  #   A list with `thresh` (non-negative threshold) and `noise.covar`
  #   (the values deemed noise; NULL when every entry is flagged as signal).
  #
  # Fixes applied: TRUE/FALSE instead of the reassignable T/F aliases,
  # seq_along() instead of 1:length(x), and the per-iteration cutoff hoisted
  # into a named variable. Behavior is unchanged.
  converged <- FALSE
  signal.list <- list()
  indexes <- seq_along(x)
  iter <- 1
  indexes.sub <- indexes
  x.temp <- x
  while (!converged){
    # Flag entries of the remaining data beyond mean + eta*sd.
    cutoff <- mean(x.temp) + eta * sd(x.temp)
    signal.list[[iter]] <- indexes.sub[which(abs(x.temp) > cutoff)]
    if (length(signal.list[[iter]]) == 0){
      # Nothing new flagged this round: stop iterating.
      converged <- TRUE
    } else {
      # Remove all flagged entries so far and re-estimate on the rest.
      all.signal <- sort(unique(unlist(signal.list)))
      x.temp <- x[-all.signal]
      indexes.sub <- indexes[-all.signal]
      iter <- iter + 1
    }
  }
  all.signal <- sort(unique(unlist(signal.list)))
  if (length(all.signal) > 0){
    # No noise variables --> lowest threshold possible = 0
    if (length(all.signal) == length(x)){
      thresh <- 0
      x.noise <- NULL
    } else {
      thresh <- max(x[-all.signal])
      x.noise <- x[-all.signal]
    }
  } else {
    # Nothing was ever flagged: everything is noise.
    thresh <- max(x)
    x.noise <- x
  }
  # The threshold is never allowed to be negative.
  thresh <- pmax(thresh, 0)
  return(list(thresh = thresh, noise.covar = x.noise))
}
|
32ce45ea41539d7dd0d882971e35136268a3f120
|
d7803d8b4f8ed4db75be0a6ed035f073ad2b8509
|
/man/plot.mlma.Rd
|
a27492ae50b35f7b5fd3b82cde1c7729c9d3c646
|
[] |
no_license
|
cran/mlma
|
1880c122afdc3ca865e1858c9551cb972308d161
|
45cfaca440d914c94c5d2c26e7eeab3674fa6ac8
|
refs/heads/master
| 2022-06-15T19:17:38.281692
| 2022-05-16T16:40:05
| 2022-05-16T16:40:05
| 48,084,225
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,723
|
rd
|
plot.mlma.Rd
|
\name{plot.mlma}
\alias{plot.mlma}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot "mlma" Object
}
\description{
Plot the overall mediation effect or decomposed indirect effect from the selected mediator.
}
\usage{
\method{plot}{mlma}(x, ..., var = NULL, cate = FALSE,
w2 = rep(1, nrow(as.matrix(x$de2))))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
an "mlma" object.
}
\item{\dots}{
arguments to be passed to methods.
}
\item{var}{
the name of the mediator that is to be plotted. If var is NULL, plot the relative mediation effects of all mediators.
}
\item{cate}{
an indicator of whether the variable is categorical or not.
}
\item{w2}{
the weight for observations at level 2, which should be in the same order as unique(level[!is.na(level)]). The default is rep(1, nrow(as.matrix(x$de2))).
}
}
\details{
Plot the relative effects of direct effects and indirect effects of mediators at level 1 (if levelx=1) and level 2 respectively if var=NULL. Otherwise, plot the indirect effect of var, the estimated differential effect of the predictor on var, and the predicted relationship between y and var at individual level and/or (aggregated) group level.
}
\author{
Qingzhao Yu (qyu@lsuhsc.edu), Bin Li (bli@lsu.edu).
}
\examples{
data(sim.211)
data1<-data.org(x=sim.211$x, m=sim.211$m,
f01y=list(1,c("x","log(x^2)")), f02ky=list(1,c("x","x^2")),
f20ky=list(2,c("x","x^3")), f01km2=list(matrix(c(1,1),1),c("x^1.2","x^2.3")),
f01km1=list(matrix(c(2,1),1),"sqrt(x)+3"),level=sim.211$level)
temp1<-mlma(y=sim.211$y, data1)
plot(temp1)
plot(temp1,var="m.1")
plot(temp1,var="m.3")
#plot(temp1,var="m.4")
data(sim.111)
data2<-data.org(sim.111$x, m=sim.111$m,
f10y=list(1,c("x^2","sqrt(x+6)")),
f20ky=list(2,c("x","x^3")),
f10km=list(matrix(c(2,1),1),"log(x+2)"), level=sim.111$level)
temp2<-mlma(y=sim.111$y, data1=data2)
plot(temp2)
#plot(temp2,var="m.2")
#plot(temp2,var="m.4")
plot(temp2,var="m.3")
data3<-data.org(x=cbind(sim.211$x,sim.111$x), m=sim.211$m,
f01y=list(1,c("x","log(x^2)")), f02ky=list(1,c("x","x^2")),
f20ky=list(2,c("x","x^3")), f01km1=list(matrix(c(2,1),1),"sqrt(x)+3"),
f01km2=list(matrix(c(1,1),1),c("x^1.2","x^2.3")), level=sim.211$level)
temp3<-mlma(y=sim.211$y, data3)
plot(temp3)
plot(temp3,var="m.1")
#plot(temp3,var="m.4")
plot(temp3,var="m.3")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{plot}
|
725591f2e9fd770fbae4f3b3181ac2db678fd601
|
f997169854672f36810e793a2932313f11b52139
|
/man/lorem.Rd
|
b0c85dc1e7f6303d84410db9f4c7cb1a8844f105
|
[] |
no_license
|
jverzani/UsingR
|
7e3fcbddae97a0ecd0268a9068af7a70ecc82907
|
d1cd49622b6e85cf26710c5747423b4ba0721ef6
|
refs/heads/master
| 2021-01-09T20:53:56.202763
| 2020-07-29T16:53:55
| 2020-07-29T16:53:55
| 57,312,995
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
rd
|
lorem.Rd
|
\name{lorem}
\alias{lorem}
\title{Placeholder text}
\description{
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
}
\usage{lorem}
\format{
a character string
}
\source{\url{http://www.lipsum.com/}}
\examples{
table(unlist(strsplit(lorem, "")))
}
\keyword{datasets}
|
a9a33b57231e289400782200ce0e40c5d2510099
|
a047f577562585eb32b386d247b5637c1613cb3e
|
/code/clean_attributes.R
|
fd10eae43c634e30444018ff9210f0a45fa040df
|
[] |
no_license
|
jshen226/STAT-628-2
|
636293fa00ee548aedcff9032a1676ba8ed30d3f
|
deabe20d2ca94283b725363b0deebac9115dfe4c
|
refs/heads/master
| 2021-10-25T06:11:54.085872
| 2019-04-02T02:04:12
| 2019-04-02T02:04:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,441
|
r
|
clean_attributes.R
|
# --- Setup & data loading ------------------------------------------------
# Reads the Yelp business training data, restricts it to the
# "American (Traditional)" subset, and pairs each business's star rating
# with its raw `attributes` strings for the cleaning steps below.
#
# NOTE: the original script began with rm(list = ls()); that global wipe
# has been removed -- run the script in a fresh session instead.
library(jsonlite)
library(dplyr)
library(data.table)
library(stringr)

# Newline-delimited JSON: one business record per line.
business_train <- jsonlite::stream_in(file("business_train.json"), pagesize = 100)

# IDs of the American-Traditional subset, used to filter the full table.
american_traditional_train <- read.csv("americantraditional_sub_train.csv", header = TRUE)
american_traditional_id <- american_traditional_train$business_id
american_traditional_train_all <-
  business_train[business_train$business_id %in% american_traditional_id, ]
str(american_traditional_train_all)

# Star ratings for the subset. NOTE(review): assumes the row order of this
# file matches american_traditional_train_all -- confirm upstream.
american_traditional_star <- read.csv("american_traditional_star_and_count.csv", header = TRUE)$star
head(american_traditional_star)
str(american_traditional_star)

# Raw attribute columns (Python-dict-style strings from the Yelp dump).
american_traditional_attributes <- american_traditional_train_all$attributes
head(american_traditional_attributes)
str(american_traditional_attributes)

# Working table: star rating + raw attributes. Everything below cleans
# columns of this object. (Name shadows base::attributes(); kept for
# compatibility with the rest of the script.)
attributes <- cbind(american_traditional_star, american_traditional_attributes)
head(attributes)
head(attributes$GoodForMeal)
# ---- Clean individual attribute columns ---------------------------------
# Conventions used throughout this section:
#   x[!duplicated(x)]  -- top-level expression; auto-prints the unique raw
#                         values so the recodes below can be written by hand.
#   gsub(".+key': (\\w+).+", "\\1", col)
#                      -- extracts the True/False/None token for `key` from a
#                         Python-dict-style attribute string. When the pattern
#                         does NOT match, gsub returns the input unchanged,
#                         which is why whole dict strings are later remapped
#                         to "None"/"True"/"False" by hand.
goodforkids=attributes$GoodForKids
goodforkids[!duplicated(goodforkids)]
# NoiseLevel: collapse the u'...' and '...' quoting variants.
noiselevel=attributes$NoiseLevel
noiselevel[!duplicated(noiselevel)]
noiselevel[c(which(noiselevel=="u'average'"),which(noiselevel=="'average'"))]="average"
noiselevel[c(which(noiselevel=="u'quiet'"),which(noiselevel=="'quiet'"))]="quiet"
noiselevel[c(which(noiselevel=="u'very_loud'"),which(noiselevel=="'very_loud'"))]="very_loud"
noiselevel[c(which(noiselevel=="u'loud'"),which(noiselevel=="'loud'"))]="loud"
restaurantdelivery=attributes$RestaurantsDelivery
restaurantdelivery[!duplicated(restaurantdelivery)]
# GoodForMeal: one True/False/None flag per meal type.
dessert=gsub(pattern = ".+dessert': (\\w+).+",replacement="\\1",x=attributes$GoodForMeal)
dessert[!duplicated(dessert)]
latenight=gsub(pattern=".+latenight': (\\w+).+",replacement = "\\1",x=attributes$GoodForMeal)
latenight[!duplicated((latenight))]
lunch=gsub(pattern=".+lunch': (\\w+).+",replacement = "\\1",x=attributes$GoodForMeal)
lunch[!duplicated(lunch)]
dinner=gsub(pattern=".+dinner': (\\w+).+",replacement = "\\1",x=attributes$GoodForMeal)
dinner[!duplicated(dinner)]
brunch=gsub(pattern=".+brunch': (\\w+).+",replacement = "\\1",x=attributes$GoodForMeal)
brunch[!duplicated(brunch)]
breakfast=gsub(pattern=".+breakfast': (\\w+).+",replacement = "\\1",x=attributes$GoodForMeal)
breakfast[!duplicated(breakfast)]
# Alcohol / Caters / WiFi / takeout / credit cards: quoting cleanup only.
alcohol=attributes$Alcohol
alcohol[!duplicated(alcohol)]
alcohol[c(which(alcohol=="u'full_bar'"),which(alcohol=="'full_bar'"))]="full_bar"
alcohol[c(which(alcohol=="u'beer_and_wine'"),which(alcohol=="'beer_and_wine'"))]="beer_and_wine"
alcohol[c(which(alcohol=="u'none'"),which(alcohol=="'none'"))]="None"
caters=attributes$Caters
caters[!duplicated(caters)]
wifi=attributes$WiFi
wifi[!duplicated(wifi)]
wifi[c(which(wifi=="u'free'"),which(wifi=="'free'"))]="free"
wifi[c(which(wifi=="u'no'"),which(wifi=="'no'"))]="no"
wifi[c(which(wifi=="u'paid'"),which(wifi=="'paid'"))]="paid"
restaurantstakeout=attributes$RestaurantsTakeOut
restaurantstakeout[!duplicated(restaurantstakeout)]
businessacceptcreditcards=attributes$BusinessAcceptsCreditCards
businessacceptcreditcards[!duplicated(businessacceptcreditcards)]
# Ambience dict: one flag per ambience key. Dict strings left over by
# non-matching regexes are normalised to "None" by the manual fixes.
ambience=attributes$Ambience
ambience[1:10]
touristy=gsub(pattern=".+touristy': (\\w+).+",replacement = "\\1",x=ambience)
touristy[!duplicated(touristy)]
hipster=gsub(pattern=".+hipster': (\\w+).+",replacement = "\\1",x=ambience)
hipster[!duplicated(hipster)]
hipster[c(which(hipster=="{'romantic': False, 'intimate': False, 'classy': False, 'upscale': False, 'touristy': False, 'trendy': False, 'casual': False}"),
which(hipster=="{'romantic': False, 'intimate': False, 'classy': False, 'upscale': False, 'touristy': False, 'trendy': False, 'casual': True}"))]="None"
romantic=gsub(pattern=".+romantic': (\\w+).+",replacement = "\\1",x=ambience)
romantic[!duplicated(romantic)]
divey=gsub(pattern=".+divey': (\\w+).+",replacement = "\\1",x=ambience)
divey[!duplicated(divey)]
# Everything that is not False/None/NA becomes "None" (catch-all cleanup).
divey[-c(which(divey=="False"),which(divey=="None"),which(is.na(divey)))]="None"
intimate=gsub(pattern=".+intimate': (\\w+).+",replacement = "\\1",x=ambience)
intimate[!duplicated(intimate)]
trendy=gsub(pattern=".+trendy': (\\w+).+",replacement = "\\1",x=ambience)
trendy[!duplicated(trendy)]
upscale=gsub(pattern=".+upscale': (\\w+).+",replacement = "\\1",x=ambience)
upscale[!duplicated(upscale)]
upscale[-c(which(upscale=="False"),which(upscale=="None"),which(is.na(upscale)),which(upscale=="True"))]="None"
classy=gsub(pattern=".+classy': (\\w+).+",replacement = "\\1",x=ambience)
classy[!duplicated(classy)]
casual=gsub(pattern=".+casual': (\\w+).+",replacement = "\\1",x=ambience)
casual[!duplicated(casual)]
# BusinessParking dict: garage/street/validated/lot/valet flags.
businessparking=attributes$BusinessParking
head(businessparking)
garage=gsub(pattern = ".+'garage': (\\w+),.+",replacement = "\\1",x = businessparking)
garage[!duplicated(garage)]
street=gsub(pattern = ".+'street': (\\w+), .+",replacement = "\\1",x = businessparking)
street[!duplicated(street)]
validated=gsub(pattern = ".+'validated': (\\w+), .+",replacement = "\\1",x = businessparking)
validated[!duplicated(validated)]
validated[which(validated=="{'valet': False, 'garage': False, 'street': False, 'lot': True, 'validated': False}")]="False"
lot=gsub(pattern = ".+'lot': (\\w+), .+",replacement = "\\1",x = businessparking)
lot[!duplicated(lot)]
valet=gsub(pattern = ".+'valet': (\\w+).",replacement = "\\1",x = businessparking)
valet[!duplicated(valet)]
valet[which(valet=="False 'garage': False, 'street': False, 'lot': True, 'validated': False}")]="None"
# Simple True/False/None service attributes: inspection only, no recoding.
restaurantstableservice=attributes$RestaurantsTableService
restaurantstableservice[!duplicated(restaurantstableservice)]
restaurantsgoodforgroups=attributes$RestaurantsGoodForGroups
restaurantsgoodforgroups[!duplicated(restaurantsgoodforgroups)]
outdoorseating=attributes$OutdoorSeating
outdoorseating[!duplicated(outdoorseating)]
hastv=attributes$HasTV
hastv[!duplicated(hastv)]
bikeparking=attributes$BikeParking
bikeparking[!duplicated(bikeparking)]
restaurantsreservations=attributes$RestaurantsReservations
restaurantsreservations[!duplicated(restaurantsreservations)]
restaurantspricerange2=attributes$RestaurantsPriceRange2
restaurantspricerange2[!duplicated(restaurantspricerange2)]
restaurantsattire=attributes$RestaurantsAttire
restaurantsattire[!duplicated(restaurantsattire)]
restaurantsattire[c(which(restaurantsattire=="'casual'"),which(restaurantsattire=="u'casual'"))]="casual"
restaurantsattire[c(which(restaurantsattire=="'dressy'"),which(restaurantsattire=="u'dressy'"))]="dressy"
restaurantsattire[c(which(restaurantsattire=="'formal'"),which(restaurantsattire=="u'formal'"))]="formal"
businessacceptbitcoin=attributes$BusinessAcceptsBitcoin
businessacceptbitcoin[!duplicated(businessacceptbitcoin)]
byappointmentonly=attributes$ByAppointmentOnly
byappointmentonly[!duplicated(byappointmentonly)]
acceptsinsurance=attributes$AcceptsInsurance
acceptsinsurance[!duplicated(acceptsinsurance)]
# Music dict. Because keys appear in varying order/combinations, the regex
# often fails to match and leaves the whole dict string behind; each of the
# hard-coded replacements below maps one such leftover string (or, via
# a[k], one observed unique value) to True/False/None. These recodes are
# keyed to the current data -- they must be re-derived if the input changes.
music=attributes$Music
head(music)
dj=gsub(pattern = ".'dj': (\\w+),.+",replacement = "\\1",x = music)
dj[!duplicated(dj)]
dj[which(dj=="{'dj': False}")]="False"
dj[which(dj=="{'live': True}")]="None"
dj[which(dj=="{'dj': True}")]="True"
dj[which(dj=="{'live': False}")]="None"
dj[which(dj=="{'live': False, 'dj': True}")]="True"
dj[which(dj=="{'jukebox': True}")]="None"
dj[which(dj=="{'jukebox': False}")]="None"
dj[which(dj=="{'karaoke': False}")]="None"
dj[which(dj=="{}")]="None"
background_music=gsub(pattern = ".+'background_music': (\\w+),.+",replacement = "\\1",x = music)
background_music[!duplicated(background_music)]
background_music[which(background_music=="{'dj': False}")]="None"
background_music[which(background_music=="{'live': True}")]="None"
background_music[which(background_music=="{'dj': True}")]="None"
background_music[which(background_music=="{'live': False, 'dj': True}")]="None"
background_music[which(background_music=="{'dj': False, 'karaoke': False}")]="None"
background_music[which(background_music=="{'jukebox': True}")]="None"
background_music[which(background_music=="{'live': False}")]="None"
background_music[which(background_music=="{'jukebox': False}")]="None"
background_music[which(background_music=="{'karaoke': False}")]="None"
background_music[which(background_music=="{'dj': False, 'live': False, 'video': False, 'jukebox': True}")]="None"
background_music[which(background_music=="{}")]="None"
# no_music: `a` holds the unique raw values; every leftover dict string is
# mapped to "None" by position. NOTE(review): a[9] is skipped -- confirm it
# is already a clean token and not an oversight.
no_music=gsub(pattern = ".+'no_music': (\\w+),.+",replacement = "\\1",x = music)
a=no_music[!duplicated(no_music)]
a
no_music[which(no_music==a[3])]="None"
no_music[which(no_music==a[4])]="None"
no_music[which(no_music==a[5])]="None"
no_music[which(no_music==a[6])]="None"
no_music[which(no_music==a[7])]="None"
no_music[which(no_music==a[8])]="None"
no_music[which(no_music==a[10])]="None"
no_music[which(no_music==a[11])]="None"
no_music[which(no_music==a[12])]="None"
no_music[which(no_music==a[13])]="None"
no_music[which(no_music==a[14])]="None"
no_music[which(no_music==a[15])]="None"
no_music[which(no_music==a[16])]="None"
no_music[which(no_music==a[17])]="None"
no_music[which(no_music==a[18])]="None"
no_music[which(no_music==a[19])]="None"
no_music[which(no_music==a[20])]="None"
no_music[which(no_music==a[21])]="None"
no_music[which(no_music==a[22])]="None"
no_music[which(no_music==a[23])]="None"
no_music[which(no_music==a[24])]="None"
no_music[which(no_music==a[25])]="None"
no_music[which(no_music==a[26])]="None"
no_music[which(no_music==a[27])]="None"
no_music[which(no_music==a[28])]="None"
no_music[which(no_music==a[29])]="None"
# jukebox: some leftover strings carry a usable jukebox value (True/False).
jukebox=gsub(pattern = ".+'jukebox': (\\w+),.+",replacement = "\\1",x = music)
a=jukebox[!duplicated(jukebox)]
a
jukebox[which(jukebox==a[4])]="None"
jukebox[which(jukebox==a[5])]="None"
jukebox[which(jukebox==a[6])]="None"
jukebox[which(jukebox==a[8])]="None"
jukebox[which(jukebox==a[9])]="None"
jukebox[which(jukebox==a[10])]="None"
jukebox[which(jukebox==a[11])]="True"
jukebox[which(jukebox==a[12])]="None"
jukebox[which(jukebox==a[13])]="False"
jukebox[which(jukebox==a[14])]="None"
jukebox[which(jukebox==a[15])]="True"
live=gsub(pattern = ".+'live': (\\w+),.+",replacement = "\\1",x = music)
a=live[!duplicated(live)]
a
live[which(live==a[4])]="None"
live[which(live==a[5])]="None"
live[which(live==a[6])]="True"
live[which(live==a[7])]="None"
live[which(live==a[8])]="None"
live[which(live==a[9])]="None"
live[which(live==a[10])]="None"
live[which(live==a[11])]="False"
live[which(live==a[12])]="None"
live[which(live==a[13])]="None"
video=gsub(pattern = ".+'video': (\\w+),.+",replacement = "\\1",x = music)
a=video[!duplicated(video)]
a
video[which(video==a[3])]="None"
video[which(video==a[4])]="None"
video[which(video==a[6])]="None"
video[which(video==a[8])]="None"
video[which(video==a[9])]="None"
video[which(video==a[10])]="None"
video[which(video==a[11])]="None"
video[which(video==a[12])]="None"
video[which(video==a[13])]="None"
video[which(video==a[14])]="None"
karaoke=gsub(pattern = ".+'karaoke': (\\w+).",replacement = "\\1",x = music)
a=karaoke[!duplicated(karaoke)]
a
karaoke[which(karaoke==a[4])]="None"
karaoke[which(karaoke==a[5])]="None"
karaoke[which(karaoke==a[6])]="None"
karaoke[which(karaoke==a[8])]="None"
karaoke[which(karaoke==a[9])]="None"
karaoke[which(karaoke==a[10])]="None"
karaoke[which(karaoke==a[11])]="None"
karaoke[which(karaoke==a[12])]="None"
karaoke[which(karaoke==a[13])]="None"
# Remaining simple attributes.
goodfordancing=attributes$GoodForDancing
goodfordancing[!duplicated(goodfordancing)]
coatcheck=attributes$CoatCheck
coatcheck[!duplicated(coatcheck)]
happyhour=attributes$HappyHour
happyhour[!duplicated(happyhour)]
# BestNights dict: one flag per weekday.
bestnights=attributes$BestNights
bestnights[!duplicated(bestnights)]
monday=gsub(pattern=".+monday': (\\w+).+",replacement = "\\1",x=bestnights)
monday[!duplicated(monday)]
tuesday=gsub(pattern=".+tuesday': (\\w+).+",replacement = "\\1",x=bestnights)
tuesday[!duplicated(tuesday)]
wednesday=gsub(pattern=".+wednesday': (\\w+).+",replacement = "\\1",x=bestnights)
wednesday[!duplicated(wednesday)]
thursday=gsub(pattern=".+thursday': (\\w+).+",replacement = "\\1",x=bestnights)
thursday[!duplicated(thursday)]
friday=gsub(pattern=".+friday': (\\w+).+",replacement = "\\1",x=bestnights)
friday[!duplicated(friday)]
saturday=gsub(pattern=".+saturday': (\\w+).+",replacement = "\\1",x=bestnights)
saturday[!duplicated(saturday)]
sunday=gsub(pattern=".+sunday': (\\w+).+",replacement = "\\1",x=bestnights)
sunday[!duplicated(sunday)]
wheelchairaccessible=attributes$WheelchairAccessible
wheelchairaccessible[!duplicated(wheelchairaccessible)]
dogsallowed=attributes$DogsAllowed
dogsallowed[!duplicated(dogsallowed)]
byobcorkage=attributes$BYOBCorkage
byobcorkage[!duplicated(byobcorkage)]
byobcorkage[c(which(byobcorkage=="'no'"),which(byobcorkage=="u'no'"))]="no"
byobcorkage[c(which(byobcorkage=="'yes_corkage'"),which(byobcorkage=="u'yes_corkage'"))]="yes_corkage"
# NOTE(review): the literal "'yes_free" below has no closing quote -- looks
# truncated; confirm against the raw unique values before trusting it.
byobcorkage[which(byobcorkage=="'yes_free")]="yes_free"
drivethru=attributes$DriveThru
drivethru[!duplicated(drivethru)]
smoking=attributes$Smoking
smoking[!duplicated(smoking)]
smoking[c(which(smoking=="'no'"),which(smoking=="u'no'"))]="no"
smoking[c(which(smoking=="u'outdoor'"))]="outdoor"
smoking[c(which(smoking=="u'yes'"))]="yes"
agesallowed=attributes$AgesAllowed
agesallowed[!duplicated(agesallowed)]
agesallowed[which(agesallowed=="u'21plus'")]="21plus"
agesallowed[which(agesallowed=="u'19plus'")]="19plus"
agesallowed[which(agesallowed=="u'allages'")]="allages"
hairspecializesin=attributes$HairSpecializesIn
hairspecializesin[!duplicated(hairspecializesin)]################## NA
corkage=attributes$Corkage
corkage[!duplicated(corkage)]
byob=attributes$BYOB
byob[!duplicated(byob)]
dietaryrestrictions=attributes$DietaryRestrictions
dietaryrestrictions[!duplicated(dietaryrestrictions)]##################### NA
open24hours=attributes$Open24Hours
open24hours[!duplicated(open24hours)]
restaurantscounterservice=attributes$RestaurantsCounterService
restaurantscounterservice[!duplicated(restaurantscounterservice)]
# ---- Assemble the cleaned table and write it out ------------------------
# Column 1 is the star rating; the rest are the cleaned attribute vectors.
clean_attributes=cbind(american_traditional_star,goodforkids,noiselevel,restaurantdelivery,
dessert,latenight,lunch,dinner,brunch,breakfast, #GoodForMeal
alcohol,caters,wifi,restaurantstakeout,businessacceptcreditcards,
touristy,hipster,romantic,divey,intimate,trendy,upscale,classy,casual, #ambience
garage,street,validated,lot,valet, #businessparking
restaurantstableservice,restaurantsgoodforgroups,outdoorseating,hastv,bikeparking,restaurantsreservations,
restaurantspricerange2,restaurantsattire,businessacceptbitcoin,byappointmentonly,acceptsinsurance,
dj,background_music,no_music,jukebox,live,video,karaoke, #music
goodfordancing,coatcheck,happyhour,
monday,tuesday,wednesday,thursday,friday,saturday,sunday, #bestnights
wheelchairaccessible,dogsallowed,byobcorkage,drivethru,smoking,agesallowed,corkage,
byob,open24hours,restaurantscounterservice
)
head(clean_attributes)
write.csv(clean_attributes,"clean_attributes.csv")
|
54311ad523901f3d14ca7fd057061c7fa330c396
|
7ed5ddafcffacc81a7e084cd54b8b63830c67ea5
|
/R Scripts/Figure5a.R
|
f5071412f5adda59bc45b2663bb17c8cf9148a41
|
[] |
no_license
|
HIT-biostatistical/LAK
|
e6a2c6fbace051da6dcbe827063846a3bc697bdc
|
c114b58453ffc0bf7ac8d01a46c3d6d0ae22015c
|
refs/heads/master
| 2021-07-18T08:23:30.439316
| 2020-06-05T13:01:49
| 2020-06-05T13:01:49
| 176,327,385
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,468
|
r
|
Figure5a.R
|
# ---- Figure 5a: setup ---------------------------------------------------
# NOTE(review): setwd()/source() assume the LAK project layout under ~/LAK.
setwd("~/LAK")
source("LAK.R")
library(pheatmap)
# Expression table: genes in rows; the `cell_id` column actually holds the
# gene names (misleading column name in the input file).
Zeisel<-read.table("Single Cell Data/Zeisel.txt", header = T)
gene_names<-as.vector(Zeisel$cell_id)
Zeisel <- as.matrix(Zeisel[,-1])
# Precomputed LAK clustering result (provides the Zeisel_LAK object).
load("RData/Zeisel_LAK.RData")
# Keep genes expressed (>0.5) in more than 2 cells; names filtered the same way.
gene_names <- gene_names[which(rowSums(Zeisel>0.5)>2)]
Zeisel<-Zeisel[rowSums(Zeisel>0.5)>2,]
rownames(Zeisel) <- gene_names
# Normalisation; Linnorm comes from LAK.R or its dependencies -- TODO confirm.
Zeisel_normed <- Linnorm(Zeisel)
# Cluster labels (Cs) and gene weights (ws) from the LAK fit; keep genes
# with non-negligible weight.
Zeisel_LAK_ann <- Zeisel_LAK[[1]]$Cs
Zeisel_with_LAK_genes <- Zeisel[Zeisel_LAK[[1]]$ws > 1e-5,]
Zeisel_with_LAK_genes_normed <- Zeisel_normed[Zeisel_LAK[[1]]$ws > 1e-5,]
# Known marker genes used later to curate the heatmap rows.
Zeisel_marker<-c('Aif1','Aldoc','Acta2','Cldn5','Thy1','Spink8','Mbp','Gad1','Tbr1')
#t-test
T_matrix <- function(df, df_label) {
  # One-sided Welch t-test matrix: for each gene (row of `df`) and each
  # cluster label, test whether expression inside the cluster is greater
  # than in all remaining cells.
  #
  # df:       numeric matrix or data frame, genes in rows, cells in columns.
  # df_label: integer cluster label per column of `df`.
  # Returns:  data frame (genes x max(df_label) clusters) of p-values; NA
  #           where both groups have zero variance (t statistic undefined).
  #
  # Fixes vs the original: preallocates the result matrix instead of growing
  # data-frame columns one cell at a time, uses seq_len() instead of
  # 1:dim(df)[1], and replaces a garbled non-ASCII comment.
  pmat <- matrix(NA_real_, nrow = nrow(df), ncol = max(df_label))
  for (i in min(df_label):max(df_label)) {
    in_grp <- df_label == i
    for (j in seq_len(nrow(df))) {
      # unlist() handles both matrix rows and 1-row data-frame slices.
      a <- as.numeric(unlist(df[j, in_grp], use.names = FALSE))
      b <- as.numeric(unlist(df[j, !in_grp], use.names = FALSE))
      # Gene constant in both groups: leave NA (same as the original's
      # empty if-branch).
      if (sd(a) != 0 || sd(b) != 0) {
        # Welch t-test (unequal variances), H1: mean(a) > mean(b).
        pmat[j, i] <- t.test(a, b, alternative = "greater",
                             paired = FALSE, var.equal = FALSE)$p.value
      }
    }
    print(i)  # progress indicator, kept from the original
  }
  out <- as.data.frame(pmat, row.names = rownames(df))
  colnames(out) <- paste("cluster", 1:max(df_label))
  return(out)
}
d_expr <- function(out_matrix, k = 1, p = 0.01, gene_names) {
  # Names of the genes whose p-value in column `k` of `out_matrix` is below
  # the threshold `p`. NA p-values are skipped.
  #
  # out_matrix: matrix/data frame of p-values (genes x clusters).
  # k:          column (cluster) to inspect.
  # p:          significance threshold.
  # gene_names: gene names aligned with the rows of `out_matrix`.
  # Returns:    the selected gene names, or NULL when nothing passes
  #             (the original's c()-grown vector was NULL in that case).
  #
  # Fix vs the original: the scalar accumulation loop is replaced by a
  # vectorised logical subset -- same result, O(n) and idiomatic.
  pvalue <- out_matrix[, k]
  keep <- !is.na(pvalue) & pvalue < p
  if (!any(keep)) {
    return(NULL)
  }
  gene_names[keep]
}
# ---- Differential genes per cluster, then the curated heatmap -----------
# P-value matrix: one-sided t-tests of every gene, each cluster vs the rest.
T_diff<- T_matrix(Zeisel_with_LAK_genes, Zeisel_LAK_ann)
markers_in_diff_genes <- list()
all_diff_genes <- list()
topn <- 100
M <- T_diff
dgene <- c()
pgene <- c()
# Cluster 2 is deliberately skipped here (and dropped from the heatmap below).
for(i in c(1,3:ncol(M))){
# Top `topn` genes by p-value for cluster i.
diff_genes <- rownames(M[order(M[,i]),])[1:topn]
Z_n <- Zeisel_with_LAK_genes_normed[diff_genes,]
cur<-Z_n[,Zeisel_LAK_ann==i]
other<-Z_n[,Zeisel_LAK_ann!=i]
other_median<-apply(other, 1, median)
other_mean<-apply(other,1,mean)
cur_median<-apply(cur,1,median)
cur_mean <- apply(cur, 1, mean)
cur_max <- apply(cur,1,max)
# Keep genes strongly enriched in-cluster: median > 10x and mean > 3x the rest.
cur<-cur[10 * other_median< cur_median & 3* other_mean < cur_mean,]
cat(i,'\t',rownames(cur)[1:5])
cat('\n')
# Five genes per cluster: dgene keeps p-value order, pgene reorders by mean.
dgene <- c(dgene, rownames(cur)[1:5])
pgene<-c(pgene, rownames(cur[order(rowMeans(cur),decreasing = T),])[1:5])
}
# Manual curation of the selected genes. NOTE(review): the shift and the
# hard-coded index replacements below assume a fixed pgene length/order from
# the loop above -- they will silently misbehave if the input data change.
for (i in 36:39){
pgene[i]<-pgene[i+1]
}
pgene[10]<- "Tbr1"
pgene[15]<- "Thy1"
pgene[20]<- "Spink8"
pgene[30]<- "Cldn5"
pgene[35] <- "Aldoc"
pgene[40] <- "Aif1"
pgene[32] <- "Aqp4"
# Heatmap input: curated genes x cells, cluster 2 removed.
ht_df <- Zeisel_with_LAK_genes_normed[pgene,Zeisel_LAK_ann!=2]
ann <- Zeisel_LAK_ann[-which(Zeisel_LAK_ann==2)]
annotation_col<-data.frame(Cluster = factor(as.character(ann)),
row.names=colnames(ht_df))
##Classification according to our clustering results
# Order cells by cluster label, then drop the helper `ann` row again.
ht_df=data.frame(t(ht_df),ann)
ht_df=ht_df[order(ht_df[,"ann"]),]
ht_df=t(ht_df)
ht_df=ht_df[-nrow(ht_df),]
###
# Column gap positions for pheatmap: cumulative cell counts per cluster.
counti <- table(ann)
cur <- 0
gapscol <- c()
for(i in c(1:7)){
cur <- cur + counti[i]
gapscol <- c(gapscol, cur)
}
gapscol
###
# Fixed colours for the 8 remaining clusters (cluster 2 absent).
ann_colors=list(Cluster = c("1"="#00FFFF","3"="#33FF33",
"4"="#993300","5"="#000000","6"="#006600",
"7"="#FF0000","8"="#999999","9"="#0000FF"))
# Final heatmap: no clustering (rows/cols pre-ordered), 5-gene row blocks.
p <- pheatmap(ht_df,cluster_rows = FALSE,cluster_cols=FALSE,
annotation_col = annotation_col,
annotation_colors = ann_colors,
show_rownames = F,
gaps_row = seq(8)*5,
gaps_col = gapscol,
fontsize = 8,
show_colnames =F)
|
33ae48e1dc7b5603898b8d2006c35f1d6b268a93
|
a79f0d1f251792dac65be35f98b68529fe9f97dd
|
/munge_data.R
|
6d349da65ef0cbab972974b81412b4cc93e2df1b
|
[] |
no_license
|
NielInfante/Leonardi_interactions
|
8ee1d7b006ea34860e6790256a49939f1b115e4c
|
5404115188cd936282bf1671dd31e58aa4a22356
|
refs/heads/master
| 2022-12-23T07:08:06.058632
| 2020-09-14T19:10:43
| 2020-09-14T19:10:43
| 295,514,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,613
|
r
|
munge_data.R
|
# ---- munge_data.R: build a normalised expression table ------------------
# Loads salmon quantifications with tximport, runs DESeq2, and writes a
# table combining normalised counts, TPMs and gene biotypes.
library(tidyverse)
library(DESeq2)
library(tximport)
#library(cowplot)
#library(gridExtra)
# NOTE(review): hard-coded Windows working directory; every path below is
# relative to it.
setwd('c://Users/nieli/projects/Leonardi/')
# Transcript-to-gene map and sample metadata.
tx2gene <- read_tsv('Data/IDs')
metadata <- read_tsv('Data/meta.txt')
metadata$Sex <- as.factor(metadata$Sex)
metadata$Genotype <- as.factor(metadata$Genotype)
metadata$Feed <- as.factor(metadata$Feed)
metadata$Group <- as.factor(metadata$Group)
outPrefix <- 'All'
PCA_Group <- 'Group'
# NOTE(review): the full-interaction design is immediately overridden by the
# reduced design on the next line; only the second one is used.
design =~ Genotype + Sex + Feed + Genotype:Sex + Genotype:Feed + Sex:Feed + Genotype:Sex:Feed
design =~ Genotype + Sex + Feed + Genotype:Sex
# Contrast of interest -- defined here but not used in this script.
contrast <- c('Group', 'C', 'D')
samples <- metadata$Sample
files <- paste0('salmon/', samples, '/quant.sf')
print("Files are:")
print(files)
# Import transcript abundances, aggregated to gene level via tx2gene.
txi <- tximport(files, type='salmon', tx2gene = tx2gene)
dds <- DESeqDataSetFromTximport(txi, metadata, design)
# Drop all-zero genes, then require >=10 counts in at least 3 samples.
dds <- dds[rowSums(counts(dds)) > 0,]
keep <- rowSums(counts(dds) >= 10) >= 3
dds <- dds[keep,] # filter them out
dds <- DESeq(dds)
# TPM table (meanTPM computed before the GeneID column is appended, so it
# averages only the per-sample TPM columns).
tpm <- as.data.frame(txi$abundance)
names(tpm) <- paste0(samples, '_TPM')
tpm$meanTPM <- rowMeans(tpm)
tpm$GeneID <- row.names(tpm)
# Add count data
cnt <- as.data.frame(counts(dds, normalized=T))
names(cnt) <- colData(dds)[,'Sample']
cnt$GeneID <- row.names(cnt)
# Add Biotypes
biotype <- read_tsv('Data/Biotype')
biotype <- left_join(biotype, tx2gene)
# Only need one transcript per gene
biotype <- biotype[!duplicated(biotype$GeneID),]
# Counts + TPM joined on GeneID, then biotypes attached.
out <- inner_join(cnt, tpm)
out <- left_join(out, biotype)
# NOTE(review): columns 1:32 are assumed to be the per-sample count columns
# -- confirm if the number of samples changes.
sum(rowSums(out[1:32]) < 5000)
dim(out)
o2 <- out[rowSums(out[1:32]) > 6000,]
dim(o2)
write_tsv(o2, 'Data/expression.tsv')
names(out)
|
ecc9435873c2c11bc810d3f8c5705af262f551df
|
3abfaa6cf0b52c5ff44e9c0aa7d842f73e2d876e
|
/scripts/buoys-validator/resources/r/annual-comparison.R
|
6506e73e8ee413a3a96ccfed40124ddb243a1207
|
[] |
no_license
|
rchailan/mirmidon-toolbox
|
0c272c4f7b7fd3db1d2a2a975b8a4e86876bb677
|
d116b1e0c8c7faaa4dd99144ea4965a1fa64fd03
|
refs/heads/master
| 2021-05-29T01:03:44.384387
| 2015-04-23T17:10:45
| 2015-04-23T17:10:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,086
|
r
|
annual-comparison.R
|
# ---------------------------------------------------------------------------
# Annual comparison of modelled wave parameters against buoy observations.
# For each station: read the model CSVs (Hs, peak frequency, mean/peak
# direction), read the matching observations (HYMEX file for the Lion /
# "MeteoFranc" buoy, CANDHIS exports otherwise), then draw per-variable time
# series and a Hs quantile-quantile plot. Set has2print=TRUE to save PNGs.
# ---------------------------------------------------------------------------
library(ggplot2)
library(reshape2)

has2print <- FALSE

# Analysis window: the 2012 settings are kept for quick switching and are
# deliberately overridden by the 2011 block below.
year <- "2012"
start <- "2012-01-01 00:00:00"
end <- "2012-12-31 23:00:00"
year <- "2011"
start <- "2011-01-01 00:00:00"
end <- "2011-12-31 23:00:00"

# Station names and their observation files (parallel vectors:
# files[i] belongs to stations[i]).
stations <- c("MeteoFranc", "Espiguette", "Sete", "Leucate", "Banyuls") #,"NICE","PORQUEROLLES")
files <- c("Lion_HS_", "CANDHIS_export_pem_03001_Base", "CANDHIS_export_pem_03404_Base",
           "CANDHIS_export_pem_01101_Base", "CANDHIS_export_pem_06601_Base") #,"CANDHIS_export_pem_00601_Base","CANDHIS_export_pem_08301_Base"

cbbPalette <- c("lightgrey", "green", "purple", "blue", "red")
cbbPalette2 <- c("green", "purple", "blue", "red")

# Model run suffixes; the second assignment deliberately keeps only ZWND6.
extensions <- c("ZWND10", "ZWND6")
extensions <- c("ZWND6")

# For each station
i <- 1  # index into `files`; advanced once per station at the loop's end
for (station in stations) {
  file <- files[i]

  for (extension in extensions) {
    # MODEL PART: one CSV per variable, merged on the timestamp.
    # HS
    modeldf.hs <- read.csv(paste("../../outputs/", station, "-hs", extension, ".csv", sep = ""),
                           header = TRUE, col.names = c("date", "hs"))
    modeldf.hs['date'] <- as.POSIXct(modeldf.hs$date, tz = "UTC", format = '%Y-%m-%d %H:%M:%S')
    modeldf.hs['hs'] <- as.numeric(modeldf.hs$hs)
    # fp - dominant wave frequency, stored as peak period = 1/fp
    modeldf.fp <- read.csv(paste("../../outputs/", station, "-fp", extension, ".csv", sep = ""),
                           header = TRUE, col.names = c("date", "fp"))
    modeldf.fp['date'] <- as.POSIXct(modeldf.fp$date, tz = "UTC", format = '%Y-%m-%d %H:%M:%S')
    modeldf.fp['period'] <- 1 / as.numeric(modeldf.fp$fp)
    modeldf.fp <- subset(modeldf.fp, select = -c(fp))
    # th1m - mean wave direction
    modeldf.th1m <- read.csv(paste("../../outputs/", station, "-dir", extension, ".csv", sep = ""),
                             header = TRUE, col.names = c("date", "th1m"))
    modeldf.th1m['date'] <- as.POSIXct(modeldf.th1m$date, tz = "UTC", format = '%Y-%m-%d %H:%M:%S')
    modeldf.th1m['th1m'] <- as.numeric(modeldf.th1m$th1m)
    # th1p - dominant wave direction
    modeldf.th1p <- read.csv(paste("../../outputs/", station, "-dirpeak", extension, ".csv", sep = ""),
                             header = TRUE, col.names = c("date", "th1p"))
    modeldf.th1p['date'] <- as.POSIXct(modeldf.th1p$date, tz = "UTC", format = '%Y-%m-%d %H:%M:%S')
    modeldf.th1p['th1p'] <- as.numeric(modeldf.th1p$th1p)

    modeldf <- merge(modeldf.hs,
                     merge(modeldf.fp,
                           merge(modeldf.th1p,
                                 modeldf.th1m)))
    modeldf <- modeldf[modeldf$date >= as.POSIXct(start, tz = "UTC") & modeldf$date <= as.POSIXct(end, tz = "UTC"), ]

    # Long format, tagged with its model extension, kept as df.mod.<ext>.
    assign(paste("df.mod.", extension, sep = ""), melt(modeldf, id = 1))
    Source <- rep(paste("Model-", extension, sep = ""), nrow(get(paste("df.mod.", extension, sep = ""))))
    assign(paste("df.mod.", extension, sep = ""), cbind(get(paste("df.mod.", extension, sep = "")), Source))
  }

  # MEASURES/OBSERVATION PART
  if (station %in% "MeteoFranc") {
    # HYMEX SRC (Hs only)
    measuredf <- read.csv(paste("../../inputs/GOL-buoy-hymex/Lion_HS_", year, ".dat", sep = ""), sep = ";", header = TRUE, col.names = c("date", "hs"))
    measuredf['date'] <- as.POSIXct(measuredf$date, tz = "UTC", format = '%Y-%m-%d %H:%M:%S')
  } else {
    measuredf <- read.csv(paste("../../inputs/candhis//donnees_candhis_cerema/", file, ".csv", sep = ""), sep = ";", header = TRUE)
    measuredf$dateheure <- as.POSIXct(measuredf$dateheure, tz = "UTC", format = '%Y-%m-%d %H:%M:%S')
    # Prefer the spectral Hm0; fall back to H1/3 when it is absent.
    if (is.null(measuredf$hm0[1])) {hsignificant <- measuredf$h13d; print("Warning: Hm0 not available")} else {hsignificant <- measuredf$hm0}
    measuredf <- data.frame(date = measuredf$dateheure,
                            hs = hsignificant,       # non-spectral fallback when hm0 missing
                            period = measuredf$tp,   # tp column from the CANDHIS export
                            th1p = measuredf$thetap, # peak direction
                            th1m = measuredf$thetam  # mean direction
    )
  }
  measuredf <- measuredf[measuredf$date >= as.POSIXct(start, tz = "UTC") & measuredf$date <= as.POSIXct(end, tz = "UTC"), ]
  df.obs <- melt(measuredf, id = 1)
  df.obs['Source'] <- "Buoy"

  ## COMPARISONS ##
  df <- df.obs
  for (extension in extensions) {
    df <- rbind(df, get(paste("df.mod.", extension, sep = "")))
  }
  # Change levelnames. NOTE(review): assumes df$variable carries exactly the
  # four levels hs/period/th1p/th1m in that order -- confirm for the Hs-only
  # HYMEX station.
  levels(df$variable) <- c("Hs(m)", "PeakPeriod(s)", "Dir(Deg)", "MeanDir(Deg)")

  # base layer plot (a stray empty argument in the first geom_line call of
  # the original was removed)
  ggplot <- ggplot(df, aes(x = date, y = value, color = `Source`)) + facet_grid(variable ~ ., scales = 'free') +
    scale_colour_manual(values = cbbPalette) +
    theme(panel.background = element_rect(fill = "white")) +
    theme_bw() +
    theme(legend.position = c(0.90, 0.95)) + # c(0,0) bottom left, c(1,1) top-right.
    theme(legend.background = element_rect(fill = "#ffffffaa", colour = NA)) +
    theme(text = element_text(size = 20)) +
    labs(title = paste("Buoy: ", station, " | Year: ", year, sep = "")) +
    geom_line(data = df[df$variable == "Hs(m)", ], alpha = 2/3) +
    geom_line(data = df[df$variable == "PeakPeriod(s)", ], alpha = 2/3) +
    geom_point(data = df[df$variable == "Dir(Deg)", ], alpha = 2/3, size = 1.2) +
    geom_point(data = df[df$variable == "MeanDir(Deg)", ], alpha = 2/3, size = 1.2)
  # print(ggplot)
  if (has2print) {
    ggsave(paste("~/Desktop/", year, "-", station, ".png", sep = ""), width = 20, height = 7)
  }

  # QQPLOT: modelled vs observed Hs quantiles, one series per extension.
  for (extension in extensions) {
    tmp <- as.data.frame(qqplot(get(paste("df.mod.", extension, sep = ""))[get(paste("df.mod.", extension, sep = ""))$variable == "hs", 'value'],
                                measuredf$hs, plot.it = FALSE))
    colnames(tmp)[1] <- 'modeled'
    colnames(tmp)[2] <- 'observed'
    assign(paste("qqp-vect.", extension, sep = ""), tmp)
  }
  l <- ls(pattern = 'qqp-vect.*')
  d <- get(l[1])$observed
  # BUG FIX: the original loop reused `i` as its index, clobbering the
  # station counter above, so every station after the second read the wrong
  # CANDHIS file. A separate index `j` is used instead.
  for (j in seq_along(l)) {
    d <- cbind(d, get(l[j])$modeled)
  }
  d <- as.data.frame(d)
  names <- substr(x = l, start = 10, stop = 20)  # "qqp-vect.<ext>" -> "<ext>"
  colnames(d) <- c('observed', as.vector(names))
  d2 <- melt(d, id.vars = 1)

  qqp <- ggplot(d2, aes(x = observed, y = value, color = variable)) +
    theme(panel.background = element_rect(fill = "white")) +
    theme(text = element_text(size = 20)) +
    theme_bw() +
    theme(legend.position = c(0.95, 0.8)) + # c(0,0) bottom left, c(1,1) top-right.
    theme(legend.background = element_rect(fill = "#ffffffaa", colour = NA)) +
    xlab("Hs(m) Observation") +
    ylab("Hs(m) Modeled") +
    xlim(c(0, 10)) +
    ylim(c(0, 10)) +
    labs(title = paste("QQplot - ", station, " (", year, ")", sep = "")) +
    scale_colour_manual("Model", values = cbbPalette2) +
    geom_point(size = 1.5, shape = 3) +
    geom_abline(slope = 1, colour = "red", alpha = 2/3)
  if (has2print) {
    ggsave(paste("~/Desktop/", year, "-qqplot-", station, ".png", sep = ""), width = 15, height = 5)
  }

  i <- i + 1
  # assign(x = paste(station, year, "extracted", sep = "."), value = df)
}
# save(list = ls(pattern="*.2012.extracted"),file = paste(year,".RData",sep="") )
#### BACKUP #####
# stations=c("Espiguette")
# file="CANDHIS_export_pem_03001_Base"
# stations=c("Sete")
# file="CANDHIS_export_pem_03404_Base"
# stations=c("Leucate")
# file="CANDHIS_export_pem_01101_Base"
# stations=c("Banyuls")
# file="CANDHIS_export_pem_06601_Base"
# stations=c("CAMARGUE")
# file="CANDHIS_export_pem_01301_Base"
# stations=c("MARSEILLE")
# file="CANDHIS_export_pem_01304_Base"
# stations=c("TOULON")
# file="CANDHIS_export_pem_08301_Base"
# stations=c("NICE")
# file="CANDHIS_export_pem_00601_Base"
# stations=c("LEPLANIER")
# file="CANDHIS_export_pem_01305_Base"
# stations=c("PORQUEROLLES")
# file="CANDHIS_export_pem_08301_Base"
# stations=c("NICELARGE")
#MEASURE PART
#SOLTC SRC
# measuredf.full<-read.csv(paste(paste("../../inputs/soltc/",station,sep=""),".csv",sep=""),header=TRUE,col.names=c("id","date","temp","temp_pt_rosee","sea_pressure","wnd","wnd_dir","gust","humidity","tension_vap","sst","hs","tmoy","consolidated","quai"))
# measuredf.full['date']<-as.POSIXct(measuredf.full$date,tz="Etc/GMT-12",format='%Y-%m-%d %H:%M:%S')
# measuredf.full['temp']<-as.numeric(as.character(measuredf.full$temp))
# measuredf.full['temp_pt_rosee']<-as.numeric(as.character(measuredf.full$temp_pt_rosee))
# measuredf.full['sea_pressure']<-as.numeric(as.character(measuredf.full$sea_pressure))
# measuredf.full['wnd']<-as.numeric(as.character(measuredf.full$wnd))
# measuredf.full['wnd_dir']<-as.numeric(as.character(measuredf.full$wnd_dir))
# measuredf.full['gust']<-as.character(measuredf.full$gust)
# measuredf.full['humidity']<-as.numeric(as.character(measuredf.full$humidity))
# measuredf.full['tension_vap']<-as.numeric(as.character(measuredf.full$tension_vap))
# measuredf.full['sst']<-as.numeric(as.character(measuredf.full$sst))
# measuredf.full['hs']<-as.numeric(as.character(measuredf.full$hs))
# measuredf.full['tmoy']<-as.numeric(as.character(measuredf.full$tmoy))
# measuredf.full['consolidated']<-as.character(measuredf.full$consolidated)
#
# measuredf<-data.frame(date=measuredf.full$date,hs=measuredf.full$hs)
# #measuredf<-measuredf[measuredf$date > as.POSIXct("2011-10-01 00:00:00",tz="GMT") & measuredf$date < as.POSIXct("2011-10-10 00:00:00",tz="GMT"),]
#
# #HYMEX SRC
# measuredf<-read.csv(paste("../../inputs/GOL-buoy-hymex/Lion_HS_",year,".dat",sep=""),sep=";",header=TRUE,col.names=c("date","hs"))
# measuredf['date']<-as.POSIXct(measuredf$date,tz="Etc/GMT-12",format='%Y-%m-%d %H:%M:%S')
#
# #MERGE dfs
# df.mix<-merge(modeldf,measuredf,by='date',suffixes=c(".modeled",".measured"))
#
# df.mix<-df.mix[df.mix$date > as.POSIXct(start,tz="GMT") & df.mix$date < as.POSIXct(end,tz="GMT"),]
#
#
# #PLOT
# pline<-ggplot(df.mix, aes(date)) +
# geom_line(aes(y = hs.measured,colour="measured"),alpha=1,size=0.5)+
# geom_line(aes(y = hs.modeled,colour="modeled"),alpha=1/2,size=0.5)+
# scale_colour_manual("", breaks = c("measured", "modeled"),
# values = c("lightgrey", "blue")) +
# theme(panel.background = element_rect(fill="white"))+
# ylab("Hs (m)") +
# xlab(paste("Time (Year ",year,")",sep=""))+
# labs(title=paste("Validation - ",buoy,sep=""))
# if (has2print) {
# ggsave(paste("~/Desktop/",year,"-pline-",station,".png",sep=""),width=8,height=6)
# }
#
# ppoint<-ggplot(df.mix, aes(date)) +
# theme(panel.background = element_rect(fill="white"))+
# geom_point(aes(y = hs.measured, colour = "measured"),alpha=1,size=2) +
# geom_point(aes(y = hs.modeled, colour = "modeled"),alpha=1/2,size=2) +
# scale_colour_manual("", breaks = c("measured", "modeled"),
# values = c("lightgrey", "blue")) +
# ylab("Hs (m)") +
# labs(title=paste("Validation - ",buoy,sep=""))+
# xlab(paste("Time (Year ",year,")",sep=""))
# if (has2print) {
# ggsave(paste("~/Desktop/",year,"-ppoint-",station,".png",sep=""),width=8,height=6)
# }
#
#d<-data.frame(modeled=df.mix$hs.modeled, observed=df.mix$hs.measured)
#qqp<-ggplot(d,aes(sample=d$observed))+stat_qq()
#
# #SCATTERPLOT
# d<-data.frame(modeled=df.mix$hs.modeled, observed=df.mix$hs.measured)
# sp <- ggplot(d, aes(x=d$modeled, y=d$observed)) +
# theme(panel.background = element_rect(fill="white"))+
# geom_point(shape=1) + # Use hollow circles
# xlab("model") +
# ylab("observation") +
# #geom_smooth(method=lm)
# geom_smooth() # Add a loess smoothed fit curve with confidence region
# if (has2print) {
# ggsave(paste("~/Desktop/",year,"-scatter-",station,".png",sep=""),width=8,height=6)
# }
|
18099b5602c91d6c123948cd9ad4a9ab219508e0
|
d7fc51552341d7873a90855e5fee9c29b7299caf
|
/man/train_pipeline.Rd
|
480fc3847bbbbe7bb6c51a466ceb99c88dfd035d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jeroenvdhoven/datapiper
|
333460c8128ac8b49e534168dd17fd1aae98f2d9
|
6a035f6b3feee328586ef078ea95bad46ace5f69
|
refs/heads/master
| 2020-04-01T01:31:51.692883
| 2019-07-11T07:27:51
| 2019-07-11T07:27:51
| 152,742,519
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,045
|
rd
|
train_pipeline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline.R
\name{train_pipeline}
\alias{train_pipeline}
\title{Create a train/test pipeline from individual functions}
\usage{
train_pipeline(..., response)
}
\arguments{
\item{...}{Pipe segments. Each pipe segment is a list containing at least a \code{.segment} argument, which holds the function.
Other parts of the list will be treated as additional arguments to that function. Segments can be named, but don't have to be.
The default name for segment i is \code{pipe_<i>}.
\code{\link{segment}} provides a simple wrapper for these pipe segments.
These arguments are evaluated at time of calling (so once you call the pipeline function), however if you wish to create arguments based
on the datasets just before starting the processing, remember you can always wrap a pipe with another function so you can do the calculations there.
The function should always accept at least a \code{train} argument for the train dataset.
Each function should also return a list with (at a minimum) two named items: \code{train} and \code{pipe}, a trained pipe segment.
You can create these pipe segments using \code{\link{pipe}}.
If a function also returns a \code{post_pipe} item in the list, that will be added to a post-transformation pipeline. The post-pipeline will be automatically
reversed to ensure re-transformations are executed in the correct order.}
\item{response}{Since \code{response} is a parameter often used in this package, you can set it here to have it automatically set in pipeline where needed.}
}
\value{
A function, taking as arguments \code{train}. This function will return a list of the transformed \code{train} dataset after running it through all pipeline functions,
as well as a \code{\link{pipeline}} that reproduces the process for new data. Pipelines will be named based on either the names given in the call
or default names will be generated (see param section).
}
\description{
Create a train/test pipeline from individual functions
}
\details{
Since this function returns a \code{pipe} entry in its list, it should be possible to use the result of this function in a new pipeline.
}
\examples{
library(dplyr)
set.seed(1)
train <- data_frame(a = 1:10, b = sample(c(1,2, NA), size = 10,
replace = TRUE), c = sample(c(1,2), size = 10, replace = TRUE))
test <- data_frame(a = 1:10, b = sample(c(1,2, NA), size = 10,
replace = TRUE), c = sample(c(1,2), size = 10, replace = TRUE))
P <- train_pipeline(
segment(.segment = datapiper::pipe_NA_indicators),
segment(.segment = datapiper::pipe_impute, exclude_columns = "a"),
segment(.segment = datapiper::pipe_remove_high_correlation_features, exclude_columns = "a"),
segment(.segment = datapiper::pipe_create_stats, stat_cols = "b",
response = "a", functions = list("mean" = mean, "sd" = sd),
too_few_observations_cutoff = 0)
)
trained_pipeline <- P(train = train)$pipe
train <- invoke(trained_pipeline, train)
test <- invoke(trained_pipeline, test)
}
|
59561865e2d9f90410c9e9cb4a146a2f448b6fee
|
293c38314bc324400a68b8932ffbbdfabc0a6f4c
|
/man/correl2Comp.Rd
|
675da1926f7b6538093e0dfb94ee28dad9d99486
|
[] |
no_license
|
bitona/MineICA
|
43ef703ede27cf38f2a14631dff92d95d69293ea
|
7c7785daa0cfbcabe0f18161a64552b616f7b14c
|
refs/heads/master
| 2023-04-17T06:17:19.288231
| 2023-04-11T20:32:39
| 2023-04-11T20:32:39
| 71,994,441
| 2
| 1
| null | 2016-10-26T10:57:19
| 2016-10-26T10:57:19
| null |
UTF-8
|
R
| false
| false
| 1,576
|
rd
|
correl2Comp.Rd
|
\name{correl2Comp}
\alias{correl2Comp}
\title{correl2Comp}
\description{This function computes the correlation between two components.}
\usage{correl2Comp(comp1, comp2, type.corr = "pearson", plot = FALSE,
cutoff_zval = 0, test = FALSE, alreadyTreat = FALSE)}
\arguments{
\item{comp1}{The first component, a vector of projections or contributions indexed by labels}
\item{comp2}{The second component, a vector of projections or contributions indexed by labels}
\item{type.corr}{Type of correlation to be computed, either \code{'pearson'} or \code{'spearman'}}
\item{plot}{if \code{TRUE}, plot \code{comp1} vs \code{comp2}}
\item{cutoff_zval}{either NULL or 0 (default) if all genes are used to compute the correlation between the components, or a threshold to compute the correlation on the genes that have at least a scaled projection higher than cutoff_zval. }
\item{test}{if TRUE the correlation test p-value is returned instead of the correlation value}
\item{alreadyTreat}{if TRUE comp1 and comp2 are considered as being already treated (i.e scaled and restricted to common elements) }
}
\details{Before computing the correlation, the components are scaled and restricted to common labels.
When \code{cutoff_zval} is different from \code{0}, the elements that are included in the circle of center 0 and radius \code{cutoff_zval} are not taken into account during the computation of the correlation.}
\value{This function returns either the correlation value or the p-value of the correlation test.}
\author{Anne Biton}
|
dc4f792b9a00406675afc199dc45bd04e8fb7a8d
|
20d793950af5e0c63f2a55eabbd1987d2fac517f
|
/man/treecanopy.Rd
|
a8c59bf9c5aa591e9c690baa8d4b5a778cd3fea1
|
[] |
no_license
|
RemkoDuursma/lgrdata
|
d708ad4af6dc586aa7ffbdc4ccf9bbb551da23bf
|
40f47e6588c056dfc9c909951e718ce4775bda3e
|
refs/heads/master
| 2020-04-18T15:36:00.468086
| 2019-06-19T09:45:57
| 2019-06-19T09:45:57
| 167,615,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,662
|
rd
|
treecanopy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\docType{data}
\name{treecanopy}
\alias{treecanopy}
\title{Tree canopy gradients in the Priest River Experimental Forest (PREF)}
\format{A data frame with 249 rows and 7 variables:
\describe{
\item{\code{ID}}{integer ID of the individual tree}
\item{\code{species}}{integer Pinus ponderosa or Pinus monticola}
\item{\code{dfromtop}}{double Distance from top of tree (where leaf sample was taken) (m)}
\item{\code{totheight}}{double Total height of the tree (m)}
\item{\code{height}}{double Height from the ground (where sample was taken) (m)}
\item{\code{LMA}}{double Leaf mass per area (g m$^{-2}$)}
\item{\code{narea}}{double Nitrogen per area (gN m$^{-2}$)}
}}
\source{
Marshall, J.D., Monserud, R.A. 2003. Foliage height influences specific
leaf area of three conifer species. Can J For Res 33:164-170
}
\usage{
treecanopy
}
\description{
Leaves of two pine species (35 trees in total) were sampled throughout their canopy, usually 8 samples were
taken at various heights. The height is expressed as the 'distance from top', i.e. the distance to the
apex of the tree. Leaves (conifer needles) were analysed for nitrogen content (narea), and an index
of leaf thickness, the 'leaf mass per area'. The data show the usual pattern of higher leaf thickness
(higher LMA) toward the top of the trees, but individual trees show a lot of variation in LMA.
}
\examples{
data(treecanopy)
if(require(ggplot2)){
ggplot(treecanopy, aes(dfromtop,LMA,group=ID,col=species)) +
geom_point() +
stat_smooth(method="lm",se=FALSE) +
theme_minimal()
}
}
\keyword{datasets}
|
6df015fd7af4c274348a547b658f015017ae72b9
|
5fc716629fc4a3b7c6467f1e2cc595ee77ada17d
|
/core/Conditionals.R
|
98da314e11ee1162345fb84a82d7d4a502fd7323
|
[] |
no_license
|
jeandersonbc/r-programming
|
7c0c3ddca19c3242eea6185027433d3935cf0120
|
a3d90d69a7acc611dd326d5b6c697cdb556a9201
|
refs/heads/master
| 2020-06-30T03:56:02.764802
| 2016-11-23T02:50:11
| 2016-11-23T02:50:11
| 74,392,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
Conditionals.R
|
# Conditionals
# rnorm(n) draws n random values from the standard normal distribution;
# here a single draw is classified against the interval [-1, 1].
randomValue <- rnorm(n=1)
# if / else if / else chains work like in C-family languages: the first
# condition that evaluates to TRUE wins and the remaining branches are skipped.
if (randomValue > 1) {
  print(paste(randomValue, "is greater than one"))
} else if (randomValue >= -1) {
  print(paste(randomValue, "between -1 and 1"))
} else {
  print(paste(randomValue, "less than -1"))
}
|
3e0b362548827544ad061eb561d3674fab262c07
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ADPclust/R/ADPclust-package.r
|
bc7e39f412990148705b2bbc728e7be15d9b7f9c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 513
|
r
|
ADPclust-package.r
|
#' @title Fast Clustering Using Adaptive Density Peak Detection
#'
#' @author Yifan "Ethan" Xu, Xiao-Feng Wang
#'
#' @import cluster
#' @import knitr
#' @import fields
#' @import dplyr
#' @references Rodriguez, A., & Laio, A. (2014). Clustering by fast search and find of density peaks. Science,
#' 344(6191), 1492-1496. doi:10.1126/science.1242072
#' @references Xiao-Feng Wang & Yifan Xu (2015). Fast Clustering Using Adaptive Density Peak Detection (accepted)
#'
#' @name ADPclust
#' @docType package
NULL
|
f70823b8dd6d37e06318beee751ea2386665f0da
|
a36fc4fd9b231e30e6d9519c77cafbdc9ceeaa27
|
/cachematrix.R
|
38f547239cf4eb08876bbe773def3d0528aa15ae
|
[] |
no_license
|
aralfaruqi/ProgrammingAssignment2
|
818f6485f8b6757e1119565f58b8f3b4aa0c922d
|
088ef0369ff739d28812ad2dd088b78a3fa49237
|
refs/heads/master
| 2022-12-14T11:55:48.615350
| 2020-09-15T14:27:13
| 2020-09-15T14:27:13
| 295,747,294
| 0
| 0
| null | 2020-09-15T14:04:47
| 2020-09-15T14:04:46
| null |
UTF-8
|
R
| false
| false
| 897
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: wrap a matrix in an object that can cache its inverse.
## Returns a list of four accessors:
##   set(y)            - replace the stored matrix and invalidate the cache
##   get()             - return the stored matrix
##   setinverseM(inv)  - store a computed inverse in the cache
##   getinverseM()     - return the cached inverse, or NULL if none cached yet
makeCacheMatrix <- function(x=matrix()) {
  inverseM <- NULL # cached inverse; NULL until setinverseM() stores one
  set <- function(y) {
    x <<- y
    inverseM <<- NULL # new matrix, so any previously cached inverse is stale
  }
  get <- function() {
    x
  }
  setinverseM <- function(inverse) {
    inverseM <<- inverse
  }
  # BUG FIX: this previously called solve(x) on every invocation, recomputing
  # the inverse each time and never returning NULL — which defeated the cache
  # and made cacheSolve() always take its "cached" branch. It must simply
  # return the cached value (NULL when nothing has been cached).
  getinverseM <- function() {
    inverseM
  }
  list(set=set, get=get, setinverseM=setinverseM, getinverseM=getinverseM)
}
## cacheSolve: return the inverse of the special "matrix" built by
## makeCacheMatrix(), computing it only on a cache miss and serving the
## cached copy (announced via a message) on every later call.
cacheSolve <- function(x, ...) {
  cached <- x$getinverseM()
  if (!is.null(cached)) {
    message("getting cached data!")
    return(cached)
  }
  # Cache miss: compute the inverse, store it for next time, and return it.
  freshInverse <- solve(x$get(), ...)
  x$setinverseM(freshInverse)
  freshInverse
}
|
649282eccdd2914a3d257185f1fe790798aefd0a
|
0574647f4c0678d6073d64511ce04e5fcecb28b4
|
/Change_Metrics_Extractor/GerritDataAnalyzer/src/experimental/LDATopicModeling.R
|
be105bff975e4261136a3999c9c6e65a3928b8fc
|
[] |
no_license
|
atifghulamnabi/frAmework-clouD-bAsed-buG-predIctiOn
|
516502a3dda6d4875b83df64e5e235a3b5e589c1
|
bd18ce5e3632dd76ba4147af296f9db40ee7f5e2
|
refs/heads/master
| 2023-04-19T23:05:27.440786
| 2020-09-03T13:52:19
| 2020-09-03T13:52:19
| 230,427,835
| 1
| 2
| null | 2021-04-26T20:34:59
| 2019-12-27T10:57:29
|
Java
|
UTF-8
|
R
| false
| false
| 5,271
|
r
|
LDATopicModeling.R
|
# read in the libraries
library(tidyverse) # general utility & workflow functions
library(tidytext) # tidy implementation of NLP methods
library(topicmodels) # for LDA topic modelling
library(tm) # general text mining functions
library(SnowballC) # for stemming

# Resolve all relative paths against the location of this source file.
# BUG FIX: getActiveDocumentContext() lives in the rstudioapi package, which
# was never attached, so the bare call failed; qualify it with the package
# namespace. (This only works when the script is run inside RStudio.)
current_path <-
  rstudioapi::getActiveDocumentContext()$path # get path of current src-file
setwd(dirname(current_path)) # set working-dir to current src-file
print(getwd()) # make sure it's correct

projectName <- "acceleo"
inDir <- paste("../projectData/", projectName, "/", sep="")
outDir <- paste("../results/", projectName, "/commentsAnalysis/", sep="")
dataPath <- paste(inDir, projectName, "_inlineComments.csv", sep = "")

# Reviewer inline comments for the project under analysis.
texts <- read_csv(dataPath)

# Prefix shared by every output file written by this script.
docPrefix <- paste(projectName, "_comments_analysis_", sep = "")

# Java reserved words, to be filtered out of the comment text alongside the
# ordinary English stopwords further below.
javaKeyWords <- c("abstract", "continue", "for", "new", "switch",
                  "assert", "default", "goto", "package", "synchronized",
                  "boolean", "do", "if", "private", "this",
                  "break", "double", "implements", "protected", "throw",
                  "byte", "else", "import", "public", "throws",
                  "case", "enum", "instanceof", "return", "transient",
                  "catch", "extends", "int", "short", "try",
                  "char", "final", "interface", "static", "void",
                  "class", "finally", "long", "strictfp", "volatile",
                  "const", "float", "native", "super", "while")
# Tidy one-column frame (column "x") so the keywords can be anti_join()ed.
tidyJavaKeywords <- tidy(javaKeyWords)
# Write an analysis table as CSV under outDir using the shared file-name prefix.
# BUG FIX: fwrite() belongs to the data.table package, which this script never
# attaches; qualify the call so it resolves regardless of what is loaded.
saveDoc <- function(data, fileName){
  data.table::fwrite(data, paste(outDir, docPrefix, fileName, sep=""))
}
# Save a ggplot object as an image under outDir, using the shared file-name
# prefix so all outputs of one run are grouped together.
savePlot <- function(myPlot, plotName) {
  target <- paste(outDir, docPrefix, plotName, sep = "")
  ggsave(filename = target, plot = myPlot)
}
# Build a document-term matrix from the reviewer comments so it can be cleaned.
reviewsCorpus <- Corpus(VectorSource(texts$commentReviewer))
reviewsDTM <- DocumentTermMatrix(reviewsCorpus)
# Convert the document-term matrix to a tidy (one-row-per-term) corpus.
reviewsDTM_tidy <- tidy(reviewsDTM)
# Remove both English stopwords and Java keywords (column "x" in
# tidyJavaKeywords) from the term list.
reviewsDTM_tidy_cleaned <- reviewsDTM_tidy %>% # take tidy dtm
  anti_join(stop_words, by = c("term" = "word")) %>% # remove English stopwords
  anti_join(tidyJavaKeywords, by= c("term" = "x"))
# Add a stemmed form of each remaining term (SnowballC).
reviewsDTM_tidy_cleaned <- reviewsDTM_tidy_cleaned %>%
  mutate(stem = wordStem(term))
# Reconstruct cleaned documents: repeat each term by its count so every word
# shows up the correct number of times, then collapse to one row per document.
cleaned_documents <- reviewsDTM_tidy_cleaned %>%
  group_by(document) %>%
  mutate(terms = toString(rep(term, count))) %>%
  select(document, terms) %>%
  unique()
# Sanity check: the cleaned documents should just be a bunch of content words
# (in alphabetic order, since they come from the DTM's term ordering).
head(cleaned_documents)
# Get & plot the most informative terms, by a specified number of topics,
# using LDA.
#
# Args:
#   input_text: a column from a data frame (character vector, one document
#     per element).
#   plot: if TRUE (default) return a ggplot of the top terms per topic;
#     if FALSE return the underlying data frame instead.
#   number_of_topics: number of LDA topics to fit (4 by default).
#
# Returns: a ggplot object (plot = TRUE) or a tibble of the top-10 terms per
#   topic, sorted by decreasing informativeness (beta).
top_terms_by_topic_LDA <- function(input_text,
                                   plot = TRUE,
                                   number_of_topics = 4)
{
  # create a corpus (type of object expected by tm) and document term matrix
  Corpus <- Corpus(VectorSource(input_text))
  DTM <- DocumentTermMatrix(Corpus)

  # remove any empty rows in the document term matrix — rows without terms
  # make LDA() error out
  unique_indexes <- unique(DTM$i)
  DTM <- DTM[unique_indexes, ]

  # perform LDA & get the words/topic in a tidy text format; the fixed seed
  # keeps topic assignments reproducible across runs
  lda <- LDA(DTM, k = number_of_topics, control = list(seed = 1234))
  topics <- tidy(lda, matrix = "beta")

  # get the ten most informative terms for each topic
  top_terms <- topics %>%
    group_by(topic) %>%
    top_n(10, beta) %>%
    ungroup() %>%
    arrange(topic, -beta)

  if (isTRUE(plot)) {
    # one bar panel per topic, terms ordered by informativeness (beta)
    top_terms %>%
      mutate(term = reorder(term, beta)) %>%
      ggplot(aes(term, beta, fill = factor(topic))) +
      geom_col(show.legend = FALSE) +
      facet_wrap(~ topic, scales = "free") +
      labs(x = NULL, y = "Beta") +
      coord_flip() # turn bars sideways
  } else {
    # no plot requested: return the sorted table of terms instead
    top_terms
  }
}
# Fit 4-topic and 6-topic models on the cleaned comments; for each, save both
# the top-10-terms plot (PNG) and, with F (= plot disabled), the underlying
# table (CSV).
topic4Plot <- top_terms_by_topic_LDA(cleaned_documents$terms, number_of_topics = 4)
savePlot(topic4Plot, "4topic_top10_noJava.png")
topTerms_4topics <- top_terms_by_topic_LDA(cleaned_documents$terms, F ,number_of_topics = 4)
saveDoc(topTerms_4topics, "4topic_top10_noJava.csv")
topic6Plot <- top_terms_by_topic_LDA(cleaned_documents$terms, number_of_topics = 6)
savePlot(topic6Plot, "6topic_top10_noJava.png")
topTerms_6topics <- top_terms_by_topic_LDA(cleaned_documents$terms, F ,number_of_topics = 6)
saveDoc(topTerms_6topics, "6topic_top10_noJava.csv")
|
6d442fb56270fab34e15626b682e4a606fa206bd
|
0c96d5b06d6fbc73d96986854d09b8c320ac21b7
|
/moving_average.R
|
2ed26cc9d549625d1fa7bf9bdf3c68b64f427542
|
[] |
no_license
|
anastasiiakhil/Data_analysis
|
d355db25fdcb284979159c8830f7e529956122e2
|
0df00413afc473390ee03b494a08a0f7f5c5ab85
|
refs/heads/master
| 2021-06-21T20:00:22.986950
| 2021-01-14T13:56:27
| 2021-01-14T13:56:27
| 175,623,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 117
|
r
|
moving_average.R
|
# 10-point trailing moving average of the monthly AirPassengers series:
# moving_average[i] = mean(AirPassengers[i:(i + 9)]), giving
# length(AirPassengers) - 9 values in total.
# Preallocate the result instead of growing it from c() inside the loop
# (growing copies the vector on every iteration), and use seq_len() so the
# loop is safe even for a series shorter than the window.
moving_average <- numeric(length(AirPassengers) - 9)
for (i in seq_len(length(AirPassengers) - 9)) {
  moving_average[i] <- mean(AirPassengers[i:(i + 9)])
}
|
e0f3f60c57139c4b30a8c3d1647feb94485c3257
|
208381569541724471055030ab223b0f1cebb59b
|
/Streetmaps.R
|
7bba365410c3746fe60e00856685d1037f98f775
|
[] |
no_license
|
WickM/Streetmaps
|
80fcb32f26ad7c6cb50bb64aa9dbd85121097568
|
9194e96442093e13b99c6a77c5a618192b4c3835
|
refs/heads/master
| 2020-11-24T09:31:53.700029
| 2019-12-16T12:11:09
| 2019-12-16T12:11:09
| 228,080,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,829
|
r
|
Streetmaps.R
|
library(tidyverse)
#devtools::install_github("ropensci/osmdata")
library(osmdata)

# Bounding box for Paris, manually tightened around the city centre.
cord <- getbb("Paris")
cord[1,1] <- 2.220000   # min longitude
cord[1,2] <- 2.3800000  # max longitude
cord[2,1] <- 48.750000  # min latitude
cord[2,2] <- 48.900000  # max latitude

# When TRUE, render black-on-white instead of the dark colour scheme below.
monochrome <- FALSE
bb <- cord
short_name <- "Paris"

# Query Overpass for every OSM "highway" feature inside the bounding box
# (network call; can be slow for a box this size).
highway_data<-bb %>%
  opq()%>%
  add_osm_feature(key = "highway") %>%
  osmdata_sf()

# Split line and polygon features by their highway tag: one list element per
# tag value (primary, footway, ...).
osm_lines_list<-highway_data$osm_lines%>%
  split(.$highway)
polygons_list<-highway_data$osm_polygons%>%
  split(.$highway)
# Build a single geom_sf layer for one class of OSM line features, with the
# given colour, line width and transparency. The parameter names match the
# list names used by the pmap() calls below, so they must not change.
plot_osm_lines <- function(osm_line_data, color = "#7fc0ff", size = 0.4, alpha = 0.8) {
  geom_sf(
    data = osm_line_data,
    inherit.aes = FALSE,
    color = color,
    size = size,
    alpha = alpha
  )
}
# One styling row per highway tag: colour (spelled "pallete" [sic] throughout),
# line width, transparency, plus the feature count for reference.
style_table<-tibble(tags=names(osm_lines_list),
                    pallete=viridis::viridis(length(osm_lines_list)),
                    size=0.4,
                    alpha=0.8,
                    number_of_lines= map_int(osm_lines_list, nrow))
# Major roads: thicker, fully visible orange lines.
style_table$size[style_table$tags%in%c("primary","secondary","tertiary","trunk")]<-1.2
style_table$pallete[style_table$tags%in%c("primary","secondary","tertiary","trunk")]<-"#ffbe7f"
style_table$alpha[style_table$tags%in%c("primary","secondary","tertiary","trunk")]<-0.8
# All other tags: same orange, but thinner and fainter (the viridis colours
# assigned above are effectively overwritten for every tag).
style_table$pallete[!(style_table$tags%in%c("primary","secondary","tertiary","trunk"))]<-"#ffbe7f"
style_table$size[!(style_table$tags%in%c("primary","secondary","tertiary","trunk"))]<-0.6
style_table$alpha[!(style_table$tags%in%c("primary","secondary","tertiary","trunk"))]<-0.3
# Foot/cycle-type ways: thin, grey, nearly transparent.
style_table$size[style_table$tags%in%c("track","path","bridleway","footway","cycleway")]<-0.3
style_table$pallete[style_table$tags%in%c("track","path","bridleway","footway","cycleway")]<-"gray48"
style_table$alpha[style_table$tags%in%c("track","path","bridleway","footway","cycleway")]<-0.05
# Styling for polygon features, restricted to tags that also appear as lines.
polygon_style_table<-filter(style_table, tags %in% names(polygons_list))
# Monochrome rendering overrides every colour with black.
if(monochrome){
  if(nrow(style_table)>0){
    style_table$pallete<-"black"
  }
  if(nrow(polygon_style_table)>0){
    polygon_style_table$pallete<-"black"
  }
}
# Keep only polygon tags with a matching entry in the line list, so the two
# pmap() inputs below stay aligned with their style tables.
polygons_list<-polygons_list[names(polygons_list)[names(polygons_list) %in% names(osm_lines_list)]]
# One geom_sf layer per highway tag for the line features ...
p<-ggplot() +
  pmap(
    list(
      osm_line_data = osm_lines_list,
      color = style_table$pallete,
      size = style_table$size,
      alpha = style_table$alpha
    ),
    plot_osm_lines)
# ... and, stacked on top, one layer per tag for the polygon features.
q<-p+pmap(
  list(
    osm_line_data = polygons_list,
    color = polygon_style_table$pallete,
    size = polygon_style_table$size,
    alpha = polygon_style_table$alpha
  ),
  plot_osm_lines)
# Crop slightly inside the bounding box to hide edge artefacts.
trim_x<- 0.001
trim_y <- 0.001
# NOTE(review): ylim subtracts trim_y from the whole c(...) vector (the ')'
# is placed differently than in xlim), which shifts BOTH limits down by
# trim_y rather than trimming each end — possibly unintended; preserved as-is.
r<-q+
  coord_sf(
    xlim = c(bb[1, 1] + trim_x, bb[1, 2] - trim_x),
    ylim = c(bb[2, 1] + trim_y, bb[2, 2]) - trim_y,
    expand = FALSE
  )
# Poster theme: no axis text/ticks, wide margins, dark (or white) background,
# with all colours flipped when monochrome is TRUE.
s<-r+theme(axis.text = element_blank(), plot.margin=unit(c(4,4,6,4),"cm"),
           panel.grid.major = element_line(colour = ifelse(monochrome,"white","#282828")),
           panel.grid.minor = element_line(colour = ifelse(monochrome,"white","#282828")),
           plot.background = element_rect(fill = ifelse(monochrome,"white","#282828")),
           panel.background = element_rect(fill = ifelse(monochrome,"white","#282828")),
           plot.caption = element_text(hjust = 0.5, color = ifelse(monochrome,"#282828","white"), size = 40),
           panel.border = element_rect(colour = ifelse(monochrome,"gray48","white"), fill=NA, size=2),
           axis.ticks = element_blank())
# Caption: an underline-style rule followed by the city name in capitals.
output<-s+labs(caption=paste0("\n",paste(rep("_",nchar(short_name)+4),collapse = ""),"\n",toupper(short_name)))
#print(r)
# A1-sized (594 x 841 mm) high-resolution export.
ggsave("maps/Paris2.png", plot = output ,dpi = 600, width = 594, height = 841, units = "mm")
|
15325ffbfe58fc1f0b8ed116b144bd5e28919611
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.developer.tools/man/wellarchitected_create_lens_version.Rd
|
a9cbeaff250e103ba75c1c59d51cd6208bb1d8af
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 738
|
rd
|
wellarchitected_create_lens_version.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wellarchitected_operations.R
\name{wellarchitected_create_lens_version}
\alias{wellarchitected_create_lens_version}
\title{Create a new lens version}
\usage{
wellarchitected_create_lens_version(
LensAlias,
LensVersion,
IsMajorVersion = NULL,
ClientRequestToken
)
}
\arguments{
\item{LensAlias}{[required]}
\item{LensVersion}{[required] The version of the lens being created.}
\item{IsMajorVersion}{Set to true if this is a new major lens version.}
\item{ClientRequestToken}{[required]}
}
\description{
Create a new lens version.
See \url{https://www.paws-r-sdk.com/docs/wellarchitected_create_lens_version/} for full documentation.
}
\keyword{internal}
|
455d3557bfac92c35a2edb3ad8928491f81b9a1a
|
467ff9c3708853fa099b3e68bb08c5ef54aa9c99
|
/ExploratoryDataAnalysis/project-1/plot2.R
|
3c3d1cce523a3e8f85e85664ec057693c799ed4e
|
[] |
no_license
|
bmei/datasciencecoursera
|
1063ef1e3a50493a81a8077a89ca0d447ea6a4cf
|
dd2379ba7c497d69f717e20904c26bc788a41c63
|
refs/heads/master
| 2021-01-18T18:25:30.687680
| 2015-03-19T03:53:56
| 2015-03-19T03:53:56
| 19,218,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 592
|
r
|
plot2.R
|
# Exploratory Data Analysis, Project 1 - Plot 2:
# line plot of Global Active Power for 2007-02-01 and 2007-02-02.

# Read the raw data; fields are ';'-separated and '?' marks missing values.
# (The original used the partially-matched argument name 'na.string';
# spelled out in full here.)
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   na.strings = "?")
# Parse Date (stored as d/m/Y text) into Date class.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Keep only the two target days.
mydata <- data[data$Date <= "2007-02-02" & data$Date >= "2007-02-01", ]
# Combine the Date and Time columns into a single timestamp column.
# POSIXct (not POSIXlt) is the appropriate class for a data-frame column.
mydata$datetime <- as.POSIXct(paste(mydata$Date, mydata$Time))
# Plot #2: Global Active Power over time, saved as a 480x480 PNG.
png("plot2.png", width = 480, height = 480, units = "px")
with(mydata, plot(datetime, Global_active_power, type = "l", xlab = "",
                  ylab = "Global Active Power (kilowatts)"))
dev.off()
|
bd61bd7017666fd5bd0247dc2d101aac60abd9dc
|
7e276072d233e583e975b63024503689f4149619
|
/man/df_hist.Rd
|
c07707a6d8585b4b61f3a1e688fc07663373d94b
|
[] |
no_license
|
Davide-bll/Shinymod
|
924203095ddc536bdaead56b39b77593f1ce4bdb
|
8c117e56cebdee184f6f76491dd0c5bf6460b382
|
refs/heads/master
| 2023-04-06T05:59:03.448747
| 2021-04-13T18:22:24
| 2021-04-13T18:22:24
| 260,253,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 412
|
rd
|
df_hist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphics.R
\name{df_hist}
\alias{df_hist}
\title{create an histogram object}
\usage{
df_hist(df, x_col, title = "Aggregate", custom = NULL)
}
\arguments{
\item{df}{df}
\item{x_col}{col to represent}
\item{title}{optional title}
\item{custom}{optional custom}
}
\value{
histogram object
}
\description{
create an histogram object
}
|
8b15a78f09005e7acb5bd6af9681481817c35c7c
|
f14e3a8823d00a12c25493ff12a7303c8f8fe305
|
/man/heatmap.Rd
|
4c590929442618f9d3be3747df26e90577310962
|
[] |
no_license
|
cran/rtrim
|
6565c068369efec0e9867b5fe397a641eb859638
|
80239e3f7cbeb66b9540284eed9fa1bd946d4666
|
refs/heads/master
| 2020-06-17T16:12:35.734973
| 2020-04-21T11:20:02
| 2020-04-21T11:20:02
| 74,989,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,934
|
rd
|
heatmap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trim_heatmap.R
\name{heatmap}
\alias{heatmap}
\title{Plot a heatmap representation of observed and/or imputed counts.}
\usage{
heatmap(
z,
what = c("data", "imputed", "fitted"),
log = TRUE,
xlab = "auto",
ylab = "Site #",
...
)
}
\arguments{
\item{z}{output of a call to \code{\link{trim}}.}
\item{what}{the type of heatmap to be plotted: 'data' (default), 'imputed' or 'fitted'.}
\item{log}{flag to indicate whether the count should be log-transformed first.}
\item{xlab}{x-axis label. The default value "auto" will evaluate to either "Year" or "Time point"}
\item{ylab}{y-axis label}
\item{...}{other parameters to be passed to \code{\link[graphics]{plot}}}
}
\description{
This function organizes the observed and/or imputed counts into a matrix where
rows represent sites and columns represent time points.
A bitmap image is constructed in which each pixel corresponds to an element of this matrix.
Each pixel is colored according to the corresponding count status, and the type of heatmap plot requested ('data', 'imputed' or 'fitted').
}
\details{
The 'imputed' heatmap uses the most elaborate color scheme:
Site/time combinations that are observed are colored red, the higher the count, the darker the red.
Site/time combinations that are imputed are colored blue, the higher the estimate, the darker the blue.
For the 'data' heatmap, missing site/time combinations are colored gray.
For the 'fitted' heatmap, all site/time combinations are colored blue.
By default, all counts are log-transformed prior to colorization, and observed counts of 0 are indicated as white pixels.
}
\examples{
data(skylark2)
z <- trim(count ~ site + year, data=skylark2, model=3)
heatmap(z,"imputed")
}
\seealso{
Other graphical post-processing:
\code{\link{plot.trim.index}()},
\code{\link{plot.trim.totals}()}
}
\concept{graphical post-processing}
|
8696720313b4c2c9ad63d987a3814ff4d6fc0181
|
a2bcf83193959276f7bd8db656db53e58e875a26
|
/shiny/app.R
|
dca42c7538ea99aca705ecf8ef984e7b86de9c9f
|
[] |
no_license
|
PieceMaker/presentation-lsdg-docker
|
2b3c22fafdfe390eb9e60499c9083b4c1d461065
|
09aa739cfad27583d344cbae6a653dcb5700ce99
|
refs/heads/master
| 2020-04-21T06:16:15.830018
| 2019-02-09T05:03:32
| 2019-02-09T05:03:32
| 169,361,528
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,072
|
r
|
app.R
|
# install.packages("DT")
library(shiny)
# Read in credit risk data; strings are kept as factors for the glm() below
# and for populating the selectInput choices from factor levels.
creditData <- read.csv('./german_credit_data.csv', header = T, stringsAsFactors = T)
# Logistic regression model of Risk on the applicant attributes; used by the
# server to score each submitted application.
creditRiskModel <- glm(
  Risk ~ Age + Sex + Job + Housing + Saving.accounts + Checking.account + Credit.amount + Duration + Purpose,
  data = creditData,
  family = 'binomial'
)
# Map the human-readable job labels shown in the UI onto the integer codes
# used in the data set.
jobLookup <- list("Unskilled Non-Resident" = 0, "Unskilled Resident" = 1, "Skilled" = 2, "Highly Skilled" = 3)
# Capitalise the first letter of every space-separated word, then of every
# "/"-separated part of the result (e.g. "radio/tv" -> "Radio/Tv"), so the
# raw factor levels from the data read nicely in the UI.
simpleCap <- function(x) {
  capitalise <- function(parts, glue) {
    paste(toupper(substring(parts, 1, 1)), substring(parts, 2),
          sep = "", collapse = glue)
  }
  by_space <- capitalise(strsplit(x, " ")[[1]], " ")
  capitalise(strsplit(by_space, "/")[[1]], "/")
}
# NOT BEST PRACTICE!!
# Session-global log of scored applications, mutated from the server via <<-.
# Starts as a single all-NA row so the output table can render before the
# first "Calculate" click (the server treats an all-NA table as empty).
resultsTable <<- data.frame(
  Age = NA,
  Sex = NA,
  Job = NA,
  Housing = NA,
  Savings = NA,
  Checking = NA,
  Amount = NA,
  Duration = NA,
  Purpose = NA,
  Probability = NA,
  Approval = NA
)
# Shiny app: a sidebar form describing one loan application, and a main-panel
# table accumulating the scored applications (newest first).
app <- shinyApp(
  ui = fluidPage(
    titlePanel("Shiny Credit Approval System"),
    sidebarLayout(
      sidebarPanel(
        # Choice lists are derived from the factor levels of the training
        # data, prettified by simpleCap().
        sliderInput(inputId = "age", label = "Age:", value = 25, min = 18, max = 75, step = 1),
        selectInput(inputId = "sex", label = "Sex:", choices = sapply(levels(creditData$Sex), simpleCap, USE.NAMES = F)),
        selectInput(inputId = "job", label = "Job:", choices = c("Unskilled Non-Resident", "Unskilled Resident", "Skilled", "Highly Skilled")),
        selectInput(inputId = "housing", label = "Housing:", choices = sapply(levels(creditData$Housing), simpleCap, USE.NAMES = F)),
        selectInput(inputId = "savings", label = "Savings:", choices = sapply(levels(creditData$Saving.accounts), simpleCap, USE.NAMES = F)),
        selectInput(inputId = "checking", label = "Checking:", choices = sapply(levels(creditData$Checking.account), simpleCap, USE.NAMES = F)),
        numericInput(inputId = "amount", label = "Amount:", value = 1000, min = 100, max = 25000, step = 1),
        sliderInput(inputId = "duration", label = "Duration (in months):", value = 12, min = 6, max = 72, step = 6),
        selectInput(inputId = "purpose", label = "Loan Purpose:", choices = sapply(levels(creditData$Purpose), simpleCap, USE.NAMES = F)),
        actionButton(inputId = "calculate", "Calculate")
      ),
      mainPanel(
        DT::dataTableOutput("resultsTable")
      )
    )
  ),
  server = function(input, output) {
    # Score one application each time "Calculate" is pressed.
    predictAcceptance <- observeEvent(input$calculate, {
      print("calculate clicked")
      # Undo the simpleCap() prettification so values match the factor
      # levels the model was trained on.
      sex <- tolower(input$sex)
      job <- jobLookup[[input$job]]
      housing <- tolower(input$housing)
      savings <- tolower(input$savings)
      checking <- tolower(input$checking)
      purpose <- tolower(input$purpose)
      # Special case: the original level is "radio/TV", which tolower()
      # would otherwise destroy.
      if(purpose == "radio/tv") {
        purpose <- "radio/TV"
      }
      # One-row frame in the model's column layout.
      newData <- data.frame(
        Age = input$age,
        Sex = sex,
        Job = job,
        Housing = housing,
        Saving.accounts = savings,
        Checking.account = checking,
        Credit.amount = input$amount,
        Duration = input$duration,
        Purpose = purpose
      )
      probability <- predict(creditRiskModel, newdata = newData, type = "response")
      approval <- ifelse(probability >= 0.5, "Approved", "Denied")
      # Display row uses the original (prettified) UI labels.
      results <- data.frame(
        Age = input$age,
        Sex = input$sex,
        Job = input$job,
        Housing = input$housing,
        Savings = input$savings,
        Checking = input$checking,
        Amount = input$amount,
        Duration = input$duration,
        Purpose = input$purpose,
        Probability = probability,
        Approval = approval
      )
      print(results)
      # Again, not best practice!! Global accumulator mutated via <<-; the
      # all-NA placeholder row is replaced on the first calculation, after
      # that new results are prepended.
      if(all(is.na(resultsTable))) {
        resultsTable <<- results
      } else {
        resultsTable <<- rbind(results, resultsTable)
      }
      output$resultsTable <- DT::renderDataTable(DT::datatable(resultsTable, rownames = F))
    })
    # Render the (initially blank) table before any button press.
    output$resultsTable <- DT::renderDataTable(DT::datatable(resultsTable, rownames = F))
  }
)
runApp(app, port = 3838, host = "0.0.0.0")
|
b306a6db3426f81199c26ff4bd2ae6be78c29855
|
43e38032774538ac92ed367ee2a4fbe9fab6a413
|
/plot4.R
|
bae5ccac85be189e76e296165de8379a262eb53c
|
[] |
no_license
|
cgrdavis/ExploratoryData
|
d8e8f0229f460c6c178cc7a29d126a2865cae3f3
|
add67831947934be0b08cd331f64bd21a2139433
|
refs/heads/master
| 2021-01-19T06:56:36.520330
| 2015-09-12T22:12:09
| 2015-09-12T22:12:09
| 42,367,997
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,899
|
r
|
plot4.R
|
## Getting and Cleaning Data
## Course 4 - Exploratory Data
## Course Project #1 - Plot 4 (header previously said "Plot 3")

## Download and unzip the household power consumption data.
## (method="wininet" was removed: it is Windows-only; the default "auto"
## picks a working method on every platform, including wininet on Windows.)
fileZip <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileZip, destfile = "data.zip")
dateDownloaded <- date()
unzip("data.zip")

## Read the full data set. '?' marks missing values; declaring na.strings
## keeps the measurement columns numeric instead of character.
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   stringsAsFactors = FALSE, na.strings = "?")

## Keep only the two days of interest (1-2 Feb 2007).
desDate <- data$Date == "1/2/2007" | data$Date == "2/2/2007"
data2 <- data[desDate, ]

library(datasets)

## Kept for compatibility with the original script; a no-op once the column
## is already numeric.
data2$Global_active_power <- as.numeric(data2$Global_active_power)

## Merge the Date and Time columns into a single timestamp.
data2$DateTime <- strptime(paste(data2$Date, data2$Time, sep = " "), "%d/%m/%Y %H:%M:%S")

## Move DateTime to the first column position.
data2 <- data2[c(10, 1, 2, 3, 4, 5, 6, 7, 8, 9)]

## Plot 4: 2x2 grid of time-series panels. (A leftover standalone
## sub-metering plot that was drawn before par(mfrow=...) — and therefore
## overwritten by the grid — has been removed.)
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(data2, {
  plot(DateTime, Global_active_power, ylab = "Global Active Power", xlab = "", type = "l")
  plot(DateTime, Voltage, type = "l")
  plot(DateTime, Sub_metering_1, ylab = "Energy sub metering", xlab = "", type = "l", col = "black")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
  legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(DateTime, Global_reactive_power, type = "l")
})
dev.off()
|
80aa68dbae1cdbad87eef2d95ab85c5d4b6fefb5
|
f90481e75ba7830811c8a4b214dc36fd0ecc0e95
|
/web/_data/R_code.R
|
c64294303cc98dc5655bb766c5932793c1ffb5e6
|
[
"MIT"
] |
permissive
|
ecoinfAEET/dataton_sibecol_2019
|
1fff2897cfd6ede06d1f7842d3d4e7f096827667
|
738d64e75f60f13a997adbce41432a4476dd4a9b
|
refs/heads/master
| 2022-05-01T03:24:50.592242
| 2022-04-12T13:07:31
| 2022-04-12T13:07:31
| 157,699,455
| 1
| 2
|
MIT
| 2020-07-22T10:36:39
| 2018-11-15T11:19:40
|
HTML
|
UTF-8
|
R
| false
| false
| 1,154
|
r
|
R_code.R
|
# Load data from Gbif and then run the following code:
# Name 'dat' to your data
# Snap each (x, y) coordinate onto the centre of the grid cell containing
# it.  `xy` is a two-column matrix of coordinates; `origin` and `cellsize`
# define the grid.  Returns a matrix of the same shape holding cell-centre
# coordinates (same arithmetic and term order as the original).
ji <- function(xy, origin = c(0, 0), cellsize = c(0.1, 0.1)) {
  snap_point <- function(pt) {
    cell.index <- floor((pt - origin) / cellsize)
    cellsize / 2 + origin + cellsize * cell.index
  }
  t(apply(xy, 1, snap_point))
}
# Assign every occurrence record to a 0.1-degree grid cell (via ji) and
# count unique species per cell.  Expects `dat` (a GBIF download with
# decimalLongitude/decimalLatitude/scientificName) in the workspace.
JI <- ji(cbind(dat$decimalLongitude, dat$decimalLatitude))
dat$X <- JI[, 1]
dat$Y <- JI[, 2]
# Cell label: "x y" string identifying each record's grid cell
dat$Cell <- paste(dat$X, dat$Y)
# Per-cell summary: cell-centre coordinates + number of distinct species
counts <- by(dat, dat$Cell, function(d) c(d$X[1], d$Y[1],
  length(unique(d$scientificName))))
head(counts)
# Reshape the by() list into a 3-row matrix (X, Y, richness) and save it
counts.m <- matrix(unlist(counts), nrow=3)
rownames(counts.m) <- c("X", "Y", "Count")
write.csv(as.data.frame(t(counts.m)), "grid.csv")
count.max <- max(counts.m["Count",])
# Colour scale: hue ~ sqrt(relative richness), semi-transparent points
colors = sapply(counts.m["Count",], function(n) hsv(sqrt(n/count.max), .7, .7, .5))
# NOTE(review): ji() already returns cell-CENTRE coordinates, so the extra
# "+ 1/2" offset below shifts points by half a degree (not half a cell);
# the title also says "1 km grid" while cellsize is 0.1 degree -- confirm.
plot(counts.m["X",] + 1/2, counts.m["Y",] + 1/2, cex=1,
  pch = 19, col=colors, xlim=c(-12,5), ylim=c(35,45),
  xlab="Longitude of cell center", ylab="Latitude of cell center",
  main="Shrub richness within 1 km grid cells")
# Code was extracted and modified from:
# https://gis.stackexchange.com/questions/48416/aggregating-points-to-grid-using-r
|
a5a97cf8a3ea85cdca72d54310adcdacf605e5a1
|
21bb3d4d09bc942b6ee2670d7472938803a384ca
|
/man/ler_pnadc_anual.Rd
|
d4e5f2a39b1fcd1bb0cdb3ddc544080c9b3fc10b
|
[
"MIT"
] |
permissive
|
pedrorubin/pnadcpackage
|
a17e1f6113053e2c31f40192b2a972e481de96cc
|
d13918cf8252dd24e74c08c81c0603f5746fc270
|
refs/heads/master
| 2023-08-22T05:16:46.557475
| 2021-10-27T13:42:31
| 2021-10-27T13:42:31
| 418,287,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 608
|
rd
|
ler_pnadc_anual.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ler_pnadc_anual.R
\name{ler_pnadc_anual}
\alias{ler_pnadc_anual}
\title{Carregar os microdados da PNADC anual}
\usage{
ler_pnadc_anual(ano, caminho_pnadc)
}
\arguments{
\item{ano}{ano}
\item{caminho_pnadc}{A pasta na qual os arquivos de microdados estão guardados (em geral a mesma utilizada na função baixar_pnadc_anual)}
}
\value{
Um dataframe com todos os microdados
}
\description{
Carregar (para o R) os microdados da PNADC anual
}
\examples{
ler_pnadc_anual(2018, "./pasta_microdados");
}
\seealso{
baixar_pnadc_anual
}
|
206027cdbc469d2835f1b04fe2fbc26282b35792
|
882819a9ae9827a1ccde551479c994a782622da3
|
/tests/testthat/tests_plotplcfs.R
|
5c5d3fa3f465b70df94f295661b0a9291d7a578b
|
[] |
no_license
|
reumandc/fspack
|
f3f2b691c247e960157b18f835a1a5a79770b72b
|
260aa14ffc4eab3d5e3e9c44a11e60981eca76f9
|
refs/heads/master
| 2020-03-28T03:56:46.010442
| 2018-10-16T17:13:49
| 2018-10-16T17:13:49
| 147,683,545
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,896
|
r
|
tests_plotplcfs.R
|
# Tests for plotplcfs(): argument validation plus visual-regression
# snapshots (expect_doppelganger).  Expectation strings must match the
# error messages emitted by plotplcfs() exactly.
context("plotplcfs")

test_that("test error handling",{
  # plcflist must be a list whose every element is a plcf object
  expect_error(plotplcfs("test",xlabel="testx",ylabel="testy"),
    "Error in plotplcfs: plcflist must be a list of plcf objects")
  expect_error(plotplcfs(list("test"),xlabel="testx",ylabel="testy"),
    "Error in plotplcfs: plcflist must be a list of plcf objects")
  # two valid piecewise-linear continuous functions for the remaining checks
  plcf1<-plcf(c(1,2,3,4),c(1,0,1,4))
  plcf2<-plcf(c(1.5,2.5,3.5),c(-1,-2,-5))
  h<-list(plcf1,plcf2)
  # filename, bds (numeric, length 2, finite, increasing) and axis labels
  # are each validated with a distinct error message
  expect_error(plotplcfs(h,xlabel="testx",ylabel="testy",filename=1),
    "Error in plotplcfs: inappropriate filename argument")
  expect_error(plotplcfs(h,xlabel="testx",ylabel="testy",bds="test"),
    "Error in plotplcfs: bds must be numeric")
  expect_error(plotplcfs(h,xlabel="testx",ylabel="testy",bds=1),
    "Error in plotplcfs: bds must be length 2")
  expect_error(plotplcfs(h,xlabel="testx",ylabel="testy",bds=c(1,NA)),
    "Error in plotplcfs: bds must have finite elements")
  expect_error(plotplcfs(h,xlabel="testx",ylabel="testy",bds=c(3,2)),
    "Error in plotplcfs: first element of bds must be less than second element")
  expect_error(plotplcfs(h,xlabel=1,ylabel="testy"),
    "Error in plotplcfs: inappropriate xlabel argument")
  expect_error(plotplcfs(h,xlabel="testx",ylabel=c("testy1","testy2")),
    "Error in plotplcfs: inappropriate ylabel argument")
})

test_that("test the output for some simple cases",{
  # snapshot tests: one curve and two curves on the same axes
  plcf1<-plcf(c(1,2,3,4),c(1,0,1,5))
  plcf2<-plcf(c(1.5,2.5,3.5),c(-1,-2,-5))
  h<-list(plcf1,plcf2)
  Test_plotplcfs_1<-function(){plotplcfs(plcflist=h[1],xlabel="time (days)",ylabel="river km")}
  expect_doppelganger(title="Test-plotplcfs-1",fig=Test_plotplcfs_1)
  Test_plotplcfs_2<-function(){plotplcfs(plcflist=h,xlabel="text x lab",ylabel="text y lab")}
  expect_doppelganger(title="Test-plotplcfs-2",fig=Test_plotplcfs_2)
})
|
b28049a838ad4d8d36541b49166c7b071d3b18e8
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/WebGestaltR/R/idMappingPhosphosite.R
|
e9c5a54e5ba359e8c721a1b4321745ecc0d5e74e
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,099
|
r
|
idMappingPhosphosite.R
|
#' @importFrom httr POST content
#' @importFrom dplyr right_join select left_join %>%
# Map phosphosite identifiers between ID types for list/rnk/gmt input,
# resolve each site's parent gene, attach gene symbol/name and an outbound
# link, and optionally write the mapping to disk.  Works either against a
# local "file://" data snapshot or the WebGestalt HTTP API.
# Returns list(mapped = <data.frame>, unmapped = <unmapped source ids>).
idMappingPhosphosite <- function(organism="hsapiens", dataType="list", inputGeneFile=NULL, inputGene=NULL, sourceIdType, targetIdType, collapseMethod="mean", mappingOutput=FALSE, outputFileName="", hostName="http://www.webgestalt.org/") {
  ###########Check input data type###############
  inputGene <- idMappingInput(dataType=dataType,inputGeneFile=inputGeneFile,inputGene=inputGene)

  ##########ID Mapping Specify to phosphosite level###############
  if(dataType=="list"){
    inputGeneL <- unique(inputGene)
  }
  if(dataType=="rnk"){
    ######Collapse the gene ids with multiple scores##########
    x <- tapply(inputGene$score, inputGene$gene, collapseMethod)
    inputGene <- data.frame(gene=names(x),score=as.numeric(x),stringsAsFactors=FALSE)
    inputGeneL <- inputGene$gene
    colnames(inputGene) <- c(sourceIdType,"score")
  }
  if(dataType=="gmt"){
    colnames(inputGene) <- c("geneSet", "link", sourceIdType)
    inputGeneL <- unique(inputGene$gene)
  }

  # Map source ids to the target type: local flat files vs the web API
  if (startsWith(hostName, "file://")) {
    sourceMap <- read_tsv(
      removeFileProtocol(file.path(hostName, "xref", paste(organism, sourceIdType, "phosphositeSeq.table", sep="_"))),
      col_names=c("phosphositeSeq", "userId"), col_types="cc", quote=""
    ) %>% filter(.data$userId %in% inputGeneL)
    if (targetIdType == "phosphositeSeq" || targetIdType == sourceIdType) {
      mappedInputGene <- sourceMap
    } else {
      # bridge source -> target through the shared phosphositeSeq key
      targetMap <- read_tsv(
        removeFileProtocol(file.path(hostName, "xref", paste(organism, targetIdType, "phosphositeSeq.table", sep="_"))),
        col_names=c("phosphositeSeq", targetIdType), col_types="cc", quote=""
      )
      mappedInputGene <- inner_join(sourceMap, targetMap, by=c("phosphositeSeq"))
    }
    # NOTE(review): the web branch below uses stop(idMappingError("empty"));
    # returning the error object here looks inconsistent -- confirm intended.
    if (nrow(mappedInputGene) == 0) { return(idMappingError("empty")) }
    mappedInputGene <- select(mappedInputGene, .data$userId, targetIdType)
    unmappedIds <- setdiff(inputGeneL, mappedInputGene$userId)
  } else {
    response <- POST(file.path(hostName, "api", "idmapping"), encode="json",
      body=list(organism=organism, sourceType=sourceIdType,
      targetType=targetIdType, ids=inputGeneL, standardId="phosphositeSeq")
    )
    if (response$status_code != 200) {
      stop(webRequestError(response))
    }
    mapRes <- content(response)
    if (mapRes$status == 1) {
      stop(webApiError(mapRes))
    }
    mappedIds <- mapRes$mapped
    unmappedIds <- unlist(mapRes$unmapped)
    if (length(mappedIds) == 0) { stop(idMappingError("empty")) }
    # flatten the list of {sourceId, targetId} records into a data frame
    names <- c("sourceId", "targetId")
    mappedInputGene <- data.frame(matrix(unlist(lapply(replace_null(mappedIds), FUN=function(x) { x[names] })), nrow=length(mappedIds), byrow=TRUE), stringsAsFactors=FALSE)
    colnames(mappedInputGene) <- c("userId", targetIdType)
  }

  ### Get gene name and symbol in 2nd step, either direct by geneid or mapping to uniprot ambiguously
  # TODO mapping to target other than 15mer may introduce ambiguity, like DTQIKRNtFVGTPFW maps to three STKs in uniprot.
  # Not essential for WG, but could use protein ID to determine
  if (grepl("Uniprot", sourceIdType, fixed=TRUE) || grepl("Ensembl", sourceIdType, fixed=TRUE) || grepl("Refseq", sourceIdType, fixed=TRUE)) { ##if the sourceIdType is Uniprot, Ensembl or Refseq, directly extract the gene level id####
    # ids look like "<protein>_<site>"; .combineG drops the trailing site token
    mappedInputGene$gene <- unlist(lapply(strsplit(mappedInputGene$userId, "_"), .combineG))
  }else{
    ###If the input id type is sequence, we will first map the sequence to uniprot. And then map the uniprot to gene name####
    if (targetIdType == "phosphositeUniprot") {
      mappedInputGene$gene <- unlist(lapply(strsplit(mappedInputGene[, targetIdType], "_"), .combineG))
    } else {
      if (startsWith(hostName, "file://")) {
        uniMapRes <- read_tsv(
          removeFileProtocol(file.path(hostName, "xref", paste(organism, "phosphositeUniprot", "phosphositeSeq.table", sep="_"))),
          col_names=c("phosphositeSeq", "phosphositeUniprot"), col_types="cc", quote=""
        ) %>% filter(.data$phosphositeSeq %in% mappedInputGene$phosphositeSeq)
      } else {
        response <- POST(file.path(hostName, "api", "idmapping"), encode="json",
          body=list(organism=organism, sourceType="phosphositeSeq", standardId="phosphositeSeq",
          targetType="phosphositeUniprot", ids=inputGeneL)
        )
        if (response$status_code != 200) {
          stop(webRequestError(response))
        }
        uniMapRes <- content(response)
        if (uniMapRes$status == 1) {
          stop(webApiError(uniMapRes))
        }
        # NOTE(review): return vs stop inconsistency, same as above -- confirm.
        if (length(uniMapRes$mapped) == 0) { return(idMappingError("empty")) }
        names <- c("sourceId", "targetId")
        uniMapRes <- data.frame(matrix(unlist(lapply(replace_null(uniMapRes$mapped), FUN=function(x) { x[names] })), nrow=length(uniMapRes$mapped), byrow=TRUE), stringsAsFactors=FALSE)
        colnames(uniMapRes) <- c("phosphositeSeq", "phosphositeUniprot")
      }
      uniMapRes$gene <- unlist(lapply(strsplit(uniMapRes[, "phosphositeUniprot"], "_"), .combineG))
      # Map ID may change nrow due to unmapped ones
      mappedInputGene <- uniMapRes %>% select(.data$phosphositeSeq, .data$gene) %>% right_join(mappedInputGene, by="phosphositeSeq")
    }
  }

  #####Hard code#######
  # choose the gene-level id type and the external link template per source
  if (grepl("Uniprot", sourceIdType, fixed=TRUE) || sourceIdType == "phosphositeSeq") {
    geneType <- "uniprotswissprot"
    outLink <- "http://www.uniprot.org/uniprot/"
  }
  if (grepl("Ensembl", sourceIdType, fixed=TRUE)) {
    geneType <- "ensembl_peptide_id"
    outLink <- paste("http://www.ensembl.org/",organism,"/Gene/Summary?db=core;t=",sep="")
  }
  if (grepl("Refseq", sourceIdType, fixed=TRUE)) {
    geneType <- "refseq_peptide"
    outLink <- "https://www.ncbi.nlm.nih.gov/protein/"
  }
  mappedInputGene$gLink <- paste0(outLink, mappedInputGene$gene)

  ########Get gene level information#########
  entrezgeneMapRes <- idMappingGene(organism=organism, dataType="list", inputGene=mappedInputGene$gene, sourceIdType=geneType, targetIdType="entrezgene", mappingOutput=FALSE, hostName=hostName)
  mergedRes <- entrezgeneMapRes$mapped %>% select(gene=.data$userId, .data$geneSymbol, .data$geneName) %>%
    right_join(mappedInputGene, by="gene")

  # assemble the output columns expected for each input data type
  if(dataType=="list"){
    inputGene <- select(mergedRes, .data$userId, .data$geneSymbol, .data$geneName, targetIdType, .data$gLink)
  }
  if(dataType=="rnk"){
    inputGene <- mergedRes %>% left_join(inputGene, by=c("userId"=sourceIdType)) %>%
      select(.data$userId, .data$geneSymbol, .data$geneName, targetIdType, .data$score, .data$gLink)
  }
  if(dataType=="gmt"){
    inputGene <- mergedRes %>% left_join(inputGene, by=c("userId"=sourceIdType)) %>%
      select(.data$geneSet, .data$link, .data$userId, .data$geneSymbol, .data$geneName, targetIdType, .data$gLink)
  }

  #############Output#######################
  if (mappingOutput) {
    idMappingOutput(outputFileName, inputGene, unmappedIds, dataType, sourceIdType, targetIdType=targetIdType)
  }
  r <- list(mapped=inputGene,unmapped=unmappedIds)
  return(r)
}
# Drop the trailing token of a split identifier and re-join the rest with
# "_", e.g. c("P12345", "S15", "T20") -> "P12345_S15".  Used to recover the
# protein/gene part of a "<protein>_<site>" phosphosite id.
.combineG <- function(e) {
  paste(head(e, -1), collapse = "_")
}
|
213ae54a982760cd8ecd0179bf4caeb74a555b95
|
1e800858d0d3130be58458b284f31cc196f48f10
|
/simu.main.cluster5.sparse.R
|
e02f9ba4318a01099c2f5eb77b6191e4ca12b275
|
[] |
no_license
|
ShanYu3393/fusionFLM
|
6f1ac495e680b10ac193abfdbff59b28c83cb12f
|
e216f63d2ff9adc86b64a1c78568966af22a92df
|
refs/heads/master
| 2022-11-09T12:46:23.769244
| 2020-06-23T15:17:42
| 2020-06-23T15:17:42
| 273,521,201
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,065
|
r
|
simu.main.cluster5.sparse.R
|
# Simulation study: subgroup fusion for functional linear models (FLM),
# 5-cluster setting with a sparse kNN prior graph built from a kinship
# matrix.  This block sets up the truth and tuning grids; `simu.main`
# (defined below) runs one replicate.
# NOTE: the original began with rm(list = ls()); wiping the caller's
# global environment from a script is an anti-pattern and was removed.

# library
library(igraph)
library(fda)
library(fossil)
library(mclust)
library(cluster)
library(parallel)

# source files (model fitting, evaluation and plotting helpers)
source('alpha.func.5.R')
source('int.R')
source('int.2.R')
source('simu.data.generating.R')
source('delta.matrix.R')
source('edge.generating.R')
source('fit.fusionFLM.R')
source('plot.alpha.R')
source('plot.beta.R')
source('admm.flm.R')
source('group.identify.R')
source('fusionFLM.R')
source('soft.thresh.R')
source('jaccard.R')
source('eval.FLM.R')
source('kmeans.fusionFLM.R')
source('ind.FLM.R')
source('oracle.FLM.R')

# read in data: kinship distances and the true 5-group assignment
load('data/kinship.matrix.yield.rda')
load('data/true.group.5.rda')

# simulation setting
n.subgroup <- length(true.group.5)  # number of subgroups
n.ober <- 50                        # observations per subgroup
error.sd <- 0.5                     # noise standard deviation
number.knn <- 10                    # k for the kNN prior graph
n.T <- 100                          # functional grid size
n.X <- 1                            # number of functional covariates
t.range <- c(0, 1)                  # domain of the functional covariate

# Design matrices encoding the true memberships: Q.alpha1 is the 5-group
# indicator matrix; Q.intercept collapses groups {1,5} and {2,3,4} into
# two intercept clusters.
Q.alpha1 <- model.matrix(~ as.factor(true.group.5) + 0)
Q.intercept <- cbind(Q.alpha1[, 1] + Q.alpha1[, 5], rowSums(Q.alpha1[, 2:4]))
Q.list <- list(Q.intercept, Q.alpha1)
beta <- c(5, 6)
alpha <- list(list(alpha1, alpha2, alpha3, alpha4, alpha5))
true.cluster <- rbind(Q.intercept[, 1] + 2 * Q.intercept[, 2], true.group.5)
true.beta <- beta
true.alpha <- alpha

# true coefficient functions (visual sanity check)
col <- categorical_pal(5)
time.point <- (0:100) * 0.01
plot(time.point, alpha1(time.point), type = "l", col = col[1], lwd = 2,
     ylab = expression(alpha), xlab = 't')
points(time.point, alpha2(time.point), type = "l", col = col[2], lwd = 2)
points(time.point, alpha3(time.point), type = "l", col = col[3], lwd = 2)
points(time.point, alpha4(time.point), type = "l", col = col[4], lwd = 2)
points(time.point, alpha5(time.point), type = "l", col = col[5], lwd = 2)

# spline basis: B-splines of order rho + 1 with nbasis basis functions
N <- 4
rho <- 2
nbasis <- N + rho + 1
sp.basis <- create.bspline.basis(t.range, nbasis = nbasis, norder = rho + 1)

# construct graph for the fused-lasso penalty: kNN on kinship distances
edge.matrix.prior <- edge.generating(subgroup.name = 1:n.subgroup,
                                     type = 'knn', dist = kinship.matrix.yield,
                                     knn = number.knn, vertex.size = 6,
                                     Group.est = true.group.5)

# tuning grids for the two penalty stages (the original redefined
# lambda1/lambda2 twice with identical values; one definition suffices)
lambda1 <- 10^seq(-2, 2, by = 1)
lambda2 <- 10^seq(-2, 2, by = 1)
Lambda1 <- expand.grid(lambda1, lambda2)
Lambda2 <- expand.grid(lambda1, lambda2)
Lambda.list <- list(Lambda1, Lambda2)
# generate data
# One replicate of the simulation: seed the RNG with `iter`, simulate data
# from the true 5-cluster FLM, fit six competing estimators (oracle,
# individual, k-means, and three fusion variants differing only in their
# initialization), time each fit, and evaluate clustering accuracy,
# beta RMSE and alpha RMISE.  Relies on the globals defined above
# (n.subgroup, Q.list, sp.basis, edge.matrix.prior, Lambda.list, ...).
simu.main <- function (iter) {
  set.seed(iter)
  print(iter)
  data.simu <- simu.data.generating(n.subgroup, n.ober, n.T, n.X,
                                    Q.list, beta, alpha, error.sd, t.range = t.range)
  # oracle estimator: fitted with the true group structure (benchmark)
  t0 <- proc.time()
  fitted.oracle <- oracle.FLM(data = data.simu, Q.list = Q.list,
                              sp.basis = sp.basis)
  t.oracle <- proc.time() - t0
  # individual group estimator
  t0 <- proc.time()
  fitted.ind <- ind.FLM(data = data.simu, sp.basis)
  t.ind <- proc.time() - t0
  # functional linear regression, k-means
  t0 <- proc.time()
  fitted.kmeans <- kmeans.fusionFLM(data = data.simu, sp.basis)
  t.kmeans <- proc.time() - t0
  # functional linear regression, sparse graph (kmeans start)
  t0 <- proc.time()
  fitted.sparse.g1 <- fusionFLM(data = data.simu, sp.basis = sp.basis,
                                edge.matrix = edge.matrix.prior,
                                Lambda.list = Lambda.list, initial.type = 'kmeans',
                                objective.path = FALSE, save.plot = FALSE)
  t.sparse.g1 <- proc.time() - t0
  # sparse graph (individual-fit start)
  t0 <- proc.time()
  fitted.sparse.g2 <- fusionFLM(data = data.simu, sp.basis = sp.basis,
                                edge.matrix = edge.matrix.prior,
                                Lambda.list = Lambda.list, initial.type = 'individual',
                                objective.path = FALSE, save.plot = FALSE)
  t.sparse.g2 <- proc.time() - t0
  # sparse graph (lasso start)
  # NOTE(review): save.plot = TRUE only for this variant -- confirm intended.
  t0 <- proc.time()
  fitted.sparse.g3 <- fusionFLM(data = data.simu, sp.basis = sp.basis,
                                edge.matrix = edge.matrix.prior,
                                Lambda.list = Lambda.list, initial.type = 'lasso',
                                objective.path = FALSE, save.plot = TRUE)
  t.sparse.g3 <- proc.time() - t0
  # evaluate every fit against the truth
  result.oracle <- eval.FLM(fitted.oracle, true.cluster, true.beta, true.alpha, sp.basis)
  result.ind <- eval.FLM(fitted.ind, true.cluster, true.beta, true.alpha, sp.basis)
  result.kmeans <- eval.FLM(fitted.kmeans, true.cluster, true.beta, true.alpha, sp.basis)
  result.sparse.g1 <- eval.FLM(fitted.sparse.g1, true.cluster, true.beta, true.alpha, sp.basis)
  result.sparse.g2 <- eval.FLM(fitted.sparse.g2, true.cluster, true.beta, true.alpha, sp.basis)
  result.sparse.g3 <- eval.FLM(fitted.sparse.g3, true.cluster, true.beta, true.alpha, sp.basis)
  # stack timings and metrics; row order: oracle, ind, kmeans, g1, g2, g3
  time.all <- c(t.oracle[3], t.ind[3], t.kmeans[3], t.sparse.g1[3], t.sparse.g2[3],
                t.sparse.g3[3])
  cluster.all <- rbind(result.oracle$cluster.results,
                       result.ind$cluster.results,
                       result.kmeans$cluster.results,
                       result.sparse.g1$cluster.results,
                       result.sparse.g2$cluster.results,
                       result.sparse.g3$cluster.results)
  rmse.beta.all <- rbind(result.oracle$rmse.beta,
                         result.ind$rmse.beta,
                         result.kmeans$rmse.beta,
                         result.sparse.g1$rmse.beta,
                         result.sparse.g2$rmse.beta,
                         result.sparse.g3$rmse.beta)
  rmise.alpha.all <- rbind(result.oracle$rmise.alpha,
                           result.ind$rmise.alpha,
                           result.kmeans$rmise.alpha,
                           result.sparse.g1$rmise.alpha,
                           result.sparse.g2$rmise.alpha,
                           result.sparse.g3$rmise.alpha)
  list(time.all, cluster.all, rmse.beta.all, rmise.alpha.all)
}
# result <- simu.main(1)
# print(result)
# Run 100 replicates on 16 workers and save the raw list of results.
result <- mclapply(1:100, simu.main, mc.cores = 16)
save(file = paste0('result/cluster5', 'n', n.ober, 'sigma', error.sd, '.rda'), result)
|
aa276e33b6d9cad5baffbd36828ac3446d8d32f9
|
e2a5cdf2dcbd788ac7c091897b5a027a809c302a
|
/man/latlongNeighborhoodData.Rd
|
547e86aebe1c3e49a5197cb4df8320e01995813f
|
[] |
no_license
|
lindbrook/cholera
|
3d20a0b76f9f347d7df3eae158bc8a357639d607
|
71daf0de6bb3fbf7b5383ddd187d67e4916cdc51
|
refs/heads/master
| 2023-09-01T01:44:16.249497
| 2023-09-01T00:32:33
| 2023-09-01T00:32:33
| 67,840,885
| 138
| 13
| null | 2023-09-14T21:36:08
| 2016-09-10T00:19:31
|
R
|
UTF-8
|
R
| false
| true
| 792
|
rd
|
latlongNeighborhoodData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/latlongNeighborhoodData.R
\name{latlongNeighborhoodData}
\alias{latlongNeighborhoodData}
\title{Compute network graph of roads, cases and pumps.}
\usage{
latlongNeighborhoodData(vestry = FALSE, case.set = "observed",
embed.addr = TRUE, multi.core = TRUE)
}
\arguments{
\item{vestry}{Logical.}
\item{case.set}{Character. "observed" or "expected".}
\item{embed.addr}{Logical. Embed case address into graph network.}
\item{multi.core}{Logical or Numeric. \code{TRUE} uses \code{parallel::detectCores()}. \code{FALSE} uses one, single core. You can also specify the number logical cores. See \code{vignette("Parallelization")} for details.}
}
\description{
Assembles cases, pumps and road into a network graph.
}
|
13ed183a261999f2ea61ac368b6f05177fdf0bcb
|
8543b54ea30ee36884605dbc2ab60ad79cdac285
|
/man/string2mat.Rd
|
de3f3c7e2689de4a580c517f90318c7cad93bfe7
|
[] |
no_license
|
cran/BiDAG
|
3486d63fff82d3285b084e4ccfabb5198f4e81d5
|
6097d8ea159f5e4dc3c95b6d815d8e827efd5532
|
refs/heads/master
| 2023-05-27T11:45:52.035215
| 2023-05-16T11:46:02
| 2023-05-16T11:46:02
| 97,764,089
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,823
|
rd
|
string2mat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphhelpfns.R
\name{string2mat}
\alias{string2mat}
\title{Deriving interactions matrix}
\usage{
string2mat(curnames, int, mapping = NULL, type = c("int"), pf = 2)
}
\arguments{
\item{curnames}{character vector with gene names which will be used in \code{BiDAG} learning function}
\item{int}{data frame, representing a interactions between genes/proteins downloaded from STRING (\url{https://string-db.org/}); two columns are necessary 'node1' and 'node2'}
\item{mapping}{(optional) data frame, representing a mapping between 'curnames' (gene names, usually the column names of 'data') and gene names used in interactions downloaded from STRING (\url{https://string-db.org/}); two columns are necessary 'queryItem' and 'preferredName'}
\item{type}{character, defines how interactions will be reflected in the output matrix; \code{int} will result in a matrix whose entries equal 1 if an interaction is present in the list of interactions \code{int} and 0 otherwise; \code{blacklist} results in a matrix whose entries equal 0 when an interaction is present in the list of interactions and 1 otherwise;
\code{pf} results in a matrix whose entries equal 1 if an interaction is present in the list of interactions \code{int} and \code{pf} otherwise; "int" by default}
\item{pf}{penalization factor for interactions, needed if \code{type}=pf}
}
\value{
square matrix whose entries correspond to the list of interactions and parameter \code{type}
}
\description{
This transforms a list of possible interactions between proteins downloaded from STRING database
into a matrix which can be used for blacklisting/penalization in BiDAG.
}
\examples{
curnames<-colnames(kirp)
intmat<-string2mat(curnames, mapping, interactions, type="pf")
}
|
b92319744a3121763a722e40abf04cd56941d978
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ade4/examples/ungulates.Rd.R
|
2c45e9a0fdcb9c32b4c177828aebc7cd86e49887
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 905
|
r
|
ungulates.Rd.R
|
library(ade4)
### Name: ungulates
### Title: Phylogeny and quantitative traits of ungulates.
### Aliases: ungulates
### Keywords: datasets

### ** Examples

# Phylogeny + quantitative traits of ungulates (ade4 example data)
data(ungulates)
ung.phy <- newick2phylog(ungulates$tre)
plot(ung.phy, clabel.l = 1.25, clabel.n = 0.75)

# Log-transformed traits: column 1, and the mean of columns 2 and 3
# (assumed body-size-related measurements -- TODO confirm column meanings)
ung.x <- log(ungulates$tab[, 1])
ung.y <- log((ungulates$tab[, 2] + ungulates$tab[, 3]) / 2)
names(ung.x) <- names(ung.phy$leaves)
names(ung.y) <- names(ung.x)

# Ordinary regression of y on x, ignoring phylogeny
plot(ung.x, ung.y)
abline(lm(ung.y ~ ung.x))

# Trait values mapped onto the phylogeny
symbols.phylog(ung.phy, ung.x - mean(ung.x))
dotchart.phylog(ung.phy, ung.x, cle = 1.5, cno = 1.5, cdot = 1)

# Phylogenetic autocorrelation tests (need adephylo + ape).
# `&&` (scalar, short-circuiting) replaces the elementwise `&` of the
# original: the second requireNamespace() is now skipped when the first
# package is missing.
if (requireNamespace("adephylo", quietly = TRUE) && requireNamespace("ape", quietly = TRUE)) {
  tre <- ape::read.tree(text = ungulates$tre)
  adephylo::orthogram(ung.x, tre)
  # residuals from the regression: size-corrected trait
  ung.z <- residuals(lm(ung.y ~ ung.x))
  names(ung.z) <- names(ung.phy$leaves)
  dotchart.phylog(ung.phy, ung.z, cle = 1.5, cno = 1.5, cdot = 1, ceti = 0.75)
  adephylo::orthogram(ung.z, tre)
}
|
879dd1d33b9bbeac9640fd01ef0ed76e640a8b13
|
79a54bdd930b0ff24bee12107a0f2d4ea0141a12
|
/man/prep_cell_info.Rd
|
f62cff2ecade6871d7d949bb89157c07d4cba1ca
|
[] |
no_license
|
abcwcm/Klebanoff21LT2
|
957ef1d19263f35653de9347fac24ac2b8da166b
|
6bb124f4d5d97fece322d422e533a3e898c43ce8
|
refs/heads/master
| 2023-04-13T23:20:27.592502
| 2022-03-03T11:26:43
| 2022-03-03T11:26:43
| 465,661,359
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 548
|
rd
|
prep_cell_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrangling_sce.R
\name{prep_cell_info}
\alias{prep_cell_info}
\title{Add clonotype information to the colData of a SCE object}
\usage{
prep_cell_info(sce.object, clonotype_info = clono_info_wt)
}
\arguments{
\item{sce.object}{SCE object}
\item{clonotype_ino}{data.frame with the clonotype information for a given sample}
}
\value{
data.frame of the colData merged with the clonotype information
}
\description{
Add clonotype information to the colData of a SCE object
}
|
0e9f057c0459382be2128219109609c0a3ded81d
|
482ea7e84633220747229d91b9d9c03865b544b6
|
/ensembleMOS/man/ensembleMOSlognormal.Rd
|
67bc33bed54b60857822d582f173e3098d1fb6f0
|
[] |
no_license
|
Three-Poles/Reprocessing
|
b3a8aeb17e1c3efacf85fbb30b0e2f0134a54648
|
a849f369fb386c3238fd07851e5c1449d7946122
|
refs/heads/master
| 2020-08-09T19:51:09.654977
| 2019-10-14T07:53:27
| 2019-10-14T07:53:27
| 214,160,276
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,248
|
rd
|
ensembleMOSlognormal.Rd
|
\name{ensembleMOSlognormal}
\alias{ensembleMOSlognormal}
\title{
Log-normal EMOS modeling
}
\description{
Fits a log-normal EMOS model to ensemble forecasts for
specified dates.
}
\usage{
ensembleMOSlognormal(ensembleData, trainingDays, consecutive = FALSE,
dates = NULL, control = controlMOSlognormal(),
warmStart = FALSE, exchangeable = NULL)
}
\arguments{
\item{ensembleData}{
An \code{ensembleData} object including ensemble forecasts with
the corresponding verifying observations and their dates.
Missing values (indicated by \code{NA}) are allowed.
}
\item{trainingDays}{
An integer giving the number of time steps (e.g. days)
in the training period. There is no default.
}
\item{consecutive}{
If \code{TRUE} then the sequence of dates in the training set are
treated as consecutive, i.e. date gaps are ignored.
}
\item{dates}{
The dates for which EMOS forecasting models are desired.
By default, this will be all dates in \code{ensembleData}
for which modeling is allowed given the training rule.
}
\item{control}{
A list of control values for the fitting functions specified via the
function \link{controlMOSlognormal}. For details
and default values, see \link{controlMOSlognormal}.
}
\item{warmStart}{
If \code{TRUE}, then starting values for parameters in optimization
are set to the estimates of the preceding date's fit.
}
\item{exchangeable}{
A numeric or character vector or factor indicating groups of
ensemble members that are exchangeable (indistinguishable).
The modeling will have equal parameters within each group.
The default determines exchangeability from \code{ensembleData}.
}
}
\value{
A list with the following output components:
\item{training}{
A list containing information on the training length and lag and
the number of instances used for training for each modeling date.
}
\item{a}{
A vector of fitted EMOS intercept parameters for each date.
}
\item{B}{
A matrix of fitted EMOS coefficients for each date.
}
\item{c,d}{
The fitted parameters for the variance, see details.
}
}
\details{
Given an ensemble of size \eqn{m}: \eqn{X_1, \ldots , X_m}, the
following log-normal model is fit by \code{ensembleMOSlognormal}:
\deqn{Y ~ LN(\mu, \sigma)}
where \eqn{LN} denotes the log-normal distrbution with \code{meanlog}
parameter \eqn{\mu} and \code{scalelog} parameter \eqn{\sigma}, see
\link{Lognormal}. The model is parametrized such that the mean value of
the log-normal distribution is a linear function \eqn{a + b_1 X_1 + \ldots + b_m X_m}
of the ensemble forecats, and the variance is a linear function
\eqn{c + d S^2}. For transformations between \eqn{\mu, \sigma} and mean
and variance of the log-normal distribution, see Baran and Lerch (2015).
See \link{fitMOSlognormal} for details.
\code{B} is a vector of fitted regression coefficients: \eqn{b_1,
\ldots, b_m}. Specifically, \eqn{a, b_1,\ldots, b_m, c, d} are
fitted to optimize
\code{control$scoringRule} over the specified training period using
\code{optim} with \code{method = control$optimRule}.
}
\references{
S. Baran and S. Lerch, Log-normal distribution based Ensemble Model
Output Statistics models for probabilistic wind-speed forecasting.
\emph{Quarterly Journal of the Royal Meteorological Society} 141:2289--2299,
2015.
}
\seealso{
\code{\link{controlMOSlognormal}},
\code{\link{fitMOSlognormal}}
}
\examples{
data("ensBMAtest", package = "ensembleBMA")
ensMemNames <- c("gfs","cmcg","eta","gasp","jma","ngps","tcwb","ukmo")
obs <- paste("MAXWSP10","obs", sep = ".")
ens <- paste("MAXWSP10", ensMemNames, sep = ".")
windTestData <- ensembleData(forecasts = ensBMAtest[,ens],
dates = ensBMAtest[,"vdate"],
observations = ensBMAtest[,obs],
station = ensBMAtest[,"station"],
forecastHour = 48,
initializationTime = "00")
windTestFitLN <- ensembleMOSlognormal(windTestData, trainingDays = 25)
}
\keyword{models}
|
a98c9f52bf40fbe58f33e48b2854a95fcafbee2f
|
214c1b6017f13f571c309f7521ef1ee3ba7c48ec
|
/man/report_lm.Rd
|
683c9f8a9e85d27d19e2c858b4446d32543c7aa3
|
[
"MIT"
] |
permissive
|
a1trl9/svnc
|
be392560a5ed1e653f310614273b274c14ffa126
|
896b51e49929896cec272805f704764f73e93fa1
|
refs/heads/master
| 2020-09-22T11:30:33.845864
| 2019-12-01T14:40:53
| 2019-12-01T14:40:53
| 225,176,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 343
|
rd
|
report_lm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/report_lm.R
\name{report_lm}
\alias{report_lm}
\title{report_lm}
\usage{
report_lm(data, x, y)
}
\arguments{
\item{x}{key of independent variable}
\item{y}{key of dependent variable}
\item{df}{dataframe, source of plot}
}
\value{
None
}
\description{
report_lm
}
|
20b576e5a53a5b80921208d52380cdca1e07ef67
|
a68fcf7bad70e91af4b398df8bee04b9b0bda82e
|
/S34_S38_phylogenetic_comparative_methods/scripts/resources/slouch/R/regimes.R
|
f7ae897b48dc47cba38e777534b515177f0f0ff3
|
[] |
no_license
|
hj1994412/teleost_genomes_immune
|
44aac06190125b4dea9533823b33e28fc34d6b67
|
50f1552ebb5f19703b388ba7d5517a3ba800c872
|
refs/heads/master
| 2021-03-06T18:24:10.316076
| 2016-08-27T10:58:39
| 2016-08-27T10:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
regimes.R
|
# Build regime indicator vectors for the terminal taxa of a phylogeny.
# For each terminal taxon i and each regime k, record (as 0/1 integers)
# which nodes on i's root-to-tip lineage -- excluding the tip itself --
# carry regime k.  The result is a list of length R*N indexed as
# [[i + N*(k-1)]].  (`times` is accepted for interface compatibility but
# is not used here; indexing mirrors the original exactly.)
regimes <- function(topology, times, regime.specs, term) {
  n.term <- length(term)
  regime.set <- set.of.regimes(topology, regime.specs)
  n.regime <- length(regime.set)
  beta <- vector(n.regime * n.term, mode = "list")
  for (k in 1:n.regime) {
    for (i in 1:n.term) {
      lineage <- pedigree(topology, term[i])
      len <- length(lineage)
      beta[[i + n.term * (k - 1)]] <-
        as.integer(regime.specs[lineage[1:(len - 1)]] == regime.set[k])
    }
  }
  beta
}
|
5931775192904f5e67dbfcb9a7e55cc812b734cc
|
de1ab8c0db8b5fb7092953ff423e2bc2172f2a07
|
/make_LANDSAT8_layers_for_extraction.R
|
3e9fe46a691d91f4275ef2e0530b1dd5f8713ab2
|
[] |
no_license
|
ranalut/ChicagoGrasslandBirds
|
244bc88bfbd50b0d23f2881818f7b4212c552b16
|
b373c7e496f22d1c37c3fd57f58b6b185b2affa8
|
refs/heads/master
| 2020-05-26T10:46:20.318317
| 2015-04-01T05:09:34
| 2015-04-01T05:09:34
| 15,231,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,150
|
r
|
make_LANDSAT8_layers_for_extraction.R
|
### make LANDSAT 8 grids for extraction and prediction
# Build a presence/absence table for one species from BCN point-count data.
library(raster)
# NOTE(review): hard-coded absolute Windows path; not portable.
setwd("D:/Chicago_Grasslands/BIRD_DATA/BCN")
all<-read.csv("31qryBreeding_JGS_version.csv", header=T)
## trim observations to those meeting requirements
# keep valid, protocol-P21, 0.08-hour counts (the 5-minute counts per the
# plot titles below) where all observations were reported
obs<-all[all$VALID==1&all$PROTOCOL_ID=="P21"&all$DURATION_HRS==0.08&all$ALL_OBS_REPORTED==1,]
## make unique list of count submissions
# one row per unique submission (place/date/time); VALID here just counts rows
counts<-aggregate(VALID~SUB_ID+JHOUR+JDATE+YEAR+LATITUDE+LONGITUDE, obs, length)
hist(counts$YEAR, xlab="year", ylab="frequency", main="5-minute point counts by year")
# NOTE(review): the next two hist() calls are identical; one is likely redundant.
hist(counts$JDATE, xlab="julian day", ylab="frequency", main="5-minute point counts by day of year")
hist(counts$JDATE, xlab="julian day", ylab="frequency", main="5-minute point counts by day of year")
## select observations for species of interest
sppname<-paste("graspa")
spp<-obs[obs$SPECIES_CODE==sppname,]
## join to set of unique submissions
# outer join: submissions with no detection of the species get NA counts...
spp_PA<-merge(spp, counts, by=c("SUB_ID", "JHOUR", "JDATE", "LATITUDE", "LONGITUDE"), all=T)
i<-is.na(spp_PA$HOW_MANY_ATLEAST)
# ...which are then recoded to 0 (absences)
spp_PA$HOW_MANY_ATLEAST[i]<-0
# drop columns by position (6:9 first, then 7:30 of the already-reduced frame)
spp_PA<-spp_PA[,-(6:9)]
spp_PA<-spp_PA[,-(7:30)]
write.csv(spp_PA,paste(sppname, "_PA.csv", sep=""), row.names=F)
# Load the seven Landsat band grids, write working copies, do the same for
# the neighbourhood ("ngb") versions, then build and write 100x-aggregated
# ("blocky") versions. Same variables (B1..B7, B1ngb..B7ngb, B1a..B7a) and
# the same files read/written as the hand-unrolled original, driven by loops.
setwd("D:/Chicago_Grasslands/PREDICTIONS/GRIDS")
scene <- "LT50230312009140PAC01"
bands <- 1:7
# original band grids
for (b in bands) assign(paste0("B", b), raster(paste0(scene, "_B", b, ".tif")))
for (b in bands) writeRaster(get(paste0("B", b)), paste0("B", b, ".tif"))
# neighbourhood ("ngb") band grids
for (b in bands) assign(paste0("B", b, "ngb"), raster(paste0(scene, "_B", b, "ngb.tif")))
for (b in bands) writeRaster(get(paste0("B", b, "ngb")), paste0("B", b, "ngb.tif"))
### make into blocky grid
for (b in bands) assign(paste0("B", b, "a"), aggregate(get(paste0("B", b)), fact=100, fun=mean))
for (b in bands) writeRaster(get(paste0("B", b, "a")), paste0(scene, "_B", b, "a.tif"))
## run BRT (boosted regression tree) on the species presence/absence table
library(mgcv)
library(dismo)
library(pROC)
library(gbm)
library(ROCR)
library(rgdal)
library(tcltk)
# gbm.x = 9:24 selects the predictor columns; gbm.y = 4 is the response column
mydata<-read.csv(paste("D:/Chicago_Grasslands/BIRD_DATA/BCN/", sppname, "_PA_for_BRT.csv", sep=""), header=T)
spp.tc5.lr01<-gbm.step(data=mydata, gbm.x=9:24, gbm.y=4, family="poisson", tree.complexity=5, learning.rate=0.01, bag.fraction=0.5)
# BUG FIX: the original save() call was missing its closing parenthesis,
# which made the script un-parseable from this point on.
save(spp.tc5.lr01, file=paste("D:/Chicago_Grasslands/BIRD_DATA/BCN/", sppname, "_BRT", sep=""))
## make predictions
# Rebuild the covariate stack from the working .tif copies and predict the
# fitted BRT (spp.tc5.lr01, from the section above) across the grid.
setwd("D:/Chicago_Grasslands/PREDICTIONS/GRIDS")
JDATE<-raster("JDATE.tif")
JHOUR<-raster("JHOUR.tif")
B1ngb<-raster("B1ngb.tif")
B2ngb<-raster("B2ngb.tif")
B3ngb<-raster("B3ngb.tif")
B4ngb<-raster("B4ngb.tif")
B5ngb<-raster("B5ngb.tif")
B6ngb<-raster("B6ngb.tif")
B7ngb<-raster("B7ngb.tif")
B1<-raster("B1.tif")
B2<-raster("B2.tif")
B3<-raster("B3.tif")
B4<-raster("B4.tif")
B5<-raster("B5.tif")
B6<-raster("B6.tif")
B7<-raster("B7.tif")
# NOTE(review): layer order here must match the column order used when the
# model was fitted (gbm.x = 9:24) -- verify against the training CSV.
chic<-stack(B1,B2,B3,B4,B5,B6,B7,B1ngb,B2ngb,B3ngb,B4ngb,B5ngb,B6ngb,B7ngb,JDATE,JHOUR)
p<-predict(chic, spp.tc5.lr01, n.trees = spp.tc5.lr01$n.trees, type="response", progress="window", na.rm=TRUE)
writeRaster(p, paste("D:/Chicago_Grasslands/PREDICTIONS/", sppname, "_TM5.tif", sep=""))
#### NASS
# Same fit-and-predict workflow as above, but using NASS land-cover
# covariates and a different species ("easmea").
setwd("D:/Chicago_Grasslands/BIRD_DATA/BCN")
sppname<-paste("easmea")
library(mgcv)
library(dismo)
library(pROC)
library(gbm)
library(ROCR)
library(rgdal)
library(tcltk)
# gbm.x = 9:28 selects the predictor columns; gbm.y = 4 is the response column
mydata<-read.csv(paste("D:/Chicago_Grasslands/BIRD_DATA/BCN/", sppname, "_PA_for_NASS_BRT.csv", sep=""), header=T)
spp.tc5.lr01<-gbm.step(data=mydata, gbm.x=9:28, gbm.y=4, family="poisson", tree.complexity=5, learning.rate=0.01, bag.fraction=0.5)
save(spp.tc5.lr01, file=paste("D:/Chicago_Grasslands/BIRD_DATA/BCN/", sppname, "_NASS_BRT", sep=""))
## make predictions
setwd("D:/Chicago_Grasslands/PREDICTIONS/NASS_VARIABLES")
JDATE<-raster("JDATE.tif")
JHOUR<-raster("JHOUR.tif")
V12_100<-raster("V12_100.tif")
V12_500<-raster("V12_500.tif")
dec12ngb<-raster("dec12ngb.tif")
devmh12ngb<-raster("devmh12ngb.tif")
devol12ngb<-raster("devol12ngb.tif")
wat12ngb<-raster("wat12ngb.tif")
wowe12ngb<-raster("wowe12ngb.tif")
paal12ngb<-raster("paal12ngb.tif")
hergr12ngb<-raster("hergr12ngb.tif")
oth12ngb<-raster("oth12ngb.tif")
decid12<-raster("decid12.tif")
devmh12<-raster("devmh12.tif")
devol12<-raster("devol12.tif")
wat12<-raster("wat12.tif")
wowe12<-raster("wowe12.tif")
pasalf12<-raster("pasalf12.tif")
hergr12<-raster("hergr12.tif")
oth12<-raster("oth12.tif")
# NOTE(review): layer order must match the training-data column order
# (gbm.x = 9:28) -- verify against the NASS training CSV.
chic<-stack(decid12,devmh12,devol12,wat12,wowe12,pasalf12,oth12,hergr12,dec12ngb,devmh12ngb,devol12ngb,wat12ngb,wowe12ngb,paal12ngb,oth12ngb,hergr12ngb,V12_100,V12_500,JDATE,JHOUR)
p<-predict(chic, spp.tc5.lr01, n.trees = spp.tc5.lr01$n.trees, type="response", progress="window", na.rm=TRUE)
writeRaster(p, paste("D:/Chicago_Grasslands/PREDICTIONS/", sppname, "_NASS.tif", sep=""))
b1def1ea2a31f161e695a7ed48121f1e9bb475d5
|
9449f53d55603e4ce50ce8dc7275dcf3af9fa00f
|
/pollen-on-silk/load-genes.R
|
9aac01c768dddb195364abf4fb279d3659244b1d
|
[] |
no_license
|
carnegie-dpb/evanslab-java
|
96cd835a87bea453278baf3d04a6460879aa32bb
|
fd26166340a911c74676eafa1b9dcd97502d72f4
|
refs/heads/master
| 2021-07-25T14:27:25.427832
| 2021-06-24T17:39:01
| 2021-06-24T17:39:01
| 79,609,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 359
|
r
|
load-genes.R
|
##
## load data from an alleles to genes file
##
## params
# NOTE(review): these thresholds appear to be encoded in the file name below
# ("pollen-genes-10-0.1-0.9.tsv") rather than used programmatically here --
# keep them in sync with the file actually loaded.
sourceAltTotalMin=10
sourceAltReadRatioMin=0.5
sourceRefFractionMax=0.1
targetRefFractionMin=0.9
genes = read.table("pollen-genes-10-0.1-0.9.tsv", header=T, sep="\t")
# derived totals: reference = forward + reverse reads, alternate likewise
genes$SrcRefTot = genes$SrcRF + genes$SrcRR
genes$SrcAltTot = genes$SrcAF + genes$SrcAR
# strip the first 5 characters of the gene identifier (a fixed prefix)
genes$GeneID = substring(genes$GeneID,6)
|
d3a5d81c3e70567a671b4bc8e921d98784bb69e1
|
89cd9cf8c4124d1a9dd5803d94af1eeee158b05e
|
/Old Files/my functions.R
|
274f6b4abb5f69326d6417e763fe471180128c90
|
[] |
no_license
|
NicolaPilia/Thesis
|
1c7c4b69e9eec1b2072b7e84c07f76a11bcea303
|
077d850b5665d4e31a8992d13f4f1f73bd0af62d
|
refs/heads/master
| 2022-12-07T02:38:16.431305
| 2020-09-02T17:36:07
| 2020-09-02T17:36:07
| 276,679,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
r
|
my functions.R
|
#My functions
#for preparing data for Confusion Matrix
# Prepare a vector of predicted probabilities for a confusion matrix:
# threshold at 0.5, recode missing predictions as the negative class, and
# return a factor with levels "0"/"1".
#
# x: numeric vector of predicted probabilities (may contain NA).
# Returns: factor of "0"/"1" labels, same length as x.
conf_clean <- function(x) {
  # BUG FIX: the original piped through %>% (magrittr), which is never
  # loaded in this file; plain nested calls remove that hidden dependency.
  x <- as.vector(factor(ifelse(x > 0.5, 1, 0)))
  # NA predictions count as the negative class ("0")
  x[is.na(x)] <- 0
  # Return the factor visibly (the original ended on an assignment, whose
  # value is only returned invisibly).
  as.factor(x)
}
# Run the full text pre-processing + latent-semantic-analysis pipeline on a
# character vector of documents.
#
# y: character vector of documents.
# z: number of latent dimensions passed to lsa().
#
# Returns (invisibly, as in the original, which ended on an assignment) a
# data.frame of the document-space matrix ($dk) from the LSA decomposition.
complete_analysis <- function(y, z) {
  library(tm)
  # Build the corpus and normalise the text: collapse whitespace, strip
  # punctuation and numbers, drop SMART stopwords, then stem.
  docs <- Corpus(VectorSource(y))
  docs <- tm_map(docs, stripWhitespace)
  docs <- tm_map(docs, removePunctuation)
  docs <- tm_map(docs, removeNumbers)
  docs <- tm_map(docs, removeWords, stopwords("SMART"))
  docs <- tm_map(docs, stemDocument)
  # TF-IDF weighting of the term-document matrix, then LSA
  weighted <- weightTfIdf(TermDocumentMatrix(docs))
  library(lsa)
  decomposition <- lsa(weighted, dim = z)
  invisible(as.data.frame(as.matrix(decomposition$dk)))
}
|
023686f364479aa6655cee4b95d4e79726bb23f0
|
c9420d71ce4b56c91a4c1326be3b79c07bddb428
|
/data-raw/sig_exp.R
|
3a7676b82916c142193aed7091784056a99f34c2
|
[] |
no_license
|
SiYangming/TSGene
|
7deaa27375a5ef7427374a791d38a218e4aca146
|
bca0a8f47071b5454a27823333a021c44a12a2be
|
refs/heads/master
| 2023-02-23T18:46:03.289378
| 2021-02-06T18:04:24
| 2021-02-06T18:04:24
| 336,546,963
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
sig_exp.R
|
library(readr)
library(dplyr)
### All the precomputed P-values for gene expression difference between tumor
### and normal samples for all human tumor suppressor genes in pan-cancer
# Download the TSGene table and cache it twice: as an .rda for the package's
# data/ directory and as a raw TSV snapshot under data-raw/.
raw <- read_tsv("https://bioinfo.uth.edu/TSGene/sig_exp.txt")
sig_exp <- raw
save(sig_exp, file = "data/sig_exp.rda")
write_tsv(sig_exp, file = "data-raw/sig_exp.tsv")
|
38a711f2aeb830edbe0b7a2b3268b8f0817388ad
|
b926f0ac08bfe1b7c0feb654849cbdc70330d462
|
/man/print_rdfa.Rd
|
83928908081da26c3bb73ec705680fd5d6b8fcf8
|
[
"CC0-1.0"
] |
permissive
|
hpiwowar/knitcitations
|
2157e0c94c376dc5a539996c1b472310d0ae0a9d
|
97456fe4fa138eac68dc4e242500bf9fe8c4012c
|
refs/heads/master
| 2021-01-17T22:50:40.657655
| 2013-02-11T19:51:22
| 2013-02-11T19:51:22
| 8,145,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
rd
|
print_rdfa.Rd
|
\name{print_rdfa}
\alias{print_rdfa}
\title{format for rdfa markup of references}
\usage{
print_rdfa(bib)
}
\arguments{
\item{bib}{a bibentry object}
}
\description{
format for rdfa markup of references
}
\keyword{internal}
|
d91ebec91a7ef0bb4049dfff4e36c41259ed8487
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OpenMx/examples/mxFitFunctionAlgebra.Rd.R
|
8f0c79a4b7ffaad6838f3580bf7d056af90170cd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 894
|
r
|
mxFitFunctionAlgebra.Rd.R
|
library(OpenMx)
### Name: mxFitFunctionAlgebra
### Title: Create MxFitFunctionAlgebra Object
### Aliases: mxFitFunctionAlgebra MxFitFunctionAlgebra-class
### print,MxFitFunctionAlgebra-method show,MxFitFunctionAlgebra-method
### ** Examples
# Create and fit a very simple model that adds two numbers using mxFitFunctionAlgebra
library(OpenMx)
# Create a matrix 'A' with no free parameters
A <- mxMatrix('Full', nrow = 1, ncol = 1, values = 1, name = 'A')
# Create an algebra 'B', which defines the expression A + A
B <- mxAlgebra(A + A, name = 'B')
# Define the objective function for algebra 'B'
objective <- mxFitFunctionAlgebra('B')
# Place the algebra, its associated matrix and
# its objective function in a model
tmpModel <- mxModel(model="Addition", A, B, objective)
# Evaluate the algebra
tmpModelOut <- mxRun(tmpModel)
# View the results (the minimised value of algebra 'B')
tmpModelOut$output$minimum
|
e0bb090666775148b3893046bdc5c633f3519449
|
2b2aee3352f8a10c121fe74036eddec01b3ee595
|
/man/initializeMutationRate.Rd
|
017d44699b6d638c52c1d409c68924623e0d0a43
|
[
"MIT"
] |
permissive
|
rdinnager/slimr
|
56f1fef0a83198bce292dd92dc1014df87c2d686
|
e2fbb7115c7cca82dabd26dc6560e71a8cd0958b
|
refs/heads/master
| 2023-08-21T14:00:36.089104
| 2023-07-31T03:11:09
| 2023-07-31T03:11:09
| 226,999,099
| 8
| 1
|
NOASSERTION
| 2023-08-03T05:44:32
| 2019-12-10T01:04:16
|
R
|
UTF-8
|
R
| false
| true
| 5,828
|
rd
|
initializeMutationRate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{initializeMutationRate}
\alias{initializeMutationRate}
\alias{Initialize$initializeMutationRate}
\alias{.Init$initializeMutationRate}
\title{SLiM method initializeMutationRate}
\usage{
initializeMutationRate(rates, ends, sex)
}
\arguments{
\item{rates}{An object of type numeric. See details for description.}
\item{ends}{An object of type null or integer. The default value is \code{NULL}.
See details for description.}
\item{sex}{An object of type string. Must be of length 1 (a singleton). The
default value is \code{"*"}. See details for description.}
}
\value{
An object of type void.
}
\description{
Documentation for SLiM function \code{initializeMutationRate}, which is a method
of the SLiM class \code{\link{Initialize}}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Documentation for this function can be found in the official
\href{http://benhaller.com/slim/SLiM_Manual.pdf#page=622}{SLiM manual: page
622}.
Set the mutation rate per base position per gamete. To be precise, this
mutation rate is the expected mean number of mutations that will occur per base
position per gamete; note that this is different from how the recombination rate
is defined (see initializeRecombinationRate()). The number of mutations that
actually occurs at a given base position when generating an offspring genome
is, in effect, drawn from a Poisson distribution with that expected mean (but
under the hood SLiM uses a mathematically equivalent but much more efficient
strategy). It is possible for this Poisson draw to indicate that two or more new
mutations have arisen at the same base position, particularly when the mutation
rate is very high; in this case, the new mutations will be added to the site
one at a time, and as always the mutation stacking policy (see section 1.5.3)
will be followed. There are two ways to call this function. If the optional
ends parameter is NULL (the default), then rates must be a singleton value
that specifies a single mutation rate to be used along the entire chromosome.
If, on the other hand, ends is supplied, then rates and ends must be the same
length, and the values in ends must be specified in ascending order. In that
case, rates and ends taken together specify the mutation rates to be used along
successive contiguous stretches of the chromosome, from beginning to end; the
last position specified in ends should extend to the end of the chromosome
(i.e. at least to the end of the last genomic element, if not further). For
example, if the following call is made: initializeMutationRate(c(1e-7, 2.5e-8),
c(5000, 9999)); then the result is that the mutation rate for bases 0...5000
(inclusive) will be 1e-7, and the rate for bases 5001...9999 (inclusive) will be
2.5e-8. Note that mutations are generated by SLiM only within genomic elements,
regardless of the mutation rate map. In effect, the mutation rate map given
is intersected with the coverage area of the genomic elements defined; areas
outside of any genomic element are given a mutation rate of zero. There is no
harm in supplying a mutation rate map that specifies rates for areas outside
of the genomic elements defined; that rate information is simply not used.
The overallMutationRate family of properties on Chromosome provide the overall
mutation rate after genomic element coverage has been taken into account, so it
will reflect the rate at which new mutations will actually be generated in the
simulation as configured. If the optional sex parameter is "*" (the default),
then the supplied mutation rate map will be used for both sexes (which is the
only option for hermaphroditic simulations). In sexual simulations sex may be
"M" or "F" instead, in which case the supplied mutation rate map is used only
for that sex (i.e., when generating a gamete from a parent of that sex). In this
case, two calls must be made to initializeMutationRate(), one for each sex, even
if a rate of zero is desired for the other sex; no default mutation rate map
is supplied. In nucleotide-based models, initializeMutationRate() may not be
called. Instead, the desired sequence-based mutation rate(s) should be expressed
in the mutationMatrix parameter to initializeGenomicElementType(). If variation
in the mutation rate along the chromosome is desired, initializeHotspotMap()
should be used.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016-2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\seealso{
Other Initialize:
\code{\link{Init}},
\code{\link{initializeAncestralNucleotides}()},
\code{\link{initializeGeneConversion}()},
\code{\link{initializeGenomicElementType}()},
\code{\link{initializeGenomicElement}()},
\code{\link{initializeHotspotMap}()},
\code{\link{initializeInteractionType}()},
\code{\link{initializeMutationTypeNuc}()},
\code{\link{initializeMutationType}()},
\code{\link{initializeRecombinationRate}()},
\code{\link{initializeSLiMModelType}()},
\code{\link{initializeSLiMOptions}()},
\code{\link{initializeSex}()},
\code{\link{initializeSpecies}()},
\code{\link{initializeTreeSeq}()}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
\concept{Initialize}
|
b24dec986ce192d459b3c6f2a0ba025d5b17ee17
|
385bb86445590ecd4d6eaab510f55585861b9fcb
|
/R/ugomquantreg-package.R
|
0fe1d0a0d2aa6217b447fc00332d37f4d03b7272
|
[] |
no_license
|
cran/ugomquantreg
|
fbe74a719bcd2479f550d93ef0b2395f0540f585
|
819888fcee801c3f1b24b25fda45d72f91ff1009
|
refs/heads/master
| 2023-06-06T03:41:47.153531
| 2021-06-28T05:40:02
| 2021-06-28T05:40:02
| 381,083,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,019
|
r
|
ugomquantreg-package.R
|
#' @docType package
#' @name ugomquantreg-package
#' @aliases ugomquantreg-package
#'
#' @title Overview of the ugomquantreg package
#'
#' @description The \pkg{ugomquantreg} package implements the probability density function, quantile function, cumulative distribution function and random number generation function for unit-Gompertz distribution parameterized as a function of its \eqn{\tau}-th quantile, \eqn{0 <\tau<1}. Some function are written in \proglang{C++} using \pkg{Rcpp}.
#'
#' @details
#'
#' \code{\link[ugomquantreg]{ammonia}}: Ammonia oxidized to acid nitric data set.
#'
#' \code{\link[ugomquantreg]{bodyfat}}: Body fat data set.
#'
#' \code{\link[ugomquantreg]{UGOM}}: For quantile modeling (con/in)ditional on covariate(s).
#'
#' @author Josmar Mazucheli \email{jmazucheli@gmail.com}
#'
#' @author Bruna Alves \email{pg402900@uem.br}
#'
#'
#' @useDynLib ugomquantreg
#' @importFrom Rcpp sourceCpp
NULL
# Unload the package's compiled (dynamic) library when the namespace is
# unloaded, releasing the registered native routines.
.onUnload <- function (libpath) {
library.dynam.unload("ugomquantreg", libpath)
}
|
5c7863da524a1de2e9a1c86947efd50c79334cba
|
850a375ce77a10f2e52e46c4ff4229c45e0798e1
|
/src/tt_Post.R
|
deabfa9fb6782f44086231b82db39af198b683ff
|
[] |
no_license
|
josephedwardoreilly/TidyTuesday
|
e7d1f04468aa6c6202f4661e4df71a03e7c1ef1e
|
68e7a1489b097f1160a60d0792b884d81ec0ad9d
|
refs/heads/master
| 2023-07-15T21:46:33.986887
| 2021-08-25T19:41:30
| 2021-08-25T19:41:30
| 354,812,775
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,420
|
r
|
tt_Post.R
|
require(data.table)
require(ggplot2)
require(viridis)
require(ggtext)
# Load the data
tidy.week <- '2021-04-13'
tuesdata <- tidytuesdayR::tt_load(tidy.week)
# Data Wrangling/Prep -----------------------------------------------------
x <- data.table(tuesdata$post_offices)
x <- x[, .(name, established, discontinued, state)]
# clean data a bit
# keep plausible years for New York offices (drop missing/garbled dates)
x <- x[established > 1400 &
#!is.na(established) &
discontinued < 2001 &
discontinued > 1400 &
state == 'NY']
# Number of openings/closures by year
# full outer join of yearly opening counts and yearly closing counts
z <- merge(
x[, .(new = .N), by = .(Y = established)],
x[, .(closed = .N), by = .(Y = discontinued)],
by = 'Y',
all.x = TRUE,
all.y = TRUE)
# years with no openings (or no closings) appear as NA after the join; zero them
z[is.na(new), new := 0]
z[is.na(closed), closed := 0]
z[, net := new - closed]
# restrict to the plotted period 1800-1950
z <- z[Y > 1799 & Y < 1951]
# Plotting ----------------------------------------------------------------
bg <- 'grey90'
# Main title data
title.d <- data.table(label = 'New York State Post Office Closures',
x = 1905, y = 1.25)
# label data
label.d <- data.table(label = 'Rural Free Delivery was instituted at the turn of the 20th century; this resulted in the mass closure of post offices as rural residents began to receive mail delivered to their door.',
x = 1905, y = .9)
# Handmade x-axis data
label.x <- data.table(label = c(1825, 1875, 1925),
x = c(1825, 1875, 1925),
y = rep(1.475, 3))
# Build the plot as an object so it can be passed explicitly to ggsave() below.
p <- ggplot(
z,
aes(x = Y, y = 1, fill = net))+
geom_tile(stat = 'identity', height = 1) +
scale_fill_viridis(option = 'A') +
geom_textbox(
data = title.d,
aes(x, y, label = label),
inherit.aes = FALSE,
hjust = 0,
size = 12.5,
fill = NA,
width = grid::unit(0.25, "npc"),
box.color = NA,
color = "white",
family = 'Apercu Pro',
alpha = 0.95) +
geom_textbox(
data = label.d,
aes(x, y, label = label),
inherit.aes = FALSE,
hjust = 0,
size = 7.5,
fill = NA,
width = grid::unit(0.2, "npc"),
box.color = NA,
color = "grey90",
family = 'Apercu Pro',
alpha = 0.75) +
geom_textbox(
data = label.x,
aes(x, y, label = label),
inherit.aes = FALSE,
hjust = 0,
size = 6.25,
fill = NA,
width = grid::unit(0.2, "npc"),
box.color = NA,
color = "grey20",
family = 'Apercu Pro',
alpha = 0.75) +
labs(caption = "Visualisation by Joe O'Reilly (github.com/josephedwardoreilly)\nData from TidyTuesday via Cameron and Helbock 2021 - https://doi.org/10.7910/DVN/NUKCNA") +
theme_void() +
guides(
fill = guide_colourbar(
barwidth = 30,
barheight = 0.5,
ticks = FALSE,
title.hjust = 0.5,
title = 'Net Change In Number Of Operating Post Offices',
title.position = 'bottom',
label.theme = element_text(family = 'Apercu Pro', color = 'grey50'),
title.theme = element_text(family = 'Apercu Pro', color = 'grey50'))) +
theme(
panel.background = element_rect(color = bg, fill = bg),
plot.background = element_rect(fill = bg, color = bg),
plot.margin = margin(c(10, 10 , 10, 10)),
panel.border = element_blank(),
plot.caption = element_text(family = 'Apercu Pro', color = 'grey20'),
plot.caption.position = 'plot',
legend.position = 'bottom')
# BUG FIX: ggsave() was previously chained onto the plot with `+`, which is
# unsupported in current ggplot2 (ggsave would run before the plot exists).
# Save the built plot explicitly instead.
ggsave(
filename = here::here('plots', paste0(tidy.week, '.png')),
plot = p,
width = 20,
height = 7,
device = 'png')
|
21e65797480a01ba5a9bbe77d500a0fb553f31d0
|
edee4a9c4cf3c35a52dfc99ac53279ab23e069ab
|
/examples/FeatureCollection/idw_interpolation.R
|
45ed6515a63ce1d4c0c4be88598736795df9ddba
|
[
"Apache-2.0"
] |
permissive
|
benardonyango/rgee
|
a8dd22a72f2c77a0d1e88f6177c740942fe2cfbc
|
e9e0f2fa7065e79c1c794bd7387fd0af633031ff
|
refs/heads/master
| 2022-04-09T18:10:23.689798
| 2020-03-31T10:56:00
| 2020-03-31T10:56:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,904
|
r
|
idw_interpolation.R
|
library(rgee)
# ee_reattach() # reattach ee as a reserved word
# Start/authenticate the Earth Engine session before any ee$ calls below.
ee_Initialize()
# Rebuild a sampled Earth Engine feature as a point Feature: geometry from the
# sampled longitude/latitude bands, with its ch4 value attached as a property.
sampling <- function(sample) {
  latitude <- sample$get('latitude')
  longitude <- sample$get('longitude')
  methane <- sample$get('ch4')
  ee$Feature(ee$Geometry$Point(c(longitude, latitude)), list(ch4 = methane))
}
# Import two weeks of S5P methane and composite by mean.
ch4 = ee$ImageCollection('COPERNICUS/S5P/OFFL/L3_CH4')$
select('CH4_column_volume_mixing_ratio_dry_air')$
filterDate('2019-08-01', '2019-08-15')$
mean()$
rename('ch4')
# Define an area to perform interpolation over.
# (a lon/lat rectangle over the upper Midwest US)
aoi = ee$Geometry$Polygon(
coords = list(
c(-95.68487605978851, 43.09844605027055),
c(-95.68487605978851, 37.39358590079781),
c(-87.96148738791351, 37.39358590079781),
c(-87.96148738791351, 43.09844605027055)
),
geodesic = TRUE
)
# Sample the methane composite to generate a FeatureCollection.
# pixelLonLat() adds lon/lat bands so sampling() can rebuild point geometries.
samples = ch4$addBands(ee$Image$pixelLonLat())$
sample(
region = aoi,
numPixels = 1500,
scale = 1000,
projection = 'EPSG:4326')$
map(sampling)
# Combine mean and standard deviation reducers for efficiency.
combinedReducer = ee$Reducer$mean()$
combine(
reducer2 = ee$Reducer$stdDev(),
sharedInputs = TRUE)
# Estimate global mean and standard deviation from the points.
stats = samples$reduceColumns(
reducer = combinedReducer,
selectors = list('ch4'))
# Do the interpolation, valid to 70 kilometers.
interpolated = samples$inverseDistance(
range = 7e4,
propertyName = 'ch4',
mean = stats$get('mean'),
stdDev = stats$get('stdDev'),
gamma = 0.3
)
# Define visualization arguments.
band_viz = list(
min = 1800,
max = 1900,
palette = c(
'0D0887', '5B02A3',
'9A179B', 'CB4678',
'EB7852', 'FBB32F',
'F0F921'
)
)
# Display to map.
# Map.centerObject(ee.FeatureCollection(aoi), 7)
Map$addLayer(ch4, band_viz, 'CH4')
# Map.addLayer(interpolated, band_viz, 'CH4 Interpolated')
|
6c28c5a981e123e133de8cde4af90dc6bcfd67df
|
3d3dca4a42b1777c13108e03ca0e3bfcd9a93205
|
/Portfolios/O_CS_load.R
|
0420b9574086b6f5bdee130e269a29cf318222e9
|
[] |
no_license
|
BioAimie/AnalyticsWebHub
|
90afaaf4abf09ef0799ddaa5bd762233975f2e38
|
4f650049cdbfb2ce41ab072655878a5a7a12d5e7
|
refs/heads/master
| 2021-03-27T12:31:24.229879
| 2017-07-27T16:50:14
| 2017-07-27T16:50:14
| 74,378,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
O_CS_load.R
|
library(RODBC)
# Open the connection to PMS1
PMScxn <- odbcConnect("PMS_PROD")
# Helper: read a SQL file and run it against the open PMS connection,
# returning the result as a data frame. Collapses the 12x repeated
# readLines/paste/sqlQuery boilerplate; per-query behaviour is unchanged.
run_pms_query <- function(sql_path) {
  queryText <- readLines(sql_path)
  query <- paste(queryText, collapse = "\n")
  sqlQuery(PMScxn, query)
}
stockInv.df <- run_pms_query("SQL/O_CS_StockLevels.sql")
refurbShip.df <- run_pms_query("SQL/O_CS_RefurbShipments.sql")
tier.df <- run_pms_query("SQL/O_CS_ServiceTier.sql")
rmas.df <- run_pms_query("SQL/O_CS_RMAInfo.sql")
complaints.df <- run_pms_query("SQL/O_CS_ComplaintsSummary.sql")
loaners.df <- run_pms_query("SQL/O_CS_LoanerRMA.sql")
acct.df <- run_pms_query("SQL/O_CS_CustomerAccounts.sql")
serviceCenter.df <- run_pms_query("SQL/O_CS_ServiceCenterReceived.sql")
loanerReceipt.df <- run_pms_query("SQL/O_CS_LoanerRMAReceived.sql")
tradeupReceipt.df <- run_pms_query("SQL/O_CS_TradeUpRMAReceived.sql")
custPouches.df <- run_pms_query("SQL/O_CS_CustPouchesShippedYear.sql")
custComplaints.df <- run_pms_query("SQL/O_CS_CustomerComplaints.sql")
odbcClose(PMScxn)
|
5dd82baa915e78eae6fc73850200319b9272b5c1
|
beba131dedd39cfdd6e18f45e5edcc1d23ee037e
|
/plot6.R
|
a2b044ae6be1cdd8412aadad2b019a6c15058efa
|
[] |
no_license
|
taserian/ExDataProject2
|
23f4952b2cd46548f9a8739ab90d154c1fb508f8
|
afb94c2cbfa6a32d68cc86bf0c538c40316407ef
|
refs/heads/master
| 2021-01-10T02:54:02.821644
| 2015-05-24T16:42:45
| 2015-05-24T16:42:45
| 36,149,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,302
|
r
|
plot6.R
|
# Compare emissions from motor vehicle sources in Baltimore City with emissions
# from motor vehicle sources in Los Angeles County, California (fips ==
# "06037"). Which city has seen greater changes over time in motor vehicle
# emissions?
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Motor-vehicle sources: any SCC whose Level-Two description mentions "vehicle"
vehicles <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case=TRUE)
vehiclesSCC <- SCC[vehicles,]$SCC
vehiclesData <- NEI[NEI$SCC %in% vehiclesSCC,]
# CONSISTENCY FIX: fips is compared as a character code for LA ("06037",
# which needs its leading zero), so use a character literal for Baltimore
# too rather than relying on implicit numeric-to-character coercion of 24510.
vehiclesBaltimore <- vehiclesData[vehiclesData$fips == "24510",]
vehiclesBaltimore$city <- "Baltimore City"
vehiclesLosAng <- vehiclesData[vehiclesData$fips=="06037",]
vehiclesLosAng$city <- "Los Angeles County"
citiesData <- rbind(vehiclesBaltimore, vehiclesLosAng)
# Render side-by-side faceted bar charts of emissions by year to a PNG device.
png(filename = "plot6.png",
width = 480, height = 480, units = "px", pointsize = 14,
bg = "white", res = NA)
library(ggplot2)
comparisonPlot <- ggplot(citiesData, aes(x=factor(year), y=Emissions, fill=city)) +
geom_bar(aes(fill=year), stat="identity") +
facet_grid(scales="free", space="free", .~city) +
guides(fill=FALSE) +
theme_bw() +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (tons)")) +
labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
print(comparisonPlot)
dev.off()
|
688692cdf7cdcac8916974f60f55f5255c587a3e
|
56b32941415e9abe063d6e52754b665bf95c8d6a
|
/R-Portable/App/R-Portable/library/igraph/tests/test_get.diameter.R
|
ca4456b2fd613bfc3d6531345bd57903e7af963c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"GPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"MIT"
] |
permissive
|
voltek62/seo-viz-install
|
37ed82a014fc36e192d9a5e5aed7bd45327c8ff3
|
e7c63f4e2e4acebc1556912887ecd6a12b4458a0
|
refs/heads/master
| 2020-05-23T08:59:32.933837
| 2017-03-12T22:00:01
| 2017-03-12T22:00:01
| 84,758,190
| 1
| 0
|
MIT
| 2019-10-13T20:51:49
| 2017-03-12T21:20:14
|
C++
|
UTF-8
|
R
| false
| false
| 516
|
r
|
test_get.diameter.R
|
context("get_diameter")
test_that("get_diameter works", {
  library(igraph)
  g <- make_ring(10)
  E(g)$weight <- sample(seq_len(ecount(g)))
  # weighted case: the returned path's endpoints must lie at diameter distance
  d <- diameter(g)
  gd <- get_diameter(g)
  sp <- distances(g)
  expect_that(d, equals(max(sp)))
  expect_that(sp[ gd[1], gd[length(gd)] ], equals(d))
  # unweighted case (weights=NA): diameter counts edges, so the returned
  # path contains d + 1 vertices
  d <- diameter(g, weights=NA)
  gd <- get_diameter(g, weights=NA)
  sp <- distances(g, weights=NA)
  expect_that(d, equals(max(sp)))
  # BUG FIX: this comparison was previously computed and its result silently
  # discarded; wrap it in an expectation so it actually asserts something.
  expect_true(length(gd) == d + 1)
  expect_that(sp[ gd[1], gd[length(gd)] ], equals(d))
})
|
339966515601565b190ab6a1a3ccbeb85b656d5e
|
331d29945b9a4dfa95ae3787702935c9f89dd22a
|
/src/plot_data/NHC_map_multiple_versions.R
|
d4b1c0585075cec96200d5e0374bb2a5ed41a1e9
|
[] |
no_license
|
amcarter/NHC_2019_metabolism
|
80647234ab4e68f788d7589511f90108903eb5ca
|
466b96027e751dfb1d314ee04f0d7ab2697abf2d
|
refs/heads/master
| 2023-06-22T05:58:48.498863
| 2021-07-23T04:00:07
| 2021-07-23T04:00:07
| 255,482,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,800
|
r
|
NHC_map_multiple_versions.R
|
################################
# Make Map of NHC watershed with different subsets of sites for presentation
# A Carter
# 3.27.2021
library(nhdplusTools)
library(tidyverse)
library(sf)
library(tmap)
library(rgdal)
library(maps)
library(rgee)
# NOTE(review): setwd() to an absolute personal path makes this script
# non-portable; consider here::here() or relative paths.
setwd('C:/Users/Alice Carter/Dropbox (Duke Bio_Ea)/projects/hall_50yl2/NHC_2019_metabolism')
# Get sample station locations
syn_sites <- read.csv("data/map_files/NC_synoptic_site_coordinates.csv",
header=T, stringsAsFactors = F)
upper_sites <- read.csv('data/map_files/NHCsite_50yl_coordinates.csv',
header = T, stringsAsFactors = F)
# rows 1:13 are used as the synoptic stations and row 15 as the WWTP --
# TODO confirm these row positions against the CSV contents
syn_sites_sf <- st_as_sf(syn_sites[c(1:13),], coords=c("Long","Lat"),
remove=F, crs=4326)
wwtp_sf <- st_as_sf(syn_sites[15,], coords=c("Long","Lat"),remove=F, crs=4326)
up_sites_sf <- st_as_sf(upper_sites, coords = c('longitude', 'latitude'),
remove = F, crs = 4326)
# split the 50-year-later sites by survey era ("now" vs Hall's original sites)
carter_sites <- filter(up_sites_sf, type == 'now')
hall_sites <- filter(up_sites_sf, type == 'hall')
# get streamlines, etc
# nhc_lines = nhdplusTools::navigate_nldi(
# nldi_feature = list(featureSource = "comid",
# featureID = as.character(comid)),
# mode = 'UT', data_source = '', distance_km = 100)
#
# st_write(st_combine(nhc_lines),
# dsn = 'data/map_files',
# layer = 'stream_lines',
# driver = 'ESRI shapefile',
# delete_layer = TRUE)
#
# hall_study_reach = filter(nhc_lines,
# nhdplus_comid %in% c(8895490, 8895362, 8895420, 8895440)) %>%
# st_combine()
#
# st_write(hall_study_reach,
# dsn = 'data/map_files',
# layer = 'hall_study_reach',
# driver = 'ESRI shapefile',
# delete_layer = TRUE)
#
# nhc_ripar = hall_study_reach %>%
# st_transform(crs = 4326) %>%
# st_buffer(dist = 250) %>%
# st_transform(crs = 4326)
#
# st_write(nhc_ripar,
# dsn = 'data/map_files',
# layer = 'riparian',
# driver = 'ESRI shapefile',
# # driver = 'GeoJSON')
# delete_layer = TRUE)
#synoptic jobbies
# NHD flowlines for the NHC watershed, subset to the two mainstem streams.
cur_nhd <- st_read("data/map_files/NHC_NHD_subset.gpkg")
NHC_mainstem <- cur_nhd %>%
filter(gnis_name=="Mud Creek" | gnis_name=="New Hope Creek"|comid==8888400)
# Reaches upstream of the study area, identified by hydrologic sequence number.
UNHC_NHD <- NHC_mainstem %>%
filter(hydroseq > 250019295 &gnis_name=="New Hope Creek")
UMC_NHD <- NHC_mainstem %>%
filter(hydroseq > 250087220 & gnis_name=="Mud Creek" )
# The longitudinal sampling transect = mainstem minus the upstream reaches.
longitudinal_transect <- NHC_mainstem %>%
filter(!comid %in% UNHC_NHD$comid)%>%
filter(!comid %in% UMC_NHD$comid)
# load shape files
duke_forest_boundary <- st_read("data/map_files/2019_boundary.shp")
korstian_div <- filter(duke_forest_boundary, DIVISION == 'Korstian') %>%
sf::st_transform(crs=4326)
stream_line <- st_read("data/map_files/stream_lines.shp")
riparian_boundary <- st_read("data/map_files/riparian.shp")
study_reaches_line <- st_read("data/map_files/hall_study_reach.shp")
watershed_boundary <- st_read('data/map_files/nhcwsboundary.shp')
mud_ws_boundary <- st_read('data/map_files/mudwsboundary.shp')
upper_ws_boundary <- st_read("data/map_files/nhc_wb_streamstats.shp")
# make maps
# tmap_mode('view')
tmap_mode('plot')
# Basic watershed map: boundary outline plus the stream network.
# BUG FIX (here and in the maps below): `show.labels = 3` was passed inside
# the `position` vector of tm_compass() -- it is a separate tm_compass()
# argument, and folding it into position yields an invalid length-3 position.
basic_map <- tm_shape(watershed_boundary) +
  tm_polygons(alpha = 0, border.col = "black", lwd = 1.5) +
  tm_shape(stream_line) + tm_lines(col = 'grey45', lwd = 1.2) +
  tm_compass(type = "arrow", position = c("right", "bottom"), show.labels = 3,
             size = 3, text.size = 1) +
  tm_scale_bar(text.size = 1, position = "right", width = .2) +
  tm_style(style = 'white') +
  tm_layout(frame = FALSE, bg.color = "white")
tmap_save(basic_map, filename = "figures/map/basic_watershed.png",
          bg = "white", dpi = 300)
# Synoptic map: same base layers plus the 13 synoptic sampling sites.
synoptic <- tm_shape(watershed_boundary) +
  tm_polygons(alpha = 0, border.col = "black", lwd = 1.5) +
  tm_shape(stream_line) + tm_lines(col = 'grey45', lwd = 1.2) +
  # tm_shape(longitudinal_transect) + tm_lines(col = 'black', lwd = 2) +
  tm_shape(syn_sites_sf) + tm_dots(col = "brown3", size = 0.4) +
  tm_compass(type = "arrow", position = c("right", "bottom"), show.labels = 3,
             size = 3, text.size = 1) +
  tm_scale_bar(text.size = 1, position = "right", width = .2) +
  tm_style(style = 'white')
tmap_save(synoptic, filename = "figures/map/synoptic_watershed.png",
          bg = "white", dpi = 300)
# Longitudinal map: highlights the longitudinal sampling transect in blue.
# BUG FIX: show.labels moved out of tm_compass()'s position vector (see above).
longitudinal <- tm_shape(watershed_boundary) +
  tm_polygons(alpha = 0, border.col = "black", lwd = 1.5) +
  tm_shape(stream_line) + tm_lines(col = 'grey45', lwd = 1.2) +
  tm_shape(longitudinal_transect) + tm_lines(col = 'steelblue', lwd = 3) +
  tm_shape(syn_sites_sf) + tm_dots(col = "brown3", size = 0.4) +
  tm_compass(type = "arrow", position = c("right", "bottom"), show.labels = 3,
             size = 3, text.size = 1) +
  tm_scale_bar(text.size = 1, position = "right", width = .2) +
  tm_style(style = 'white')
tmap_save(longitudinal, filename = "figures/map/longitudinal_watershed.png",
          bg = "white", dpi = 300)
# Metabolism-sites map: Duke Forest (Korstian division) overlay plus the
# current ('now') study sites; optional layers retained as comments.
metab <- tm_shape(watershed_boundary) +
  tm_polygons(alpha = 0, border.col = "black", lwd = 1.5) +
  tm_shape(korstian_div) + tm_polygons(alpha = 0.5, col = 'springgreen3',
                                       border.col = "transparent", lwd = .5) +
  # tm_shape(riparian_boundary) + tm_polygons(alpha = 0, col = "black", lwd = 1.5,
  #                                           border.col = 'steelblue3', border.alpha = 0.8) +
  # tm_shape(study_reaches_line) + tm_lines(col = 'steelblue3', lwd = 2.5) +
  tm_shape(stream_line) + tm_lines(col = 'grey45', lwd = 1.2) +
  # tm_shape(longitudinal_transect) + tm_lines(col = 'black', lwd = 2) +
  tm_shape(carter_sites) + tm_dots(col = "brown3", size = 0.4) +
  # tm_shape(hall_sites) + tm_symbols(shape = 3, col = "black", size = 0.6, border.lwd = 2) +
  tm_compass(type = "arrow", position = c("right", "bottom"), show.labels = 3,
             size = 3, text.size = 1) +
  tm_scale_bar(text.size = 1, position = "right", width = .2) +
  tm_style(style = 'white') +
  tm_layout(frame = FALSE, bg.color = "white") +
  tm_add_legend(type = 'fill', labels = 'Duke Forest', col = 'springgreen3', alpha = 0.1,
                border.col = 'transparent') +
  tm_legend(show = TRUE, position = c('right', 'top'), outside = FALSE, bg.color = 'gray97',
            frame = FALSE, text.size = 1.1)
tmap_save(metab, filename = "figures/map/metab_watershed.png",
          bg = "white", dpi = 300)
# Hall-comparison map: 1969 (Hall) sites as small black dots beneath the
# current study sites.
# BUG FIX: show.labels moved out of tm_compass()'s position vector (see above).
hall <- tm_shape(watershed_boundary) +
  tm_polygons(alpha = 0, border.col = "black", lwd = 1.5) +
  tm_shape(korstian_div) + tm_polygons(alpha = 0.5, col = 'springgreen3',
                                       border.col = "transparent", lwd = .5) +
  # tm_shape(riparian_boundary) + tm_polygons(alpha = 0, col = "black", lwd = 1.5,
  #                                           border.col = 'steelblue3', border.alpha = 0.8) +
  # tm_shape(study_reaches_line) + tm_lines(col = 'steelblue3', lwd = 2.5) +
  tm_shape(stream_line) + tm_lines(col = 'grey45', lwd = 1.2) +
  # tm_shape(longitudinal_transect) + tm_lines(col = 'black', lwd = 2) +
  tm_shape(hall_sites) + tm_dots(col = "black", size = 0.01) +
  tm_shape(carter_sites) + tm_dots(col = "brown3", size = 0.4) +
  tm_compass(type = "arrow", position = c("right", "bottom"), show.labels = 3,
             size = 3, text.size = 1) +
  tm_scale_bar(text.size = 1, position = "right", width = .2) +
  tm_style(style = 'white') +
  tm_layout(frame = FALSE, bg.color = "white") +
  tm_add_legend(type = 'fill', labels = 'Duke Forest', col = 'springgreen3', alpha = 0.1,
                border.col = 'transparent') +
  tm_add_legend(type = 'symbol', labels = ' 1969 sites', col = 'black',
                size = 0.01, shape = 3, border.lwd = 2) +
  tm_legend(show = TRUE, position = c('right', 'top'), outside = FALSE, bg.color = 'gray97',
            frame = FALSE, text.size = 1.1)
tmap_save(hall, filename = "figures/map/hall_watershed.png",
          bg = "white", dpi = 300)
# Duke Forest map: base layers only, no site dots.
# NOTE(review): the ' 1969 sites' legend entry is kept from the original even
# though no sites are plotted on this map -- confirm whether it is intended.
df <- tm_shape(watershed_boundary) +
  tm_polygons(alpha = 0, border.col = "black", lwd = 1.5) +
  tm_shape(korstian_div) + tm_polygons(alpha = 0.5, col = 'springgreen3',
                                       border.col = "transparent", lwd = .5) +
  tm_shape(stream_line) + tm_lines(col = 'grey45', lwd = 1.2) +
  # tm_shape(hall_sites) + tm_dots(col = "black", size = 0.01) +
  tm_compass(type = "arrow", position = c("right", "bottom"), show.labels = 3,
             size = 3, text.size = 1) +
  tm_scale_bar(text.size = 1, position = "right", width = .2) +
  tm_style(style = 'white') +
  tm_layout(frame = FALSE, bg.color = "white") +
  tm_add_legend(type = 'fill', labels = 'Duke Forest', col = 'springgreen3', alpha = 0.1,
                border.col = 'transparent') +
  tm_add_legend(type = 'symbol', labels = ' 1969 sites', col = 'black',
                size = 0.01, shape = 3, border.lwd = 2) +
  tm_legend(show = TRUE, position = c('right', 'top'), outside = FALSE, bg.color = 'gray97',
            frame = FALSE, text.size = 1.1)
tmap_save(df, filename = "figures/map/df_watershed.png",
          bg = "white", dpi = 300)
# Second version of the metabolism map with a fuller legend (this overwrites
# the `metab` object built above).
# BUG FIXES: show.labels moved out of tm_compass()'s position vector, and the
# original saved `basic_map` to basic_watershed.png here -- the freshly built
# `metab` object was never written out. Saved as metab_watershed.png;
# TODO(review): confirm the intended output filename.
metab <- tm_shape(watershed_boundary) +
  tm_polygons(alpha = 0, border.col = "black", lwd = 1.5) +
  tm_shape(korstian_div) + tm_polygons(alpha = 0.3, col = 'springgreen3',
                                       border.col = "transparent", lwd = .5) +
  # tm_shape(riparian_boundary) + tm_polygons(alpha = 0, col = "black", lwd = 1.5,
  #                                           border.col = 'steelblue3', border.alpha = 0.8) +
  # tm_shape(study_reaches_line) + tm_lines(col = 'steelblue3', lwd = 2.5) +
  tm_shape(stream_line) + tm_lines(col = 'grey45', lwd = 1.2) +
  # tm_shape(longitudinal_transect) + tm_lines(col = 'black', lwd = 2) +
  tm_shape(carter_sites) + tm_dots(col = "brown3", size = 0.4) +
  # tm_shape(hall_sites) + tm_symbols(shape = 3, col = "black", size = 0.6, border.lwd = 2) +
  tm_compass(type = "arrow", position = c("right", "bottom"), show.labels = 3,
             size = 3, text.size = 1) +
  tm_scale_bar(text.size = 1, position = "right", width = .2) +
  tm_style(style = 'white') +
  tm_layout(frame = FALSE, bg.color = "white") +
  # NOTE(review): legend colors below do not all match plotted layers
  # (sites are drawn brown3 but the legend says red2); kept as in the original.
  tm_add_legend(type = 'symbol', labels = ' Study sites', col = 'red2', size = 0.7,
                shape = 1) +
  tm_add_legend(type = 'symbol', labels = ' Hall 1972 sites', col = 'black',
                size = 0.5, shape = 3, border.lwd = 2) +
  tm_add_legend(type = 'line', labels = 'Study reach', col = 'steelblue3', lwd = 2.5) +
  tm_add_legend(type = 'line', labels = 'Riparian zone', col = 'steelblue3', lwd = 1) +
  tm_add_legend(type = 'fill', labels = 'Duke Forest', col = 'springgreen3', alpha = 0.3,
                border.col = 'transparent') +
  tm_legend(show = TRUE, position = c('right', 'top'), outside = FALSE, bg.color = 'gray97',
            frame = FALSE, text.size = 1.1)
tmap_save(metab, filename = "figures/map/metab_watershed.png",
          bg = "white", dpi = 300)
# Figure variant without the colon in the caption.
# NOTE(review): `study_reaches_line_shortened` and `stream_line_shortened` are
# never defined in this file (only `study_reaches_line` / `stream_line` are),
# so this section errors unless they come from an interactive session.
map_without_colon = tm_shape(watershed_boundary) + tm_polygons(alpha=0, border.col="black", lwd=1) +
tm_shape(korstian_div) + tm_polygons(alpha=0.3, col = 'springgreen3',
border.col="transparent", lwd=.5) +
tm_shape(study_reaches_line_shortened) + tm_lines(col='steelblue3', lwd=2.5) +
tm_shape(stream_line_shortened) + tm_lines(col='black', alpha=0.5, lwd=0.5) +
tm_shape(carter_sites) + tm_symbols(shape=1, col="red2", size=0.6, border.lwd=2) +
tm_scale_bar(text.size = 1, position="left") +
# NOTE(review): show.labels=3 sits inside the position vector here too;
# it should be a separate tm_compass() argument.
tm_compass(type="arrow", position=c("right", "bottom", show.labels=3),
size=5, text.size=1) +
tm_style(style='white') +
tm_layout(frame=TRUE, bg.color="white") +
tm_add_legend(type='symbol', labels = ' Study sites', col = 'red2', size = 0.7,
shape=1) +
tm_add_legend(type='line', labels = 'Study reach', col = 'steelblue3', lwd = 2.5) +
tm_add_legend(type='fill', labels = 'Duke Forest', col = 'springgreen3', alpha=0.3,
border.col='transparent') +
tm_legend(show=TRUE, position=c('left', 'top'), outside=FALSE, bg.color='gray97',
frame=TRUE, text.size=1.1)
tmap_save(map_without_colon, filename="figs/map_without_colon.png",
bg="white", dpi = 300)
# NOTE(review): `map_with_colon` is never created in this file; this save
# call fails unless the object exists in the workspace.
tmap_save(map_with_colon, filename="figs/map_with_colon.png",
bg="white", dpi = 300)
# plot
# Interactive preview of all layers.
# NOTE(review): `nhc_ws`, `mud_ws`, `sites_sf`, and `pt_sf` are not defined in
# this file (the earlier objects are named `watershed_boundary`,
# `mud_ws_boundary`, `syn_sites_sf`); this section appears to be leftover from
# an older version of the script and will error as written.
tmap_mode("view")
tm_shape(nhc_ws)+tm_polygons(alpha=0, border.col="black", lwd=.5)+
tm_shape(mud_ws)+tm_polygons(alpha=1, border.col="black",lwd=.5)+
tm_shape(cur_nhd)+tm_lines(col = "grey60") +
tm_shape(longitudinal_transect) + tm_lines(lwd=2)+
tm_shape(sites_sf)+tm_dots(col="brown3", size=.05)+
tm_shape(wwtp_sf)+tm_markers(shape=3, col="lightblue",size=.05)+
tm_shape(pt_sf) + tm_dots()+
tm_scale_bar(text.size = 1, position = "left")
tmap_mode("plot")
par(bg=NA)
# Static EPS export with scale bar.
# NOTE(review): assigning to `map` shadows maps::map(); the later call
# map('state', ...) still resolves to the function, but the name is fragile.
map<-tm_shape(nhc_ws)+tm_polygons(alpha=0, border.col="black", lwd=.5)+
# tm_shape(mud_ws)+tm_polygons(alpha=0, border.col="black",lwd=.5)+
tm_shape(cur_nhd)+tm_lines(col = "grey60") +
tm_shape(longitudinal_transect) + tm_lines(lwd=2)+
tm_shape(sites_sf)+tm_dots(col="brown3", size=.05)+
# tm_shape(wwtp_sf)+tm_markers(shape=3, col="lightblue",size=.05)+
tm_scale_bar(text.size = 1, position = "left") +
# NOTE(review): show.labels=3 is again inside the position vector.
tm_compass(type="arrow",position=c("right","bottom", show.labels=3))+
tm_layout(frame=FALSE, bg.color="transparent")
tmap_save(map, filename="NHCmap_scalebar.eps", bg="transparent", dpi = 1200,
)
# Plot of longitudinal transect
# Drop the Mud Creek 751 site for the transect-only view.
long_sites_sf <- sites_sf[sites_sf$site!="MC751",]
tmap_mode("view")
tm_shape(cur_nhd)+tm_lines(col = "grey80") +
tm_shape(longitudinal_transect) + tm_lines(lwd=2)+
tm_shape(long_sites_sf)+tm_dots(col="brown3", size=.05)+
# tm_shape(wwtp_sf)+tm_markers(shape=3, col="lightblue",size=.05)+
tm_scale_bar(text.size = 1, position = "left")
# Plot North carolina with piedmont shape
# Inset map: NC outline with the Piedmont ecoregion shaded and the study
# location marked; written to a transparent PNG.
pied <- readOGR(dsn="ncpiedmont_shape",
layer="Piedmont_shape")
par(bg=NA)
png("NCmap.png",bg="transparent", type="windows")
map('state',region='North Carolina',fill=TRUE, col="white",bg="transparent",lwd=2)
plot(pied,
add=TRUE,
col="grey90")
points(wwtp_sf$Long, wwtp_sf$Lat, col="brown3", pch=22, cex=3)
dev.off()
|
63b4be9a4ab128b843061ca98d89a16fe412f015
|
e7af507b207ca3f018a1ffed68026dfe77c21eff
|
/R/eng-mod_focus_20200406_mort_veneto.R
|
a1fef47be0dffd25847521432dd67c3c246a68e8
|
[
"CC-BY-4.0"
] |
permissive
|
UBESP-DCTV/covid19ita
|
24a4f692988dbd78ffe43eb14258d30053ab37f6
|
0a94f4da7ab02ae469e11e6c5f7ca61c6122e3a1
|
refs/heads/master
| 2023-04-11T07:54:38.776100
| 2023-03-23T18:03:40
| 2023-03-23T18:03:40
| 246,156,761
| 9
| 3
|
CC-BY-4.0
| 2020-11-22T15:46:38
| 2020-03-09T22:40:08
|
R
|
UTF-8
|
R
| false
| false
| 14,391
|
r
|
eng-mod_focus_20200406_mort_veneto.R
|
#' focus_20200406_mort_veneto UI Function
#'
#' @description A shiny Module. Static English-language UI (text, tables and
#'   figure placeholders) for the 2020-04-06 focus on overall mortality in
#'   Veneto municipalities, based on Istat data.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
eng_mod_0406_mort_ve_ui <- function(id) {
  ns <- NS(id)
  # Fixes vs. the original: `Title =` -> `title =` in the two table boxes
  # (box() has no `Title` argument, so the captions were silently dropped);
  # trailing commas removed from box() argument lists (empty trailing
  # arguments error at evaluation time); "Periodo" -> "Period" and stray
  # duplicated footnote numbers removed from the displayed text.
  fluidPage(
    fluidRow(box(
      width = 12,
      p(HTML("
        The National Institute of Statistics (Istat) made available on its
        website (https://www.istat.it/it/archivio/240401) the mortality data
        of 1084 Italian municipalities, with data updated to the 21st of
        March 2020.<sup>1</sup>
      ")),
      p(HTML("
        As it is possible to see on the Istat website,
        the municipalities that take part in this
        analysis are the ones that counted at least 10 deaths in the period
        1 January 2020 to 28 March 2020 and that registered a rise in mortality
        of at least 20 % in the first 21 or 28 days of March 2020.
        The selection criteria introduced by ISTAT causes an
        overestimate of mortality. For this reason the numbers we present
        here must be seen as the highest forseeable values.
      "))
    )),
    fluidRow(box(
      width = 12,
      p(HTML("
        Overall mortality is a strong indicator as it has low
        susceptibility to errors or discrepancies in assessments
        and it accounts for both the mortality caused directly by
        a specific pathology and the mortality caused in an indirect
        way, as for example by difficulties for people who suffer
        from different pathologies in accessing the hospital services.
      ")),
      p(HTML("
        Moreover, the overall mortality is not affected by diagnostic
        questions or difficulties in coding cause of death and is
        therefore a useful foundation on which we can build an
        accurate estimate of the effects of the COVID-19 epidemics.
      ")),
      p(HTML("
        Data is made available by Istat in different tables, which
        can be accessed and downloaded from the official website.
        The tables allow an immediate reading and can also be used
        for further analysis. We hence used the data in order to
        make some descriptive analysis, that are essentially presented
        in the form of graphs, in order to illustrate the trend in
        overall mortality by geographic area, sex, age and time period.
      ")),
      p(HTML("
        These are some preliminary analyses that aim at sharing
        information during times of emergencies, that will be improved
        and explored further in the coming weeks. In particular the
        current goal is only to give a reasoned presentation of the
        absolute values and the change percentages. Further analyses
        will be conducted in order to reach a better modelling of
        the trend and to improve the indices of the confidence intervals.
      ")),
      p(HTML("
        These analyses want to answer to the following questions:
        <ul>
        <li> What is the entity of the observed mortality change
        if we compare the period from the 1st to the 21st of March
        2019 to the period from the 1st to the 21st of March 2020?
        <li> How the mortality change distributed by sex, age and
        province of residency?
        <li> If we also consider previous years, starting from 2015,
        can we observe relevant change throughout the different years?
        And again, what is the distribution by sex, age and province
        of residency?
        <li> Starting from which week of the year is it possible
        to observe change of the overall mortality?
        </ul>
      ")),
      p(HTML("
        Note on aggregation and numerosity of data: some
        variables were grouped into wider categories, as
        indicated in the analyses results.
      "))
    )),
    fluidRow(box(
      width = 12,
      h2(HTML("
        How much did the overall mortality change from last year
        (1-21 March 2019 vs 1-21 March 2020)? How is the mortality
        change distributed by sex, age, and province of residency?
      ")),
      p(HTML("
        The percentage change in mortality (1-21 March 2019 vs 1-21
        March 2020) was estimated by region, sex and age aggregated
        data. In this case, aggregation by province only includes
        the aforementioned municipalities made available by Istat.
        Data was categorized as in the table provided by Istat
        (https://www.istat.it/it/files//2020/03/Tavola-sintetica-decessi.xlsx).
        Age categories are: 65-74, 75-84, over 85.
      ")),
      p(HTML("
        The percentage change is defined as:
      ")),
      p(HTML("
        change<sub>%</sub> =
        100 * (
        deaths<sub>2020</sub> --
        deaths<sub>2019</sub>
        ) /
        deaths<sub>2019</sub>
      ")),
      p(HTML("
        This index appears in the original table, computed for
        each municipality. In order to reduce statistical variability
        given by random fluctuations, which is rather high in those
        municipalities with a low number of inhabitants, we computed
        the percentage change on a regional level. Total deaths
        by provincies and percentage change (from 2019 to 2020) are
        shown in the table.<sup>2</sup>
      "))
    )),
    fluidRow(box(
      width = 12,
      p(HTML("
        The analysis was also conducted separately by age class and
        sex, and results are presented in the following graphs
        (Figure 1 and Figure 2)
      "))
    )),
    fluidRow(box(plotlyOutput(ns("fig_1_age")),
      title = "Figure 1: Change percentage by age and province. 1-21 March 2019 vs. 1-21 March 2020.",
      width = 12
    )),
    fluidRow(box(
      width = 12,
      p(HTML("
        For a correct reading of the percentage change
        it is necessary to remember that number of total deaths
        is very different from one province to another as the
        sample size can change quite a lot. In some provincies
        the mortality change seems quite important, but it is
        actually given by a small difference in terms of number
        of deaths (Table 1).
      "))
    )),
    fluidRow(box(
      width = 12, title = "Table 1: Change percentage by class and province. 1-21 March 2019 vs. 1-21 March 2020.",
      DT::DTOutput(ns("tab_1_age"))
    )),
    fluidRow(box(plotlyOutput(ns("fig_2_sex")),
      title = "Figure 2: Change percentage by sex and province. Period 1-21 March 2019 vs. 1-21 March 2020.",
      footer = "f: female; m: male; mf: total",
      width = 12
    )),
    fluidRow(box(
      width = 12, title = "Table 2: Change percentage by sex and province. 1-21 March 2019 vs. 1-21 March 2020.",
      DT::DTOutput(ns("tab_2_sex"))
    )),
    fluidRow(box(
      width = 12,
      h2(HTML("
        Considering the data on mortality starting from 2015,
        what is the entity of the change registered throughout
        the years by age and province of residency?
      ")),
      p(HTML("
        The data provided by Istat allows to analyse the mortality
        trend starting from 2015. Data can be found at
        https://www.istat.it/it/files//2020/03/dati-comunali-settimanali-ANPR-1.zip.
        Further analyses will be conducted in the coming weeks
        to better explore mortality between 2015 and 2020.
      ")),
      p(HTML("
        Deaths in all municipalities in the Istat database
        belonging to the same province were summed together
        in order to obtain the number of deaths by province.
        The graph here below (Figure 3) shows the number of total
        deaths by province from 2015 to 2020.
      "))
    )),
    fluidRow(box(plotlyOutput(ns("fig_3_year_all")),
      title = "Figure 3: Number of deaths by province in the period 1-21 March from 2015 to 2020.",
      width = 12
    )),
    fluidRow(box(
      width = 12,
      p(HTML("
        The graphs here below (Figure 4) show how mortality
        changed from 2015 to 2020 by province and age.
        Deaths in all municipalities in the Istat database
        belonging to the same province were summed together
        in order to obtain the number of deaths by province. Age
        classes were defined as follows: under 64 (putting
        together the classes 0-14 and 15-64 of the original
        table), 65-74, over 75.
      ")),
      p(HTML("
        It is necessary to keep in mind that the graphs
        only show absolute numbers, hence differences
        between provinces are mainly due to different sample sizes.
      "))
    )),
    fluidRow(box(
      plotlyOutput(ns("fig_4_year_age")),
      title = "Figure 4: Number of deaths by province and age in the period 1-21 March from 2015 to 2020.",
      width = 12
    )),
    fluidRow(box(
      width = 12,
      h2(HTML("
        In which week of the year is it possible to notice
        change in the overall mortality?
      ")),
      p(HTML("
        Data regarding the 122 municipalities of the Veneto
        Region, as presented in the table at
        https://www.istat.it/it/files//2020/03/dati-comunali-settimanali-ANPR-1.zip
        for the period of time that goes from the 1st of January
        to the 21st of March, can be helpful to answer this question.
        The data in the table is divided into time slots of 7 days,
        except for the period from the 1st to the 10th of January;
        this period was therefore excluded from the analysis. The
        following graphs (Figure 5) present the trend by age
        and province. The graphs report on the horizontal axis
        the date that represents the beginning of each time slot.
      "))
    )),
    fluidRow(box(
      plotlyOutput(ns("fig_6_week_age")),
      title = "Figure 5: Number of weekly deaths by age and province from the 12th to the 21st of March 2020.",
      width = 12
    )),
    fluidRow(box(
      width = 12, title = "Notes",
      p(HTML("
        <sup>1</sup> For further information on data collection
        see the Istat methodology.
        <br>
        <sup>2</sup> If the index is equal to 100% it means
        the mortality has doubled.
      "))
    ))
  )
}
#' focus_20200406_mort_veneto Server Function
#'
#' Builds the ggplot objects for the Veneto mortality focus (change by age,
#' sex, year and week) from package-internal data sets
#' (`mort_data_veneto_age`, `mort_data_veneto_sex`, `mort_data_comuni`) and
#' the package helper `ggmort()`, then registers the reactive plotly/DT
#' outputs for the matching UI module.
#'
#' @noRd
eng_mod_0406_mort_ve_server <- function(id) {
  # Data preparation ------------------------------------------------
  ## 1-2: percentage change 2019-2020 -------------------------------
  ### by age (fig 1)
  gg_fig_1_age <- mort_data_veneto_age %>%
    ggmort("Age class", x = "provincia") +
    ggtitle("Overall mortality by age class",
      subtitle = "1-21 March 2019 vs 2020"
    )
  ### by sex (fig 2)
  gg_fig_2_sex <- mort_data_veneto_sex %>%
    ggmort("Sex", x = "provincia") +
    ggtitle("Overall mortality by sex",
      subtitle = "1-21 March 2019 vs 2020"
    )
  ## 3: mortality in the first three weeks of March, 2015-2020 ------
  # Keep only the three full March weeks for Veneto municipalities.
  data_year_marzo_veneto <- mort_data_comuni %>%
    dplyr::filter(
      .data$settimana %in%
        c("01/03-07/03", "08/03-14/03", "15/03-21/03"),
      .data$regione == "Veneto"
    )
  ### all (fig 3)
  data_year_marzo_veneto_all <- data_year_marzo_veneto %>%
    dplyr::group_by(.data$provincia, .data$year) %>%
    dplyr::summarise(decessi = sum(.data$n_death))
  # NOTE(review): axis label says "1-20 March" while the filter above covers
  # 1-21 March -- confirm which is intended.
  gg_fig_3_year_all <- data_year_marzo_veneto_all %>%
    ggplot(aes(
      x = .data$year,
      y = .data$decessi,
      colour = .data$provincia
    )) +
    geom_point() +
    geom_smooth(se = FALSE) +
    labs(y = "Number of deaths 1-20 March") +
    theme(
      axis.text.x = element_text(angle = 60, hjust = 1),
      panel.background = element_blank()
    )
  ### by age (fig 4)
  data_year_marzo_veneto_age <- data_year_marzo_veneto %>%
    dplyr::group_by(
      .data$provincia, .data$year, .data$classe_di_eta
    ) %>%
    dplyr::summarise(decessi = sum(.data$n_death))
  gg_fig_4_year_age <- data_year_marzo_veneto_age %>%
    ggplot(aes(
      x = .data$year,
      y = .data$decessi,
      colour = .data$provincia
    )) +
    geom_point() +
    geom_smooth(se = FALSE) +
    facet_wrap(.data$classe_di_eta ~ ., scales = "free_y") +
    labs(y = "Number of deaths 1-20 March") +
    theme(
      axis.text.x = element_text(angle = 60, hjust = 1),
      panel.background = element_blank()
    )
  ## 4: first weeks of 2020 -----------------------------------------
  # Drop the irregular 11-day first slot, keep 2020/Veneto, and parse the
  # slot's start date ("dd/mm") from the `settimana` label.
  data_inizio_2020_veneto <- mort_data_comuni %>%
    dplyr::filter(
      .data$settimana != "01/01-11/01",
      .data$year == 2020,
      .data$regione == "Veneto"
    ) %>%
    dplyr::mutate(
      settimana = substr(.data$settimana, start = 1, stop = 5) %>%
        as.Date(format = "%d/%m")
    )
  data_week_veneto <- data_inizio_2020_veneto %>%
    dplyr::filter(.data$sex == "totale") %>%
    dplyr::group_by(
      .data$provincia,
      .data$settimana,
      .data$classe_di_eta
    ) %>%
    dplyr::summarise(decessi = sum(.data$n_death))
  ### by age (fig 6)
  gg_fig_6_week_age <- data_week_veneto %>%
    ggplot(aes(
      x = .data$settimana,
      y = .data$decessi,
      colour = .data$provincia
    )) +
    geom_point() +
    geom_smooth(se = FALSE) +
    facet_wrap(.data$classe_di_eta ~ ., scales = "free_y") +
    labs(y = "Number of deaths 1-20 March") +
    theme(
      axis.text.x = element_text(angle = 60, hjust = 1),
      panel.background = element_blank()
    )
  # Output (reactive) objects ---------------------------------------
  callModule(id = id, function(input, output, session) {
    ns <- session$ns
    output$fig_1_age <- renderPlotly({
      clean_ggplotly(gg_fig_1_age)
    })
    output$tab_1_age <- DT::renderDT({
      mort_data_veneto_age
    })
    output$fig_2_sex <- renderPlotly({
      clean_ggplotly(gg_fig_2_sex)
    })
    output$tab_2_sex <- DT::renderDT({
      mort_data_veneto_sex
    })
    output$fig_3_year_all <- renderPlotly({
      clean_ggplotly(gg_fig_3_year_all)
    })
    output$fig_4_year_age <- renderPlotly({
      clean_ggplotly(gg_fig_4_year_age)
    })
    output$fig_6_week_age <- renderPlotly({
      clean_ggplotly(gg_fig_6_week_age)
    })
  })
}
## To be copied in the UI
#> mod_0406_mort_veneto_ui("magnani_1")
## To be copied in the server
#> mod_0406_mort_veneto_server("magnani_1")
|
435e2a2ce02e0436020fb1d8c6d4622c29ee9ced
|
8da89ff84396fb73d5b139398e351adf0cf1a080
|
/man/score.Rd
|
51a22474de12413a57bb4f69728e4c00aa8ae415
|
[] |
no_license
|
UBESP-DCTV/rexams
|
f4ce0347213a2eac678414910082c3daf8d9f977
|
f2353d7db89b8386be0feeeda365e82489881db5
|
refs/heads/master
| 2020-03-20T10:50:10.725246
| 2018-09-10T11:32:37
| 2018-09-10T11:32:37
| 137,386,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 235
|
rd
|
score.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/score.R
\name{score}
\alias{score}
\title{Score}
\usage{
score(x)
}
\arguments{
\item{x}{a boolean}
}
\value{
1 if TRUE, -1 if FALSE
}
\description{
Score
}
|
0d2742130ef97f929fcf363df17602030dd3c9ae
|
6c498ad3e5c3b97cc4c8cb6a26b9c15d68d6a933
|
/r/man/model_search.Rd
|
78702011f5e658e08ef0a53046c7fb117b5175cb
|
[] |
no_license
|
ModelOriented/weles
|
6b327dcf4c97f0a2df637fed3b28a7bfb48aea1b
|
2b76fb0ed6b42c0395426eebdb8fcb9765dc43e7
|
refs/heads/master
| 2020-07-27T09:06:29.253125
| 2019-11-30T14:08:21
| 2019-11-30T14:08:21
| 209,040,281
| 9
| 4
| null | 2019-09-18T14:55:07
| 2019-09-17T11:53:01
|
HTML
|
UTF-8
|
R
| false
| true
| 1,788
|
rd
|
model_search.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_search.r
\name{model_search}
\alias{model_search}
\title{Find model that interests you the most}
\usage{
model_search(language = NA, language_version = NA, row = NA,
column = NA, missing = NA, classes = NA, owner = NA,
tags = c(), regex = NA)
}
\arguments{
\item{language}{search only among models written in this language}
\item{language_version}{what language version should be model written, '<n;' '>n;' '=n;' '>a;<b;'}
\item{row}{parameter describing number of rows in training dataset, '<n;' '>n;' '=n;' '>a;<b'}
\item{column}{parameter describing number of columns in training dataset, '<n;' '>n;' '=n;' '>a;<b'}
\item{missing}{parameter describing number of missing values in training dataset, '<n;' '>n;' '=n;' '>a;<b'}
\item{classes}{parameter describing number of classes in training dataset, '<n;' '>n;' '=n;' '>a;<b'}
\item{owner}{show only models created by this user}
\item{tags}{vector of tags, should be all strings}
\item{regex}{regex for searching names of models}
}
\value{
vector of models' names satisfying those restrictions
}
\description{
This function performs an advanced search of the models stored in weles. If all parameters are left at their defaults, it returns the names of all models in weles.
}
\examples{
\dontrun{
library("weles")
model_search(tags = c('example', 'easy'))
model_search(row='<15000;', tags = c('example', 'easy'))
model_search(column='>10;<15;', owner='Example user')
model_search(language='python', language_version='3.6.8', row='>1000;<10000;', column='=14;', classes='=2;', missing='=0;', owner='Example user', tags=
c('example', 'easy'), regex='^R')
}
}
\references{
\href{http://192.168.137.64/models}{\bold{models}}
\href{http://192.168.137.64/datasets}{\bold{datasets}}
}
|
6bc112eee14572d9ebb13dc4afd9eff3c04c660a
|
ba05b04ebd7376d6ea805f5c3ea5431fa30c4313
|
/scripts/nchs_vital_stats_mortality_tidy.R
|
3ae5f498714148a637b289e6d8c95983ca253a1b
|
[
"CC0-1.0"
] |
permissive
|
UACC-renedherrera/NCHS-Leading-Causes-of-Death-USA
|
fc50ab3ed36bf1947ccbe5da7e3880e350fedae5
|
72fd4655426ee0563c06b45266a6821ec06e7df5
|
refs/heads/main
| 2023-06-30T03:30:30.683943
| 2021-08-04T21:34:54
| 2021-08-04T21:34:54
| 392,426,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,925
|
r
|
nchs_vital_stats_mortality_tidy.R
|
# set up ----
# load packages to read and tidy data
library(here)
library(tidyverse)
library(janitor)
# source citation ----
#
# notes about the data:
# https://www.cdc.gov/nchs/data/dvs/Multiple-Cause-Record-Layout-2019-508.pdf
#
# One-off download of the 2019 multiple-cause mortality file, kept for
# reproducibility (commented out so the script does not re-download).
# set values
# url <- "https://ftp.cdc.gov/pub/Health_Statistics/NCHS/Datasets/DVS/mortality/mort2019us.zip"
# path_zip <- "data/raw"
# path_unzip <- "data/raw/mort2019us"
# zip_file <- "mort2019us.zip"
# # use curl to download
# curl::curl_download(url, destfile = paste(path_zip, zip_file, sep = "/"))
# # set value
# zipped_file <- "data/raw/mort2019us.zip"
# # unzip to folder
# unzip(zipped_file, exdir = path_unzip)
# read
# Fixed-width read of selected fields; column positions come from the NCHS
# record layout linked above. n_max = 500 limits the read to the first 500
# records -- presumably for development; remove it for the full file.
# col_types "cfccccc": everything read as character except `sex` (factor).
mort_2019 <- read_fwf( n_max = 500,
file = "data/raw/mort2019us/VS19MORT.DUSMCPUB_r20210304",
col_positions = fwf_cols(month = c(65,66),
sex = c(69,69),
age = c(77,78),
year = c(102,105),
cause = c(160,161),
hispanic = c(484, 486),
race = c(489,490)),
col_types = c("cfccccc"))
# inspect
# Quick frequency tables for each field, printed to the console.
glimpse(mort_2019)
# month
mort_2019 %>%
count(month, sort = TRUE)
# sex
mort_2019 %>%
count(sex, sort = TRUE)
# age
mort_2019 %>%
count(age, sort = TRUE)
# cause
mort_2019 %>%
count(cause, sort = TRUE)
# hispanic
mort_2019 %>%
count(hispanic, sort = TRUE)
# race
mort_2019 %>%
count(race, sort = TRUE)
####
# still figuring out if the read data is correct
####
# Recode the two-digit month code ("01".."12") to its English abbreviation.
# Replaces the original 12-level nested if_else() chain with a small lookup
# helper; codes outside "01".."12" map to "" exactly as before.
month_abbrev <- function(code) {
  abbrev <- month.abb[match(code, sprintf("%02d", 1:12))]
  ifelse(is.na(abbrev), "", abbrev)
}
mort_2019 <- mort_2019 %>%
  mutate(month_name = month_abbrev(month))
# Label selected underlying-cause recode codes.
# BUG FIX: the original nested if_else() chain ended in a truncated
# `if_else(cause ="")` with unbalanced parentheses, so the script did not
# parse. Rewritten with case_when(); unlisted codes fall through to NA --
# TODO(review): confirm the intended fallback label.
mort_2019 <- mort_2019 %>%
  mutate(cause_name = case_when(
    cause == "04" ~ "Malignant neoplasms",
    cause == "18" ~ "Major cardiovascular diseases",
    cause == "17" ~ "Alzheimer's disease",
    cause == "28" ~ "CLRD",
    cause == "38" ~ "Motor vehicle accidents",
    cause == "39" ~ "All other and unspecified accidents and adverse effects",
    cause == "40" ~ "Intentional self-harm (suicide)",
    TRUE ~ NA_character_
  ))
# Collapse the race recode to the four groups used downstream.
mort_2019 <- mort_2019 %>%
  mutate(race_name = case_when(
    race == "01" ~ "White",
    race == "02" ~ "Black",
    race == "03" ~ "AIAN",
    TRUE ~ "Other"
  ))
# Hispanic-origin codes 100-199 mean "not Hispanic" in the NCHS layout.
# `hispanic` was read as character, so compare against character codes
# (same result as the original implicit coercion, but explicit).
mort_2019 <- mort_2019 %>%
  mutate(hispanic_code = if_else(hispanic %in% as.character(100:199), "No", "Yes"))
# explore
glimpse(mort_2019)
# Frequency of each underlying-cause code, most common first.
mort_2019 %>%
  count(cause, sort = TRUE)
# Equivalent grouped summary. BUG FIX: in the original, `group_by(cause)`
# was detached from the pipe (no data frame flowed into it), which errors
# at run time.
mort_2019 %>%
  group_by(cause) %>%
  summarise(count = n())
|
28dd5aecc930d1edb74307215c94bb15e7b67bfb
|
37a70a2a8c84f353d45cd678f059cbe5446d5346
|
/day2/hw0829.R
|
9b409d236f8e774f093278e42118aceb3b3be88a
|
[] |
no_license
|
jee0nnii/DataITGirlsR
|
a27f7ce1c3f90765366f120ff85cd7f2cee60e8c
|
cc6e7b3f2d30c690a41e4ca5a165d32de47d3c3f
|
refs/heads/master
| 2021-03-19T16:49:01.412022
| 2017-11-06T15:21:56
| 2017-11-06T15:21:56
| 109,706,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,012
|
r
|
hw0829.R
|
#### Using the January box-office file, solve the following problems ####
getwd()
# BUG FIX: the original called setwd() with no argument, which errors.
# Set the working directory to the data folder before sourcing:
# setwd("<path to folder containing boxoffice_jan.csv>")
boxoffice_jan <- read.csv("boxoffice_jan.csv")
head(boxoffice_jan)
View(boxoffice_jan)
#### 1. Find the maximum, minimum, and mean of revenue (매출액) ####
bo_j_revenue <- boxoffice_jan$매출액
bo_j_revenue
bo_j_max <- max(bo_j_revenue)
bo_j_max
bo_j_min <- min(bo_j_revenue)
bo_j_min
bo_j_mean <- mean(bo_j_revenue)
bo_j_mean
summary(bo_j_revenue)
#### 2. Find the mean and variance of the screen count (스크린수) ####
bo_j_screen <- boxoffice_jan$스크린수
bo_j_screen
bo_j_screen_mean <- mean(bo_j_screen)
bo_j_screen_mean
bo_j_screen_var <- var(bo_j_screen)
bo_j_screen_var
summary(bo_j_screen)
#### 3. Find the most frequent country of origin (대표국적) ####
head(boxoffice_jan)
dim(boxoffice_jan)
# frequency table with table()
bo_j_country <- table(boxoffice_jan$대표국적)
bo_j_country
# which.max() returns the index of the most frequent country
cnt <- which.max(table(boxoffice_jan$대표국적))
# the 8th entry (USA) has the largest count; which() gives the index
cnt
bo_j_country[cnt]
names(bo_j_country)[table(boxoffice_jan$대표국적)==max(bo_j_country)]
names(bo_j_country)[cnt] # extracts just the name from the table
#boxoffice_jan$대표국적[boxoffice_jan$대표국적==""]<-NA # NA values are not counted by table()
#table(boxoffice_jan$대표국적)
#TABLE SORT
sort(bo_j_country) # ascending
sort(bo_j_country, decreasing = TRUE) # descending
sort(bo_j_country, decreasing = TRUE)[1]
#### 4. Find the mean number of screenings (상영횟수) for Korean films ####
movies <- data.frame(boxoffice_jan$대표국적, boxoffice_jan$상영횟수)
movies
# "한국" = South Korea (value in the country column, not translatable code)
koreamovies <- movies[movies$boxoffice_jan.대표국적 == "한국", ]
koreamovies
mean(koreamovies$boxoffice_jan.상영횟수)
#### 5. summary() of screen count for US films ####
movies_s <- data.frame(boxoffice_jan$대표국적, boxoffice_jan$스크린수)
movies_s
# "미국" = United States
usamovies <- movies_s[movies_s$boxoffice_jan.대표국적 == "미국", ]
usamovies
summary(usamovies)
|
42efaa31e8baf9f71153deb5946ce913adb8ba4e
|
f0d35b6ea0ebe9517537ecdf921bb442f1fd7550
|
/ColonelHouNote/src/main/java/com/hn/opensource/scale/pre/辅助构造器.rd
|
3f84795450593b9bae01e7d88f14bcd5b1d44817
|
[] |
no_license
|
jiangsy163/ColonelHouNote
|
01191a63d51542b09ef23e9662896e8407211119
|
6173c265c82b7b0197846cf621ecebab44073ef6
|
refs/heads/master
| 2021-01-14T12:47:41.091704
| 2015-11-19T08:40:09
| 2015-11-26T08:41:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 419
|
rd
|
辅助构造器.rd
|
类能有任意多的辅助构造器
辅助构造器的名称为this,在类中定义
辅助构造器必须以一个主构造器或其他已定义的辅助构造器调用开始
class HELLOWORLD{
  private var value1=""
  private var value2=""
  def this(m:String){
    this() //调用主构造器
    this.value1=m}
  def this(m:String,n:String){
    this(m) //调用已定义的辅助构造器
    this.value2=n}}
|
824266e1b3ccf4dc73200fe58d3be8b4f17aefa0
|
723aa099e0ca3ae77e5e6a60e2c9f46288d9ecb6
|
/chapter5.R
|
c66a2ebec40b690b76497a9b8bfa28cfc55222c7
|
[] |
no_license
|
anhnguyendepocen/code-r-journalism
|
ef530885bf493927510b4861cb934bd238239b1f
|
78706a395f5594dae680e8bc9e6e1c2dc6b2ca8a
|
refs/heads/master
| 2020-05-01T04:16:08.877185
| 2018-08-15T21:22:45
| 2018-08-15T21:22:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,043
|
r
|
chapter5.R
|
---
title: "Chapter 5"
tutorial:
id: "code.r-journalism/chapter-5"
version: .8
output:
learnr::tutorial:
progressive: true
theme: lumen
highlight: espresso
include:
before_body: _navbar.html
runtime: shiny_prerendered
---
```{r setup, include=FALSE}
library(learnr)
library(tidyverse)
library(checkr)
knitr::opts_chunk$set(echo = FALSE)
tutorial_options(exercise.checker = checkr::check_for_learnr)
#knitr::opts_chunk$set(exercise.checker = checker)
```
## Static maps
### Map a shapefile
### Join it to data
### Map out the joined data
### Facet maps
### Map out locations as circles
## Interactive maps
### Find the latitude and longitude of an address
### Map that out in an interactive map
### Points in a polygon join
### Map out joined data as interactive choropleth
## Intro to R
### Objects
Assign the number 17 to the object **ub**
```{r object-check, exercise=T, exercise.timelimit=60}
ub 17
ub
```
### Array
Create an array of numbers: 301, 978, and 101.
Assign it to the object "years"
```{r arrays, exercise=T, exercise.timelimit=60}
years #replace this with your code
years
```
### Functions
```{r years_array, include=FALSE}
years <- c(301, 978, 101)
```
What's the average of the array of numbers assigned to "years"?
```{r average, exercise=T, exercise.timelimit=60}
(years)
```
### Classes
```{r factors_df, include=FALSE}
burgers <- data.frame(id=c(60006,60007,60008,60009, 60010), name=c("Bob", "Linda", "Louise", "Tina", "Gene"), age=c(45, 44, 12, 13, 11), shirt=c("White", "Red", "Pink", "Blue", "Yellow"))
burgers$shirt<- factor(burgers$shirt)
burgers$id <- factor(burgers$id)
burgers$name <- as.character(burgers$name)
```
Take a look at the structure of **burgers**:
```{r structure, exercise=T, exercise.timelimit=60}
```
```{r first_quiz}
quiz(
question("What kind of class is the variable id?",
answer("character"),
answer("number"),
answer("factor", correct = TRUE),
answer("date"),
random_answer_order= TRUE
))
```
## Data structures in R
### Pulling a column of data
Consider this data frame **burgers**
```{r burger_show}
burgers
```
How do you refer to the the *shirt* variable/column with []?
```{r variable1, exercise=T, exercise.timelimit=60}
# Add to the line below
burgers
```
How do you refer to the the *shirt* variable/column with $?
```{r variable2, exercise=T, exercise.timelimit=60}
# Add to the line below
burgers
```
### Pulling a row of data
Extract entire row for Linda using [].
```{r variable4, exercise=T, exercise.timelimit=60}
# Add to the line below
burgers
```
### Converting data classes
Convert the *id* variable of the **burgers** data frame to numeric.
```{r variable3, exercise=T, exercise.timelimit=60}
# Add to the line below
burgers
```
### Boolean logic
Check if Gene's age is 11.
*Note:* Is the answer the same as above (correct) or is it 1-5 (false)?
```{r boolean, exercise=T, exercise.timelimit=60}
# Modify the line of code below
age_test <- burgers$age[5] 11
age_test
```
|
488ecfa9121bf7827fdb39b8ed96bdfb336b38b4
|
a92c154607a68a36905e86843bae56065bed5a6b
|
/_RF/_run6/4_predictRF_future.R
|
e9ed7b1b411768ad76d32831056704bc2febcdd1
|
[] |
no_license
|
fabiolexcastro/centralAmericaCocoa
|
41c7af22ef3bbde8d55750ac06741fb613e59e33
|
de0c4bc0d4168fc27c8fcdfe732afe515f1855e2
|
refs/heads/master
| 2021-05-13T18:36:03.720454
| 2018-01-09T21:05:45
| 2018-01-09T21:05:45
| 116,870,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,452
|
r
|
4_predictRF_future.R
|
# CIAT, 2016 Updated: February 2017
# Author: Bunn & Castro
# Target: predict RF Future
# Load libraries
library(tidyverse)
library(raster)
library(rgdal)
library(cclust)
library(outliers)
library(dismo)
library(gtools)
library(multcomp)
library(sp)
library(rgeos)
library(outliers)
library(FactoMineR)
library(pROC)
library(randomForest)
library(stringr)
library(foreach)
library(doMC)
library(doSNOW)
# Initial setup ----
# Clear the workspace and point the working directory at the cluster share;
# the mount path differs between the Linux head node and Windows clients.
rm(list = ls())
OSys <- Sys.info(); OSys <- OSys[names(OSys)=='sysname']
if(OSys == 'Linux'){
  setwd('//mnt/workspace_cluster_9/Coffee_Cocoa2/_cam')
} else {
  if(OSys == 'Windows'){
    setwd('//dapadfs/Workspace_cluster_9/Coffee_Cocoa2/_cam')
  }
}
# load(paste0(path, '/_rData/_run2/clusterpresdata.rData'))
# Run configuration: load the fitted random-forest list for this run and
# combine its elements into one forest used for all predictions below.
run <- '_run6'
load(paste0('_RF/', run, '/_models/rflist_5.rData'))
NumberOfClusters <- 5
ar5biofolder <- '_raster/_climate/_future/_rcp60/_asc'
yearlist <- list.files(ar5biofolder)
gcmlist <- list.files(paste0(ar5biofolder, '/', yearlist[1]))
resultsfolder <- paste0('_RF/', run, '/_results/_raw')
modelfolder <- paste0('_RF/', run, '/_models')
rff <- do.call(randomForest::combine, rflist) # 'rflist' comes from the .rData loaded above
myproj <- CRS("+proj=longlat +datum=WGS84")
# To windows
rasterOptions(tmpdir = '_temp')
cl <- makeCluster(32) # number of worker cores to use #.export = 'rasterRFprob'
registerDoSNOW(cl)
# registerDoMC(3)#1:length(gcmlist)
y <- 2 # index into yearlist; only this one period is predicted by the loop below
# For each GCM: stack its bioclim .asc layers, predict cluster-membership
# probabilities with the combined forest, and write class / probability /
# uncertainty rasters for the selected year.
foreach(i = 1:length(gcmlist), .packages = c('raster', 'dplyr', 'gtools', 'foreach', 'randomForest', 'sp', 'stringr')) %dopar% {
  print(gcmlist[i])
  gcmfiles <- paste(ar5biofolder, yearlist[y], gcmlist[i], sep = '/') %>%
    list.files(., full.names = T, pattern = '.asc$') %>%
    grep('bio', ., value = T) %>%
    mixedsort()
  climatelayers <- stack(gcmfiles)
  climatevalues <- data.frame(getValues(climatelayers))
  print('Climate values')
  #
  rasterProbs <- predict(rff, climatevalues, type = 'prob') # proximity = T
  # Suitability = summed probability over the NumberOfClusters presence
  # classes (columns 3 onward); columns 1-2 appear to be background classes.
  rasterRF <- rowSums(rasterProbs[,c(3:(NumberOfClusters+2))])
  uncertainty <- apply(rasterProbs, 1, max)
  rasterRFprob <- climatelayers[[1]]
  values(rasterRFprob) <- rasterRF
  rasterRFuncertainty <- climatelayers[[1]]
  values(rasterRFuncertainty) <- uncertainty
  rasterRF <- max.col(rasterProbs, 'first') # name reused: now the hard class label per cell
  rasterRFclass <- climatelayers[[1]]
  values(rasterRFclass) <- rasterRF
  # Write Raster
  print("Write Raster...")
  writeRaster(rasterRFclass, paste(resultsfolder, '/', yearlist[y], '/RF_', NumberOfClusters, 'Clust_', gcmlist[i], yearlist[y], '.asc', sep=''), format = 'ascii', overwrite = F)
  writeRaster(rasterRFprob, paste(resultsfolder, '/', yearlist[y], '/RF_', NumberOfClusters, 'Prob_', gcmlist[i], yearlist[y], '.asc', sep=''), format = 'ascii', overwrite = F)
  writeRaster(rasterRFuncertainty, paste(resultsfolder, '/', yearlist[y], '/RF_', NumberOfClusters, 'Unc_', gcmlist[i], yearlist[y], '.asc', sep=''), format = 'ascii', overwrite = F)
  print('Done!')
  print(gcmlist[i])
  # removeTmpFiles(h = 0)
}
# other way to generate the results
# NOTE(review): this section reuses loop-local objects (climatevalues,
# climatelayers), so it only works when run interactively after stepping
# through the loop body above.
print('Cluster')
rasterClust <- raster::predict(rff, climatevalues)
rasterRFclust_mask <- climatelayers[[21]]
values(rasterRFclust_mask) <- rasterClust
# Probabilistic
print('Probabilistic...')
rasterProbs <- predict(rff, climatevalues, type = "prob")
rasterRF <- rowSums(rasterProbs[,3:7]) # this measures the model's certainty
uncertainty <- apply(rasterProbs, 1, max) # maximum value per row
rasterRFprob <- climatelayers[[21]]
values(rasterRFprob) <- rasterRF
rasterRFuncertainty <- climatelayers[[21]] #
values(rasterRFuncertainty) <- uncertainty
#
# One-off housekeeping: copy 2050 outputs misfiled under the _2030 folder.
y <- list.files('Z:/_cam/_RF/_run2/_results/_raw/_2030', full.names = T, pattern = '2050')
lapply(1:length(y), function(x){
  file.copy(from = y, to = 'Z:/_cam/_RF/_run2/_results/_raw/_2050')
})
# ----------------------------------------------
# Other way that is for RF model
# Legacy variant: predicts each of the 25 model repetitions separately,
# skipping repetitions whose output file already exists.
for (i in 1:length(gcmlist)){
  gcmfiles <- list.files(paste(ar5biofolder, yearlist[1], gcmlist[i], sep="/"), full.names = T)
  climatelayers <- stack(gcmfiles)
  names(climatelayers) <- str_replace(names(climatelayers), "_100", "")
  climatelayers <- climatelayers[[descriptors]] # NOTE(review): 'descriptors' is defined elsewhere, not in this file
  print(gcmlist[i])
  registerDoMC(19) # number of cores to use
  this <- foreach(repe = 1:25,.packages=c("randomForest","raster","sp", "stringr")) %dopar% {
    if (isTRUE(file.exists(paste(resultsfolder,"Clusterfuture/Intermediates/RF_",NumberOfClusters,"clust_","rep_",repe,gcmlist[i],".asc",sep="")))) {
      #nothing
    }else{
      load(file = paste(modelfolder,"RF_",NumberOfClusters,"Prob_","rep_",repe,".rdata", sep = "")) # one model at a time
      print("rf")
      coff_rf <- raster::predict(climatelayers, rfmodel, inf.rm=T) # single-model prediction; inf.rm removes non-finite values
      coff_rf <- coff_rf+1 # original author's open question: why add 1 to the raster?
      #writeRaster(coff_rf, paste(resultsfolder,"Clusterfuture/Intermediates/RF_",NumberOfClusters,"clust_","rep_",repe, gcmlist[i],".asc",sep=""),format="ascii",overwrite=T) # RAW
      writeRaster(coff_rf, paste(resultsfolder,"Clusterfuture/Intermediates/RF_",NumberOfClusters,"clust_","rep_",repe, gcmlist[i],".asc",sep=""),format="ascii",overwrite=T)
    }
  }
}
|
1794682b611be77a74fe87d16f1fca655958cca8
|
040c27633646c78485b75cc908400d184f006490
|
/Fra_Check_tree.R
|
80faa4957acb370430255190db379389241884f1
|
[] |
no_license
|
Gowthaman227/Decision-Tree-Algorithm
|
013acad3ec7361a954c09216ceae792904641090
|
d66294cd5fc5eb3118eee2cdc54e9fc2c04f5de6
|
refs/heads/master
| 2022-11-23T04:09:54.211011
| 2020-07-27T15:20:17
| 2020-07-27T15:20:17
| 282,936,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,604
|
r
|
Fra_Check_tree.R
|
# Fraud-check decision trees: label taxable income as "Risky" (<= 30000)
# or "Good", then fit and evaluate three tree models (C5.0, party::ctree,
# rpart) on a 75/25 stratified train/test split.
library(party)
library(C50)
library(caret)
# Load the dataset interactively and derive the binary target.
Fra_Check <- read.csv(file.choose())
View(Fra_Check)
str(Fra_Check)
Fra_Check$Taxable.Income <- ifelse(Fra_Check$Taxable.Income<= 30000,"Risky","Good")
Fra_Check$Taxable.Income <- as.factor(Fra_Check$Taxable.Income)
View(Fra_Check)
## Creating partition for training and testing dataset (75/25, stratified on the target)
Fra_Check_parti <- createDataPartition(Fra_Check$Taxable.Income,p=0.75,list=F)
Fra_Check_train <- Fra_Check[Fra_Check_parti,]
View(Fra_Check_train)
Fra_Check_test <- Fra_Check[-Fra_Check_parti,]
View(Fra_Check_test)
## Building Model 1: C5.0 decision tree
# (use the bare column name so the response comes from `data`, matching the predictors)
Fra_Check_Model <- C5.0(Taxable.Income~Undergrad+Marital.Status+City.Population
                        +Work.Experience+Urban,data=Fra_Check_train)
plot(Fra_Check_Model)
summary(Fra_Check_Model)
pred_FC <- predict.C5.0(Fra_Check_Model,Fra_Check_test)
pred <- table(Fra_Check_test$Taxable.Income,pred_FC)
View(pred)
## Accuracy: sum of the confusion-matrix diagonal over the total
sum(diag(pred))/(sum(pred))## Accuracy is 79.33%
## Building Model 2 using party's ctree
# FIX: the formula previously used Fra_Check$Taxable.Income (the response
# from the FULL data set) with data=Fra_Check_train, so the response and the
# predictors had different lengths. Referencing the bare column name takes
# both from the training partition.
Fra_Check_Model1 <- ctree(Taxable.Income~.,data=Fra_Check_train)
summary(Fra_Check_Model1)
plot(Fra_Check_Model1)
pred_tree <- as.data.frame(predict(Fra_Check_Model1,newdata=Fra_Check_test))
pred_tree["final"] <- NULL
pred_test_df <- predict(Fra_Check_Model1,newdata=Fra_Check_test)
mean(pred_test_df==Fra_Check_test$Taxable.Income) # test-set accuracy
## Building Model 3 using rpart
library(rpart)
library(rpart.plot)
tree <- rpart(Taxable.Income~.,data=Fra_Check_train,cp=0.02)
summary(tree)
rpart.plot(tree, box.palette="RdBu", shadow.col="gray", nn=TRUE)
|
86db77ac246b8494580398c2b15f3c87f6f9f05c
|
ae3a01bcafd7b940c15d8edb9b5a4105655d5fe2
|
/source_functions/lrt_calc.R
|
679f785a86f0cd043d4aee63a7749dd63ae4cf89
|
[] |
no_license
|
harlydurbin/angus_hairshed
|
a5c29713c340f839e3fe6b6ae5f831b812555d11
|
dc31e4d5bb69945ae41753f494896aacea272133
|
refs/heads/master
| 2023-03-02T01:42:41.507055
| 2021-02-12T19:00:08
| 2021-02-12T19:00:08
| 276,473,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 172
|
r
|
lrt_calc.R
|
# Convert a likelihood-ratio test to a -log10(p-value).
#
# Args:
#   ll_null: log-likelihood term for the null model.
#   ll_test: log-likelihood term for the tested model.
# Returns -log10 of the p-value from a 1-df chi-square test on
# (ll_null - ll_test); the log-scale p-value is halved before conversion.
# NOTE(review): the halving and the sign of the statistic mirror the
# original implementation; confirm against the intended LRT convention.
lrt_calc <-
  function(ll_null, ll_test) {
    chi_stat <- ll_null - ll_test
    half_log_p <- pchisq(chi_stat, df = 1, lower.tail = FALSE, log.p = TRUE) / 2
    -log10(exp(half_log_p))
  }
|
d908e2e0c641cb37b375337624a01fa0ab293547
|
da7b0f2eaab98442566c5d8bcad60ac1d72443ae
|
/code.R
|
518fbbcd0f35b01fadede40f84d04a5137057edc
|
[] |
no_license
|
priyanka-sharma29/Hypothesis-Testing-Statistical-Analysis-Regression-Modeling
|
6fa49ab8bc14e26afad2da303e7466a10a61baac
|
851daa3293913a73c417274baa648d587a0f42f5
|
refs/heads/master
| 2020-03-06T20:54:31.690607
| 2018-08-17T08:24:58
| 2018-08-17T08:24:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,709
|
r
|
code.R
|
# North Carolina county crime analysis: loads the crime panel, joins county
# names, removes outlier rows, explores correlations/plots against crime
# rate (crmrte), and fits a final linear model.
#Read the file (chosen interactively)
crime1 <- read.csv(file.choose(), header=T)
#Joining the county file
# NOTE(review): hard-coded local Windows path; the second positional
# argument (1) is read.csv's `header`, i.e. header = TRUE.
county<-read.csv("C:\\Users\\priya\\Desktop\\Fall 2017\\STATS\\Final Project\\data\\NC_Counties.csv",1 )
View(county)
county<-na.omit(county)
View(county)
crimenc<- merge(crime1,county,by.x="county", by.y="County.Code")
crimenc$X <- NULL
View(crimenc)
colnames(crimenc)[25] <- "CountyName"
# Drop manually identified outlier rows.
out <- c(440,175,174,586,353,435,436,437,438,439,441)
crimeout <- crimenc[-out,]
# Plotting dependent variable crmrte with other independent variables as per hypothesis:
qplot(y=crmrte, x=prbarr, data=crimeout, main="CrimeRate and Probability of Arrest",geom=c("point", "smooth"))
cor(crimeout$prbarr,crimeout$crmrte)
qplot(y=crmrte, x=prbpris, data=crimeout, main="CrimeRate and Probability of Prison",geom=c("point", "smooth"))
cor(crimeout$prbpris,crimeout$crmrte)
qplot(y=crmrte, x=pctymle, data=crimeout, main="CrimeRate and Percentage of male",geom=c("point", "smooth"))
cor(crimeout$pctymle,crimeout$crmrte)
qplot(y=crmrte, x=taxpc, data=crimeout, main="CrimeRate and Tax per Capita",geom=c("point", "smooth"))
cor(crimeout$taxpc,crimeout$crmrte)
#CrimeRate and PolicePerCapita
plot(density(crimenc$crmrte))
plot(density(crimeout$crmrte)) # Plot after removing outliers is better
qplot(y=crmrte, x=polpc, data=crimeout, main="PolicePerCapita and CrimeRate",xlab="Police Per Capita",ylab="Crime Rate (Crimes per Capita)",geom=c("point", "smooth"))
cor(crimeout$crmrte,crimeout$polpc)
crime_polpc<- t.test(crimeout$crmrte,crimeout$polpc, conf.level = 0.95)
crime_polpc
dev.off()
# Yearly means of crime rate and police per capita.
plot(aggregate(crmrte ~ year, data=crimeout, FUN="mean"), main="CrimeRate over the years",xlab="Years",ylab="Crime Rate (Crimes per Capita)")
plot(aggregate(polpc ~ year, data=crimeout, FUN="mean"), main="Police Per Capita over the years",xlab="Years",ylab="Police Per Capita")
crime_byyear<- aggregate(crmrte ~ year, data=crimeout, FUN="mean")
polpc_byyear<- aggregate(polpc ~ year, data=crimeout, FUN="mean")
plot(crime_byyear$crmrte~polpc_byyear$polpc, main="CrimeRate and PolicePerCapita")
abline(lm(crime_byyear$crmrte~polpc_byyear$polpc))
cor(crime_byyear$crmrte,polpc_byyear$polpc)
# Split by region and repeat the crime-vs-police comparison per region.
west <- crimeout[crimeout$region=="west",]
central <- crimeout[crimeout$region=="central",]
other <- crimeout[crimeout$region=="other",]
crmrteagwest <- aggregate(crmrte ~ year, data= west, FUN = "mean")
crmrteagcentral <- aggregate(crmrte ~ year, data= central, FUN = "mean")
crmrteagother <- aggregate(crmrte ~ year, data= other, FUN = "mean")
polpcagwest <- aggregate(polpc ~ year, data= west, FUN = "mean")
polpcagcentral <- aggregate(polpc ~ year, data= central, FUN = "mean")
polpcagother <- aggregate(polpc ~ year, data= other, FUN = "mean")
par(mfrow=c(2,2))
plot(crmrteagwest$crmrte~ polpcagwest$polpc,main="Crime Rate vs Police per Capita in the West",xlab="Police Per Capita",ylab="Crime Rate (per Capita)")
abline(lm(crmrteagwest$crmrte~ polpcagwest$polpc),col="red",lwd=2)
cor(crmrteagwest$crmrte, polpcagwest$polpc)
plot(crmrteagcentral$crmrte~ polpcagcentral$polpc,main="Crime Rate vs Police per Capita in Central",xlab="Police Per Capita",ylab="Crime Rate (per Capita)")
abline(lm(crmrteagcentral$crmrte~ polpcagcentral$polpc),col="red",lwd=2)
cor(crmrteagcentral$crmrte, polpcagcentral$polpc)
plot(crmrteagother$crmrte~ polpcagother$polpc,main="Crime Rate vs Police per Capita in other regions",xlab="Police Per Capita",ylab="Crime Rate (per Capita)")
abline(lm(crmrteagother$crmrte~ polpcagother$polpc),col="red",lwd=2)
cor(crmrteagother$crmrte, polpcagother$polpc)
#Crime rate by region (Not holding constant for years, may not be useful)
par(mfrow=c(2,2))
qplot(y=crmrte, x=polpc, data=west, main="Crime Vs. Police - West", geom=c("point", "smooth"))
qplot(y=crmrte, x=polpc, data=central, main="Crime Vs. Police - Central", geom=c("point", "smooth"))
qplot(y=crmrte, x=polpc, data=other, main="Crime Vs. Police - Other", geom=c("point", "smooth"))
cor(crime_byyear$crmrte,polpc_byyear$polpc)
crime_polpc1<- t.test(crime_byyear$crmrte,polpc_byyear$polpc, conf.level = 0.95)
crime_polpc1
#CrimeRate and Density
qplot(y=crmrte, x=density, data=crimeout,xlab="Density (Hundreds of people per km)",ylab="Crime Rate (Crimes per Capita)", main="Crime Rate and Density" , geom=c("point", "smooth"))
qplot(y=crmrte, x=density, data=crimeout,xlab="Density",ylab="Crime Rate", main="Crime Rate and Density" , facets=~year, geom=c("point", "smooth"))
qplot(y=crmrte, x=density, data=crimeout,xlab="Density",ylab="Crime Rate", main="Crime Rate and Density" , facets=~smsa, geom=c("point", "smooth"))
qplot(y=crmrte, x=density, data=crimeout,xlab="Density",ylab="Crime Rate", main="Crime Rate and Density" , facets=~region, geom=c("point", "smooth"))
#NUll Hypothesis -> There is no relation between Crime Rate and Density
#Alternate Hypothesis -> There is a relation between Crime Rate and Density
mod_density<- t.test(crimeout$crmrte, crimeout$density, conf.level = 0.95)
mod_density
#Since the P-value is less than 0.05 we reject the null and accept the alternate hypothesis.
#Co-relation between Crime rate and density:
cor(crimeout$crmrte,crimeout$density)
#CrimeRate and Wages
# Average private-sector and government wages per row.
privwageavg<- (crimeout$wcon
               + crimeout$wfir
               + crimeout$wtuc
               + crimeout$wtrd
               + crimeout$wser)/5
govwageavg<- (crimeout$wloc
              + crimeout$wsta
              + crimeout$wfed)/3
crimeout["PrivAvgWage"] <- privwageavg
crimeout["GovAvgWage"] <- govwageavg
# Standardizing wages:
# Deflate nominal wages by a per-year factor (90.9 is the base-year index);
# rep(..., length.out) recycles the 7 yearly factors across all rows.
ln <- length(crimeout$county)
rw <- c(90.9/90.9, 90.9/96.5, 90.9/99.6, 90.9/103.9, 90.9/107.6, 90.9/109.6, 90.9/113.6)
real<- rep(rw, length.out = ln)
crimeout$rprivwageavg <- crimeout$PrivAvgWage*real
crimeout["RealPrivAvgWage"] <- crimeout$rprivwageavg
crimeout$rgovwageavg <- crimeout$GovAvgWage*real
crimeout["RealGovAvgWage"] <- crimeout$rgovwageavg
length(crimeout$rprivwageavg)
View(crimeout)
length(crimeout$PrivAvgWage)
length(crimeout$year)
wages_by_year<-aggregate(PrivAvgWage+GovAvgWage ~ year, data=crimeout,FUN = "mean")
plot(crime_byyear$crmrte~wages_by_year$`PrivAvgWage + GovAvgWage`)
abline(lm(crime_byyear$crmrte~wages_by_year$`PrivAvgWage + GovAvgWage`),col="red",lw=2)
cor(crime_byyear$crmrte,wages_by_year$`PrivAvgWage + GovAvgWage`)
wages_by_year<-aggregate(PrivAvgWage ~ year, data=crimeout,FUN = "mean")
plot(crime_byyear$crmrte~wages_by_year$PrivAvgWage)
abline(lm(crime_byyear$crmrte~wages_by_year$PrivAvgWage))
cor(crime_byyear$crmrte,wages_by_year$PrivAvgWage)
wages_by_year<-aggregate(GovAvgWage ~ year, data=crimeout,FUN = "mean")
plot(crime_byyear$crmrte~wages_by_year$GovAvgWage)
abline(lm(crime_byyear$crmrte~wages_by_year$GovAvgWage))
cor(crime_byyear$crmrte,wages_by_year$GovAvgWage)
# NOTE(review): wages_by_year was reassigned above (now only GovAvgWage),
# so the backticked column below is NULL and this silently runs as a
# ONE-sample t-test — likely not what was intended.
crime_wages<- t.test(crime_byyear$crmrte,wages_by_year$`PrivAvgWage + GovAvgWage`, conf.level = 0.95)
crime_wages
View(crimeout)
modsmden<- lm(crmrte ~ density, data=crimeout)
summary(modsmden)
# NOTE(review): axis labels are swapped relative to the aes() mapping
# (x is crmrte but labelled "Police Per Capita") — confirm intent.
ggplot(data=crimeout, # data
       aes(x=crmrte, y=polpc, color=region)) +
  geom_point(pch=20, size=3) +
  stat_smooth(method="lm", se=F, lwd=1.5) +
  labs(title="Crime Rate vs. Police per Capita", x="Police Per Capita", y="Crime Per Capita") # annotation
View(describe(crimeout$crmrte))
# Building final model from the statistically significant variables:
model_final <- lm(crmrte ~ polpc+prbarr+density+prbconv, data = crimeout)
summary(model_final)
# Since the adjusted R-squared value is 65.8%, this model can predict 65.8% of a county's crime rate
# given the variables chosen.
|
96498262bd0592045493ae35543f10033a30cac6
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610056345-test.R
|
2beafc8386210f4a7301dea8081d82455a534718
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
1610056345-test.R
|
# Auto-generated fuzzing harness (libFuzzer/valgrind run): replays one
# captured input against grattan's internal anyOutside() and prints the
# structure of the result.
testlist <- list(a = 1650614882L, b = 1650614882L, x = c(1650614882L, 1650614882L, 1650614882L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
d1d2e711a159176de181632f75203533856a744e
|
20f94728c7b940abe1b4232f697811984a305cd0
|
/plot3.R
|
5650dda6bb692a86b8e98799f309dfca0d05afe5
|
[] |
no_license
|
MichaelSzczepaniak/ExData_Plotting1
|
9fc8f87f2e9536b9ce90bb4197af72d201b524f4
|
441c117c87ee4cd88adb254e1ec7d211123c2c0f
|
refs/heads/master
| 2020-12-25T07:22:00.398890
| 2015-08-05T16:45:04
| 2015-08-05T16:50:56
| 40,136,474
| 0
| 0
| null | 2015-08-03T16:46:55
| 2015-08-03T16:46:55
| null |
UTF-8
|
R
| false
| false
| 2,136
|
r
|
plot3.R
|
## Reads in the data to generate plot
## fileName - location of the data file, e.g. "household_power_consumption.txt"
## startRow - first row of the data file to be read
## readCount - number of rows of the data file to be read
## Returns a data.frame of readCount rows whose (syntactically cleaned)
## column names come from the file's own header row.
readHouseholdPowerData <-
  function(fileName, startRow, readCount) {
    # First two columns (Date, Time) are character; the rest numeric.
    colClasses <- c("character", "character", rep("numeric", 7))
    con <- file(fileName)
    open(con)
    # Read the real header row from the top of the file before skipping.
    headers <- unlist(read.table(con, sep = ";", nrows = 1,
                                 colClasses = "character",
                                 header = FALSE,
                                 stringsAsFactors = FALSE)[1, ]
    )
    # NOTE(review): header = TRUE after skip consumes one extra data row as
    # a throwaway "header" (its names are overwritten below); the caller's
    # startRow appears to account for this — confirm against plot1.R.
    data <- read.table(con, header = TRUE, sep = ";",
                       colClasses = colClasses,
                       skip = (startRow - 1), nrows = readCount,
                       stringsAsFactors = FALSE)
    names(data) <- make.names(headers) #, allow_ = FALSE)
    close(con)
    return(data)
  }
# Read data for just 2007-02-01 and 2007-02-02. This data starts at row 66638
# and continues for 2880 records. See comments in plot1.R for details.
data <- readHouseholdPowerData(fileName = "household_power_consumption.txt",
                               startRow = 66638, readCount = 2880)
# Combine Date + Time into a single POSIXlt datetime column.
dtFormat <- "%d/%m/%Y %H:%M:%S"
# dplyr mutate doesn't support POSIXlt, so use base R method
data$datetime <- strptime(paste(data$Date, data$Time), dtFormat)
# write directly to the png device (bypassing window) to fix legend trunc'n:
# http://stackoverflow.com/questions/9400194/legend-truncated-when-saving-as-pdf-using-saveplot
png(file = "plot3.png", width = 480, height = 480, units = "px")
# Empty frame first (type = "n"), then one colored line per sub-meter;
# legend labels reuse the sub-metering column names (columns 7-9).
with(data, plot(datetime, Sub_metering_1, type = "n",
                xlab = "", ylab = "Energy sub metering"))
with(data, lines(datetime, Sub_metering_1, col = "black"))
with(data, lines(datetime, Sub_metering_2, col = "red"))
with(data, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c(names(data[7]), names(data[8]), names(data[9])))
dev.off()
|
b91a635cd2c967eb1429d9614c5161c2c9ea69bb
|
54494856f914c185be05a5acaa3f3527068e6af8
|
/Contest1/models/pca_prep.R
|
3fcca4f3cd903fe7588b48253231bb00218db24d
|
[] |
no_license
|
tmastny/machine_learning_class
|
eba8ca58ebef177fdd3d98ac89982412abfc1ad1
|
55020ea278aba7ee99176627a10e52f90a204cb3
|
refs/heads/master
| 2021-04-27T19:30:46.426001
| 2018-03-26T20:49:48
| 2018-03-26T20:49:48
| 122,357,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 660
|
r
|
pca_prep.R
|
# Build PCA features for the painting-classification contest: center and
# scale all predictors, keep enough principal components for 75% of the
# variance, and write the transformed train/test sets to CSV.
library(tidyverse)
library(recipes)
library(caret)
paintings <- read_csv('Contest1/train.csv')
test_data <- read_csv('Contest1/test.csv')
# head() suffices here: recipe() only needs the column template at this point.
paint_recipe <- recipe(class ~ ., data = head(paintings))
paint_recipe <- paint_recipe %>%
  step_center(all_predictors()) %>%
  step_scale(all_predictors()) %>%
  step_pca(all_predictors(), threshold = 0.75)
# Estimate the preprocessing statistics on the full training set.
pca_paint_trained <- prep(paint_recipe, training = paintings)
training_data <- bake(pca_paint_trained, newdata = paintings)
write_csv(training_data, 'Contest1/models/pca_data.csv')
# The test set gets the SAME trained transformation (no re-estimation).
testing_data <- bake(pca_paint_trained, newdata = test_data)
write_csv(testing_data, 'Contest1/models/testing_data.csv')
|
ec36cbf06982c91b8e86187e590cf534a2603ff0
|
a01c9b894cbfe75c924bdf8d563e4021fb3b1e0d
|
/Code for separate plots/screes.R
|
ec9c3f7375ead251b5f9d354241d1c72d60792de
|
[] |
no_license
|
hansronald/kandidaten
|
5a7fc930ffb74013c30fff47100d46faae0f3bc8
|
b78badf6ce840967bf8d0fbbbaf8837e664590de
|
refs/heads/master
| 2018-07-15T03:41:10.079267
| 2018-06-01T14:22:29
| 2018-06-01T14:22:29
| 126,025,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,378
|
r
|
screes.R
|
# Scree plots and condition numbers for four simulated covariance models.
# For each model: read a precision matrix, invert it to a covariance,
# simulate n observations (mvrnorm), take the sample covariance, zero out
# near-zero eigenvalues, and compare eigenvalue decay for powers
# k = 1, 1.5, 2 (D^(1/k)). Legends show the condition number per k.
# NOTE(review): mvrnorm is from MASS, but no library(MASS) call is visible
# in this file — confirm it is loaded elsewhere.
setwd("/Users/rebecka/Documents/Dokument/MSG900/Kod/precisionsmatriser")
prec = as.matrix(read.table("model_p200.csv", sep=','))
cov200 = solve(prec)
n = 80
datagen = mvrnorm(n = n, mu = integer(dim(cov200)[1]), Sigma = cov200, tol = 1e-6, empirical = FALSE, EISPACK = FALSE)
cov200 = cov(datagen)
prec = as.matrix(read.table("model_p500.csv",sep=','))
cov500 = solve(prec)
n = 200
datagen = mvrnorm(n = n, mu = integer(dim(cov500)[1]), Sigma = cov500, tol = 1e-6, empirical = FALSE, EISPACK = FALSE)
cov500 = cov(datagen)
prec = as.matrix(read.table("Model-4-200.csv",sep=','))
cov4 = solve(prec)
n = 80
datagen = mvrnorm(n = n, mu = integer(dim(cov4)[1]), Sigma = cov4, tol = 1e-6, empirical = FALSE, EISPACK = FALSE)
cov4 = cov(datagen)
prec = as.matrix(read.table("model_4-200-offlinks_new.csv",sep=','))
cov4plus = solve(prec)
n = 80
datagen = mvrnorm(n = n, mu = integer(dim(cov4plus)[1]), Sigma = cov4plus, tol = 1e-6, empirical = FALSE, EISPACK = FALSE)
cov4plus = cov(datagen)
tol = 1e-10   # eigenvalues with |value| below this are treated as exactly zero
par(mar=c(5.1, 5.1, 4.1, 2.1))
# --- Model p200: eigendecompose, rebuild covariances for k = 1/1.5/2 ---
E = eigen(cov200)
E_values = E$values
E_values[which(abs(E_values) < tol)] = 0
D1 = diag(E_values)
D15 = diag(E_values)^(1/1.5)
D2 = diag(E_values)^(1/2)
P = E$vectors
cov200 = P %*% D1 %*% t(P)
cov200_15 = P %*% D15 %*% t(P)
cov200_2 = P %*% D2 %*% t(P)
range(E_values)
# kappa200 = signif(kappa(cov200),2)
# kappa200_15 = signif(kappa(cov200_15),2)
# kappa200_2 = signif(kappa(cov200_2),2)
# Condition number as ratio of largest to smallest NONZERO eigenvalue,
# each raised to 1/k.
kappa200 = signif(max(E_values)/min(E_values[which(E_values != 0)]),2)
kappa200_15 = signif((max(E_values)^(1/1.5))/(min(E_values[which(E_values != 0)])^(1/1.5)),2)
kappa200_2 = signif((max(E_values)^(1/2))/(min(E_values[which(E_values != 0)])^(1/2)),2)
# Scree plot: first 5 eigenvalues (k = 1), overlaid with the k = 1.5 and
# k = 2 diagonals; Swedish labels: Egenvärden = eigenvalues.
plot(E$values[seq(1,5)], ylab = "Egenvärden", xlab = "Index för egenvärden", ylim=c(0, max(E$values)), type = 'b', cex.lab=1.8, cex.axis=1.8)
lines(diag(D15), type="b", lty = 2)
lines(diag(D2),type="b", lty = 3)
legend("topright", c(paste("k = 1 (",kappa200,")",sep=""), paste("k = 1.5 (", kappa200_15, ")",sep=""), paste("k = 2 (", kappa200_2, ")",sep="")), lty = c(1,2,3), cex=1.8)
# --- Model p500: same procedure ---
E = eigen(cov500)
E_values = E$values
E_values[which(abs(E_values) < tol)] = 0
D1 = diag(E_values)
D15 = diag(E_values)^(1/1.5)
D2 = diag(E_values)^(1/2)
P = E$vectors
cov500 = P %*% D1 %*% t(P)
cov500_15 = P %*% D15 %*% t(P)
cov500_2 = P %*% D2 %*% t(P)
range(E_values)
# The kappa() values below are immediately overwritten by the
# eigenvalue-ratio versions.
kappa500 = signif(kappa(cov500),2)
kappa500_15 = signif(kappa(cov500_15),2)
kappa500_2 = signif(kappa(cov500_2),2)
kappa500 = signif(max(E_values)/min(E_values[which(E_values != 0)]),2)
kappa500_15 = signif((max(E_values)^(1/1.5))/(min(E_values[which(E_values != 0)])^(1/1.5)),2)
kappa500_2 = signif((max(E_values)^(1/2))/(min(E_values[which(E_values != 0)])^(1/2)),2)
plot(E$values[seq(1,5)], ylab = "Egenvärden", xlab = "Index för egenvärden", ylim=c(0, max(E$values)), type = 'b', cex.lab=1.8, cex.axis=1.8)
lines(diag(D15), type="b", lty = 2)
lines(diag(D2), type="b",lty = 3)
legend("topright", c(paste("k = 1 (", kappa500, ")",sep=""), paste("k = 1.5 (", kappa500_15, ")",sep=""), paste("k = 2 (", kappa500_2, ")",sep="")), lty = c(1,2,3), cex=1.8)
# --- Model 4 (200): same procedure ---
E = eigen(cov4)
E_values = E$values
E_values[which(abs(E_values) < tol)] = 0
D1 = diag(E_values)
D15 = diag(E_values)^(1/1.5)
D2 = diag(E_values)^(1/2)
P = E$vectors
cov4 = P %*% D1 %*% t(P)
cov4_15 = P %*% D15 %*% t(P)
cov4_2 = P %*% D2 %*% t(P)
range(E_values)
# kappa4 = signif(kappa(cov4),2)
# kappa4_15 = signif(kappa(cov4_15),2)
# kappa4_2 = signif(kappa(cov4_2),2)
kappa4 = signif(max(E_values)/min(E_values[which(E_values != 0)]),2)
kappa4_15 = signif((max(E_values)^(1/1.5))/(min(E_values[which(E_values != 0)])^(1/1.5)),2)
kappa4_2 = signif((max(E_values)^(1/2))/(min(E_values[which(E_values != 0)])^(1/2)),2)
plot(E$values[seq(1,5)], ylab = "Egenvärden", xlab = "Index för egenvärden", ylim=c(0, max(E$values)), type = 'b', cex.lab=1.8, cex.axis=1.8)
lines(diag(D15), type="b", lty = 2)
lines(diag(D2), type="b",lty = 3)
legend("topright", c(paste("k = 1 (", kappa4, ")",sep=""), paste("k = 1.5 (", kappa4_15, ")",sep=""), paste("k = 2 (", kappa4_2, ")",sep="")), lty = c(1,2,3), cex=1.8)
# --- Model 4 (200, off-links): same procedure ---
E = eigen(cov4plus)
E_values = E$values
E_values[which(abs(E_values) < tol)] = 0
D1 = diag(E_values)
D15 = diag(E_values)^(1/1.5)
D2 = diag(E_values)^(1/2)
P = E$vectors
cov4plus = P %*% D1 %*% t(P)
cov4plus_15 = P %*% D15 %*% t(P)
cov4plus_2 = P %*% D2 %*% t(P)
range(E_values)
# kappa4plus = signif(kappa(cov4plus),2)
# kappa4plus_15 = signif(kappa(cov4plus_15),2)
# kappa4plus_2 = signif(kappa(cov4plus_2),2)
kappa4plus = signif(max(E_values)/min(E_values[which(E_values != 0)]),2)
kappa4plus_15 = signif((max(E_values)^(1/1.5))/(min(E_values[which(E_values != 0)])^(1/1.5)),2)
kappa4plus_2 = signif((max(E_values)^(1/2))/(min(E_values[which(E_values != 0)])^(1/2)),2)
plot(E$values[seq(1,5)], ylab = "Egenvärden", xlab = "Index för egenvärden", ylim=c(0, max(E$values)), type = 'b', cex.lab=1.8, cex.axis=1.8)
lines(diag(D15), type="b", lty = 2)
lines(diag(D2), type="b",lty = 3)
legend("topright", c(paste("k = 1 (", kappa4plus, ")",sep=""), paste("k = 1.5 (", kappa4plus_15, ")",sep=""), paste("k = 2 (", kappa4plus_2, ")",sep="")), lty = c(1,2,3), cex=1.8)
#----------
# par(mar=c(5.1, 5.1, 4.1, 2.1))
# E = eigen(sigmahat)
# E_values = E$values
# E_values[which(abs(E_values) < tol)] = 0
# D1 = diag(E_values)
# D15 = diag(E_values)^(1/1.5)
# D2 = diag(E_values)^(1/2)
# P = E$vectors
# sigmahat = P %*% D1 %*% t(P)
# sigmahat_15 = P %*% D15 %*% t(P)
# sigmahat_2 = P %*% D2 %*% t(P)
# range(E_values)
#
#
# # kappa200 = signif(kappa(cov200),2)
# # kappa200_15 = signif(kappa(cov200_15),2)
# # kappa200_2 = signif(kappa(cov200_2),2)
#
# kappasigmahat = signif(max(E_values)/min(E_values[which(E_values != 0)]),2)
# kappasigmahat_15 = signif((max(E_values)^(1/1.5))/(min(E_values[which(E_values != 0)])^(1/1.5)),2)
# kappasigmahat_2 = signif((max(E_values)^(1/2))/(min(E_values[which(E_values != 0)])^(1/2)),2)
#
# NOTE(review): the plot below reuses E/D15/D2 from the last active section
# (cov4plus) but references kappasigmahat* which are only defined in the
# commented block above — running it fresh will error unless that block is
# uncommented first.
plot(E$values[seq(1,5)], ylab = "Egenvärden", xlab = "Index för egenvärden", ylim=c(0, max(E$values)), type = 'b', cex.lab=1.8, cex.axis=1.8)
lines(diag(D15),type="b", lty = 2)
lines(diag(D2),type="b", lty = 3)
legend("topright", c(paste("k = 1 (", kappasigmahat, ")",sep=""), paste("k = 1.5 (", kappasigmahat_15, ")",sep=""), paste("k = 2 (", kappasigmahat_2, ")",sep="")), lty = c(1,2,3), cex=1.8)
#
#
|
f87aee12a3260ed48866119bc134784001b0f28a
|
be54ba5a39bafbfbe7935d15c8957b92ee61ff20
|
/scripts/access_spotify_data.R
|
ca0e2d7cd2ae5fb44567e6fde98a5d260667c0d6
|
[
"MIT"
] |
permissive
|
StephaininZ/spotify_data
|
f72d1baf7a5118554c85123376fc63c5518e80a6
|
8b65cf0686afe76cb6eb7704c0c73d5b7a81c6e6
|
refs/heads/main
| 2023-02-23T05:30:41.409377
| 2021-02-02T00:16:11
| 2021-02-02T00:16:11
| 335,115,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,197
|
r
|
access_spotify_data.R
|
#### Preamble ####
# Purpose: Access data retrieved from Spotify API
# Author: Yingying Zhou
# Data: 01 February 2021
# Contact: yingying.zhou@utoronto.ca
# License: MIT
# Pre-requisites:
# - Register for a Spotify developer account
# - Obtain Client ID and Client Secret from App and save locally in .Renviron
# - Need to have downloaded data needed and saved it to inputs/data
# - Don't forget to gitignore it!
#### Workspace setup ####
#devtools::install_github('charlie86/spotifyr')
library(spotifyr)
library(usethis)
usethis::edit_r_environ() # opens .Renviron interactively to set the Spotify credentials
library(dplyr)
library(ggplot2)
library(httr)
library(httpuv)
library(kableExtra)
library(purrr)
#### Download data of interest ####
# Pull BTS's full audio-feature table, cache it as RDS, then reload from cache.
bts <- get_artist_audio_features('bts')
saveRDS(bts, "inputs/bts.rds")
bts <- readRDS("inputs/bts.rds")
names(bts) # check column variables
# View artist name, track name, and album name for BTS
bts %>%
  select(artist_name, track_name, album_name) %>%
  head()
# Compare the sentiment score distribution of BTS vs. BlackPink
bp <- get_artist_audio_features('blackpink')
saveRDS(bp, "inputs/bp.rds")
bp <- readRDS("inputs/bp.rds")
# Scatter of valence by album release year, colored by group.
tibble(name = c(bp$artist_name, bts$artist_name),
       year = c(bp$album_release_year, bts$album_release_year),
       valence = c(bp$valence, bts$valence)
) %>%
  ggplot(aes(x = year, y = valence, color = name)) +
  geom_point() +
  theme_minimal() +
  labs(x = "Year",
       y = "Valence",
       color = "Name") +
  scale_color_brewer(palette = "Set1")
# Find All Time Favorite Artists
# NOTE(review): get_my_top_artists_or_tracks requires user-level OAuth
# authorization, not just client credentials.
top_artists <- get_my_top_artists_or_tracks(type = 'artists', time_range = 'long_term', limit = 15)
saveRDS(top_artists, "inputs/top_artists.rds")
top_artists <- readRDS("inputs/top_artists.rds")
top_artists %>%
  select(name, popularity) %>%
  kableExtra::kbl(caption = "All Time Favorite Artists") %>%
  kableExtra::kable_styling()
# Find Favorite Tracks at the Moment
get_my_top_artists_or_tracks(type = 'tracks', time_range = 'short_term', limit = 5) %>%
  mutate(artist.name = map_chr(artists, function(x) x$name[1])) %>%
  select(name, artist.name, album.name) %>%
  kableExtra::kbl(caption = "Favorite Tracks at the Moment") %>%
  kableExtra::kable_styling()
|
be9032ba711afab634c63bf3dfb7bf0c50128b1e
|
fe87c441eb8e2994484666125469c395d4435140
|
/man/watbal.update.Rd
|
900c5b3fcac99844bce66bafc561e0beadb33568
|
[] |
no_license
|
ritviksahajpal/ZeBook
|
a655229af0e6b6038bbfe456592582b5e299477a
|
36d79718a605c2f996a7d1a44694860ff0621f2d
|
refs/heads/master
| 2021-01-13T17:23:28.970824
| 2013-06-17T00:00:00
| 2013-06-17T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 667
|
rd
|
watbal.update.Rd
|
\name{watbal.update}
\alias{watbal.update}
\title{WaterBalance model - calculate change in soil water for one day}
\usage{
watbal.update(WAT0, RAIN, ETr, param, WP, FC)
}
\arguments{
\item{WAT0}{: Water at the beginning of the day (mm).}
\item{RAIN}{: Rainfall of day (mm)}
\item{ETr}{: Evapotranspiration of day (mm)}
\item{param}{: a vector of parameters}
\item{FC}{: Water content at field capacity (cm^3.cm^-3)}
\item{WP}{: Water content at wilting point (cm^3.cm^-3)}
}
\value{
WAT1 : Water at the beginning of the day+1 (mm).
}
\description{
WaterBalance model - calculate change in soil water for
one day
}
|
7e73ade10bfe90e45492e28daa5288fbf91f9837
|
9b294e57af2624950201131ba07e73e49c9600e3
|
/R/semi.markov.3states.ic.R
|
02b2ee47583ee5117dfcbfb348cffcd6b9b9fd62
|
[] |
no_license
|
cran/RISCA
|
fa0f27acfe1e9f492f0bf081d952e9285b8723b4
|
64415a11d20579620602aa7d04684acdd1811cbf
|
refs/heads/master
| 2023-04-06T02:21:48.922948
| 2023-03-22T17:50:02
| 2023-03-22T17:50:02
| 236,882,460
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 44,600
|
r
|
semi.markov.3states.ic.R
|
semi.markov.3states.ic <-function(times0, times1, times2, sequences, weights=NULL, dist, cuts.12=NULL,cuts.13=NULL,cuts.23=NULL,ini.dist.12=NULL, ini.dist.13=NULL, ini.dist.23=NULL, cov.12=NULL, init.cov.12=NULL, names.12=NULL, cov.13=NULL, init.cov.13=NULL, names.13=NULL, cov.23=NULL, init.cov.23=NULL, names.23=NULL, conf.int=TRUE, silent=TRUE, precision=10^(-6), legendre=30, homogeneous=TRUE)
{
#check conditions
if (missing(times0))
stop("Argument 'times0' is missing with no default")
if (missing(times1))
stop("Argument 'times1' is missing with no default")
if (missing(times2))
stop("Argument 'times2' is missing with no default")
if (missing(sequences))
stop("Argument 'sequences' is missing with no default")
if (missing(dist))
stop("Argument 'dist' is missing with no default")
if (!is.vector(times0) | !is.numeric(times0))
stop("Argument 'times0' must be a numeric vector")
if (min(times0,na.rm=T)<0)
stop("Negative values for 'times0' are not allowed")
if (is.na(min(times0)))
warning("individuals with missing values for 'times0' will be removed \n")
if (!is.vector(times1) | !is.numeric(times1))
stop("Argument 'times1' must be a numeric vector")
if (min(times1,na.rm=T)<0)
stop("Negative values for 'times1' are not allowed")
if (!is.vector(times2) | !is.numeric(times2))
stop("Argument 'times2' must be a numeric vector")
if (min(times2,na.rm=T)<0)
stop("Negative values for 'times2' are not allowed")
if (!is.vector(sequences) | !is.numeric(sequences) | (min(names(table(sequences)) %in% c(1,12,13,123))==0) )
stop("Argument 'sequences' must be a numeric vector with values 1, 12, 13, or 123")
if (min( c(1,12,13,123) %in% names(table(sequences))) ==0)
warning("all sequencess (1, 12, 13, 123) are not present \n ")
if (min(length(times0),length(times1),length(times2),length(sequences)) != max(length(times0),length(times1),length(times2),length(sequences)))
stop("Arguments 'times0', 'times1', 'times2', and 'sequences' need to have the same number of rows")
if (!all(is.na(times1[which(sequences==1 | sequences==13)])))
stop("Arguments 'times1' should be NA for individuals right-censored in X=1 or individuals who directly transited from X=1 to X=3")
if (!all(times0[which(sequences==1| sequences==13)]<= times2[which(sequences==1| sequences==13)],na.rm=T) | !all(times0[which(sequences==12| sequences==123)]<= times1[which(sequences==12| sequences==123)],na.rm=T) | !all(times1[which(sequences==12| sequences==123)]<= times2[which(sequences==12| sequences==123)],na.rm=T))
stop("Arguments 'times0','times1','times2': observed times do not respect a chronological order (times0<=times1<=times2)")
if (!all(times0[which(sequences==1| sequences==13)]!= times2[which(sequences==1| sequences==13)],na.rm=T))
warning("times0=times2 for some right-censored individuals in X=1 or who directly transited from X=1 to X=3 \n")
if (!all(times0[which(sequences==12| sequences==123)]!= times1[which(sequences==12| sequences==123)],na.rm=T))
warning("times0=times1 for some individuals who transited in X=2 \n")
if (!all(times1[which(sequences==12| sequences==123)]!= times2[which(sequences==12| sequences==123)],na.rm=T))
warning("times1=times2 for some individuals who transited in X=2 \n")
if(!is.null(weights))
{
if (!is.vector(weights) | !is.numeric(weights))
stop("Argument 'weights' must be a numeric vector")
if (min(weights,na.rm=T)<0)
stop("Negative values for 'weights' are not allowed")
if (is.na(min(weights)))
warning("individuals with missing values for 'weights' will be removed \n")
}
if(length(dist)!=3)
{stop("Argument 'dist' have to contain 3 values")}
if(!(dist[1] %in% c("PE","E","W","WG")))
{stop("Argument 'dist': incorrect distribution for transition 12")}
if(!(dist[2] %in% c("PE","E","W","WG")))
{stop("Argument 'dist': incorrect distribution for transition 13")}
if(!(dist[3] %in% c("PE","E","W","WG")))
{stop("Argument 'dist': incorrect distribution for transition 23")}
if(dist[1]!="PE" & (!is.null(cuts.12)))
{stop("Arguments 'cuts.12' is only allowed for piecewise exponential distribution (PE for the first argument in 'dist')")}
if(dist[2]!="PE" & (!is.null(cuts.13)))
{stop("Arguments 'cuts.13' is only allowed for piecewise exponential distribution (PE for the second argument in 'dist')")}
if(dist[3]!="PE" & (!is.null(cuts.23)))
{stop("Arguments 'cuts.23' is only allowed for piecewise exponential distribution (PE for the third argument in 'dist')")}
if(dist[1]=="PE" & !is.null(cuts.12))
{
if (!all(is.numeric(cuts.12)) | !all(!is.na(cuts.12)) | !all(cuts.12>0) | !all(is.finite(cuts.12)) | is.unsorted(cuts.12))
{stop("Arguments 'cuts.12' must be a sorted vector with only positive and finite numeric values (internal timepoints)")}
}
if(dist[1]=="PE" & !is.null(cuts.12))
{
if (max(cuts.12)>=max(times1,na.rm=T))
{stop("Arguments 'cuts.12': check internal timepoints or time units (last internal timepoint is greater or equal to the maximum value for times1)")}
}
if(dist[2]=="PE" & !is.null(cuts.13))
{
if (!all(is.numeric(cuts.13)) | !all(!is.na(cuts.13)) | !all(cuts.13>0) | !all(is.finite(cuts.13)) | is.unsorted(cuts.13))
{stop("Arguments 'cuts.13' must be a sorted vector with only positive and finite numeric values (internal timepoints)")}
}
if(dist[2]=="PE" & !is.null(cuts.13))
{
if (max(cuts.13)>=max(times1,na.rm=T))
{stop("Arguments 'cuts.13': check internal timepoints or time units (last internal timepoint is greater or equal to the maximum value for times1)")}
}
if(dist[3]=="PE" & !is.null(cuts.23))
{
if (!all(is.numeric(cuts.23)) | !all(!is.na(cuts.23)) | !all(cuts.23>0) | !all(is.finite(cuts.23)) | is.unsorted(cuts.23))
{stop("Arguments 'cuts.23' must be a sorted vector with only positive and finite numeric values (internal timepoints)")}
}
if(dist[3]=="PE" & !is.null(cuts.23))
{
if (max(cuts.23)>=max(times1,na.rm=T))
{stop("Arguments 'cuts.23': check internal timepoints or time units (last internal timepoint is greater or equal to the maximum value for times1)")}
}
if(!is.null(ini.dist.12) & !is.numeric(ini.dist.12))
{stop("Argument 'ini.dist.12' must be a numeric vector (default is NULL)")}
if(!is.null(ini.dist.13) & !is.numeric(ini.dist.13))
{stop("Argument 'ini.dist.13' must be a numeric vector (default is NULL)")}
if(!is.null(ini.dist.23) & !is.numeric(ini.dist.23))
{stop("Argument 'ini.dist.23' must be a numeric vector (default is NULL)")}
if(dist[1]=="PE" & !is.null(ini.dist.12) & length(ini.dist.12)!=(length(cuts.12)+1))
{stop("Incorrect number of parameters initialized for transition 12 (piecewise model)")}
if(dist[2]=="PE" & !is.null(ini.dist.13) & length(ini.dist.13)!=(length(cuts.13)+1))
{stop("Incorrect number of parameters initialized for transition 13 (piecewise model)")}
if(dist[3]=="PE" & !is.null(ini.dist.23) & length(ini.dist.23)!=(length(cuts.23)+1))
{stop("Incorrect number of parameters initialized for transition 23 (piecewise model)")}
if( (dist[1]=="E" & is.null(cuts.12) & !is.null(ini.dist.12) & length(ini.dist.12)!=1) )
{stop("Exponential distribution (transition 12) needs initialization of one parameter")}
if( (dist[1]=="W" & is.null(cuts.12) & !is.null(ini.dist.12) & length(ini.dist.12)!=2) )
{stop("Weibull distribution (transition 12) needs initialization of two parameters")}
if( (dist[1]=="WG" & is.null(cuts.12) & !is.null(ini.dist.12) & length(ini.dist.12)!=3) )
{stop("Generalized Weibull distribution (transition 12) needs initialization of three parameters")}
if( (dist[2]=="E" & is.null(cuts.13) & !is.null(ini.dist.13) & length(ini.dist.13)!=1) )
{stop("Exponential distribution (transition 13) needs initialization of one parameter")}
if( (dist[2]=="W" & is.null(cuts.13) & !is.null(ini.dist.13) & length(ini.dist.13)!=2) )
{stop("Weibull distribution (transition 13) needs initialization of two parameters")}
if( (dist[2]=="WG" & is.null(cuts.13) & !is.null(ini.dist.13) & length(ini.dist.13)!=3) )
{stop("Generalized Weibull distribution (transition 13) needs initialization of three parameters")}
if( (dist[3]=="E" & is.null(cuts.23) & !is.null(ini.dist.23) & length(ini.dist.23)!=1) )
{stop("Exponential distribution (transition 23) needs initialization of one parameter")}
if( (dist[3]=="W" & is.null(cuts.23) & !is.null(ini.dist.23) & length(ini.dist.23)!=2) )
{stop("Weibull distribution (transition 23) needs initialization of two parameters")}
if( (dist[3]=="WG" & is.null(cuts.23) & !is.null(ini.dist.23) & length(ini.dist.23)!=3) )
{stop("Generalized Weibull distribution (transition 23) needs initialization of three parameters")}
if(!is.null(cov.12))
{
if ((!is.vector(cov.12) & !is.data.frame(cov.12) & !is.matrix(cov.12)) | !all(sapply(cov.12,is.numeric)))
{stop("Argument 'cov.12' must be a numeric matrix or data.frame (default is NULL)")}
if (nrow(data.frame(cov.12))!=length(times1))
{stop("Argument 'cov.12' needs to have the same number of rows than 'times1'")}
if (sum(apply(sapply(data.frame(cov.12),is.na),1,sum))>0)
warning(sum(apply(sapply(data.frame(cov.12),is.na),1,sum))," individuals with missing values on 'cov.12' will be removed from the analysis \n")
if(!is.null(init.cov.12))
{
if (!is.numeric(init.cov.12))
{stop("Argument 'init.cov.12' must be a numeric vector (default is NULL)")}
if (ncol(data.frame(cov.12))!=length(init.cov.12))
{stop("Argument 'init.cov.12' needs to have the same length than number of columns of 'cov.12'")}
}
if (!is.null(names.12))
{
if (!is.character(names.12))
{stop("Argument 'names.12' must be a character vector (default is NULL)")}
if (ncol(data.frame(cov.12))!=length(names.12))
{stop("Argument 'names.12' needs to have the same length than number of columns of 'cov.12'")}
}
}
if(!is.null(cov.13))
{
if ((!is.vector(cov.13) & !is.data.frame(cov.13) & !is.matrix(cov.13)) | !all(sapply(cov.13,is.numeric)))
{stop("Argument 'cov.13' must be a numeric matrix or data.frame (default is NULL)")}
if (nrow(data.frame(cov.13))!=length(times1))
{stop("Argument 'cov.13' needs to have the same number of rows than 'times1'")}
if (sum(apply(sapply(data.frame(cov.13),is.na),1,sum))>0)
warning(sum(apply(sapply(data.frame(cov.13),is.na),1,sum))," individuals with missing values on 'cov.13' will be removed from the analysis \n")
if(!is.null(init.cov.13))
{
if (!is.numeric(init.cov.13))
{stop("Argument 'init.cov.13' must be a numeric vector (default is NULL)")}
if (ncol(data.frame(cov.13))!=length(init.cov.13))
{stop("Argument 'init.cov.13' needs to have the same length than number of columns of 'cov.13'")}
}
if (!is.null(names.13))
{
if (!is.character(names.13))
{stop("Argument 'names.13' must be a character vector (default is NULL)")}
if (ncol(data.frame(cov.13))!=length(names.13))
{stop("Argument 'names.13' needs to have the same length than number of columns of 'cov.13'")}
}
}
if(!is.null(cov.23))
{
if ((!is.vector(cov.23) & !is.data.frame(cov.23) & !is.matrix(cov.23)) | !all(sapply(cov.23,is.numeric)))
{stop("Argument 'cov.23' must be a numeric matrix or data.frame (default is NULL)")}
if (nrow(data.frame(cov.23))!=length(times2))
{stop("Argument 'cov.23' needs to have the same number of rows than 'times1'")}
if (sum(apply(sapply(data.frame(cov.23),is.na),1,sum))>0)
warning(sum(apply(sapply(data.frame(cov.23),is.na),1,sum))," individuals with missing values on 'cov.23' will be removed from the analysis \n")
if(!is.null(init.cov.23))
{
if (!is.numeric(init.cov.23))
{stop("Argument 'init.cov.23' must be a numeric vector (default is NULL)")}
if (ncol(data.frame(cov.23))!=length(init.cov.23))
{stop("Argument 'init.cov.23' needs to have the same length than number of columns of 'cov.23'")}
}
if (!is.null(names.23))
{
if (!is.character(names.23))
{stop("Argument 'names.23' must be a character vector (default is NULL)")}
if (ncol(data.frame(cov.23))!=length(names.23))
{stop("Argument 'names.23' needs to have the same length than number of columns of 'cov.23'")}
}
}
if(!(conf.int %in% c("TRUE","FALSE")))
{stop("Argument 'conf.int' must be TRUE or FALSE (default is TRUE)")}
if(!is.null(precision))
{
if(!is.numeric(precision))
{stop("Argument 'precision' must be numeric (default is 0)")}
if(precision<0)
{stop("Argument 'precision' must be greater or equal to 0 (default is 0)")}
}
if(!(silent %in% c("TRUE","FALSE")))
{stop("Argument 'silent' must be TRUE or FALSE (default is TRUE)")}
if(!is.null(legendre))
{
if(legendre!=round(legendre) | legendre<=0)
{stop("Argument 'legendre' must be a positive integer (default is 30)")}
}
coef12<-NULL
sigma12<-NULL
nu12<-NULL
theta12<-NULL
coef13<-NULL
sigma13<-NULL
nu13<-NULL
theta13<-NULL
coef23<-NULL
nu23<-NULL
theta23<-NULL
sigma23<-NULL
if (homogeneous==TRUE)
{
#sojourn time distributions
if(dist[1]=="WG" | dist[1]=="W" | (dist[1]=="E" & is.null(cuts.12)))
{
H12<-function(t,z,cuts) { exp(as.matrix(z) %*% coef12) * ((((1+(t/sigma12)^nu12))^(1/theta12))-1) }
h12<-function(t,z,cuts) { exp(as.matrix(z) %*% coef12)*(1/theta12*(1+(t/sigma12)^nu12)^(1/theta12-1))*(nu12*((1/sigma12)^nu12)*t^(nu12-1)) }
}
if(dist[1]=="PE" & !is.null(cuts.12))
{
cuts.12 <- sort(cuts.12)
if ((cuts.12[1] <= 0) || (cuts.12[length(cuts.12)] == Inf))
stop("'cuts.12' must be positive and finite.")
cuts.12 <- c(0, cuts.12, Inf)
H12<-function(t,z,cuts) {
H<-rep(0,length(t))
for (i in (1:(length(cuts)-1)))
{
H<-H+(1*(t>=cuts[i]))*exp(as.matrix(z) %*% coef12)*((pmin(cuts[i+1],t)-cuts[i])/sigma12[i])
}
return(H)
rm(H)
}
h12<-function(t,z,cuts) {
h<-rep(0,length(t))
for (i in (1:(length(cuts)-1)))
{
h<-h+(1*(t>=cuts[i])*(t<cuts[i+1]))* exp(as.matrix(z) %*% coef12)*(1/sigma12[i])
}
return(h)
rm(h)
}
}
if(dist[2]=="WG" | dist[2]=="W" | (dist[2]=="E" & is.null(cuts.13)))
{
H13<-function(t,z,cuts) { exp(as.matrix(z) %*% coef13) * ((((1+(t/sigma13)^nu13))^(1/theta13))-1) }
h13<-function(t,z,cuts) { exp(as.matrix(z) %*% coef13)*(1/theta13*(1+(t/sigma13)^nu13)^(1/theta13-1))*(nu13*((1/sigma13)^nu13)*t^(nu13-1)) }
}
if(dist[2]=="PE" & !is.null(cuts.13))
{
cuts.13 <- sort(cuts.13)
if ((cuts.13[1] <= 0) || (cuts.13[length(cuts.13)] == Inf))
stop("'cuts.13' must be positive and finite.")
cuts.13 <- c(0, cuts.13, Inf)
H13<-function(t,z,cuts) {
H<-rep(0,length(t))
for (i in (1:(length(cuts)-1)))
{
H<-H+(1*(t>=cuts[i]))*exp(as.matrix(z) %*% coef13)*((pmin(cuts[i+1],t)-cuts[i])/sigma13[i])
}
return(H)
rm(H)
}
h13<-function(t,z,cuts) {
h<-rep(0,length(t))
for (i in (1:(length(cuts)-1)))
{
h<-h+(1*(t>=cuts[i])*(t<cuts[i+1]))* exp(as.matrix(z) %*% coef13)*(1/sigma13[i])
}
return(h)
rm(h)
}
}
if(dist[3]=="WG" | dist[3]=="W" | (dist[3]=="E" & is.null(cuts.23)))
{
H23<-function(t,z,cuts) { exp(as.matrix(z) %*% coef23) * ((((1+(t/sigma23)^nu23))^(1/theta23))-1) }
h23<-function(t,z,cuts) { exp(as.matrix(z) %*% coef23)*(1/theta23*(1+(t/sigma23)^nu23)^(1/theta23-1))*(nu23*((1/sigma23)^nu23)*t^(nu23-1)) }
}
if(dist[3]=="PE" & !is.null(cuts.23))
{
cuts.23 <- sort(cuts.23)
if ((cuts.23[1] <= 0) || (cuts.23[length(cuts.23)] == Inf))
stop("'cuts.23' must be positive and finite.")
cuts.23 <- c(0, cuts.23, Inf)
H23<-function(t,z,cuts) {
H<-rep(0,length(t))
for (i in (1:(length(cuts)-1)))
{
H<-H+(1*(t>=cuts[i]))*exp(as.matrix(z) %*% coef23)*((pmin(cuts[i+1],t)-cuts[i])/sigma23[i])
}
return(H)
rm(H)
}
h23<-function(t,z,cuts) {
h<-rep(0,length(t))
for (i in (1:(length(cuts)-1)))
{
h<-h+(1*(t>=cuts[i])*(t<cuts[i+1]))* exp(as.matrix(z) %*% coef23)*(1/sigma23[i])
}
return(h)
rm(h)
}
}
# Gauss-Legendre quadrature nodes and weights (statmod::gauss.quad), used to
# approximate the integrals over the unobserved 1->2 transition time in the
# likelihood contributions below. 'legendre' is the number of nodes.
noeuds<-gauss.quad(legendre, kind="legendre", alpha=0, beta=0)$nodes
poids<-gauss.quad(legendre, kind="legendre", alpha=0, beta=0)$weights
#contributions to the log-likelihood
# i13: integral over the unknown 1->2 time on [binf, bsup] for a subject whose
# passage through state 2 is unobserved but who reached state 3 at bsup.
# m (half-width) and p (midpoint) map [binf, bsup] onto the quadrature
# interval [-1, 1]; the integrand is (density of leaving 1 for 2 at u) x
# (density of 2->3 at bsup - u), with u = m*noeuds[i] + p.
i13 <- function(binf, bsup, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) - H13(m * noeuds[i] + p, y13, cutimes13)) * h23(bsup - m * noeuds[i] - p, y23, cutimes23) * exp(-H23(bsup - m * noeuds[i] - p, y23, cutimes23))
}
return(m*(.temp))
}
# c13: total contribution for sequence 13 -- either a direct 1->3 transition
# at d2 (first term) or an unobserved sojourn in state 2 between d0 and d2
# (the i13 integral).
c13<-function(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23)
{return( h13(d2, z13, cutimes13) * exp(- H12(d2, z12, cutimes12) - H13(d2, z13, cutimes13)) + i13(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23) )}
# c123: contribution for sequence 123 -- 1->2 occurred somewhere in
# [binf, bsup] (interval-censored) and 2->3 was observed at total time 'tot';
# the 1->2 time is integrated out by the same quadrature as i13.
c123 <- function(binf, bsup, tot, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) - H13(m * noeuds[i] + p, y13, cutimes13)) * h23(tot - m * noeuds[i] - p, y23, cutimes23) * exp(-H23(tot - m * noeuds[i] - p, y23, cutimes23))
}
return(m*(.temp))
}
# i1: integral for a subject last seen in state 1 at binf and right-censored
# at bsup who may have moved to state 2 unobserved -- no 2->3 event, so only
# survival in state 2 (exp(-H23)) enters the integrand, not the h23 density.
i1 <- function(binf, bsup, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) -H13(m * noeuds[i] + p, y13, cutimes13) ) * exp(-H23(bsup - m * noeuds[i] - p, y23, cutimes23))
}
return(m*(.temp))
}
# c1: total contribution for sequence 1 (censored in state 1 at d2) --
# survival in state 1 up to d2 plus the unobserved-passage integral i1.
c1<-function(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23)
{return( exp(- H12(d2, z12, cutimes12) - H13(d2, z13, cutimes13)) + i1(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23) )}
# c12: contribution for sequence 12 -- 1->2 occurred in [binf, bsup] and the
# subject was then right-censored in state 2 at total time 'tot' (survival in
# state 2 only, as in i1).
c12 <- function(binf, bsup, tot, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) - H13(m * noeuds[i] + p, y13, cutimes13)) * exp(-H23(tot - m * noeuds[i] - p, y23, cutimes23))
}
return(m*(.temp))
}
#missing data
.D <- cbind(times0, times2, cov.12, cov.13, cov.23, weights)
.na <- (!is.na(apply(.D, FUN="sum", MARGIN=1)))
#initialization of the parameters
if (is.null(cov.12)) {cov.12.mat <- cbind(rep(0, length(times1))); n.12 <- NULL} else { cov.12.mat <- cbind(cov.12); n.12 <- paste("covariate(s) on trans. 12: num", 1:ncol(data.frame(cov.12))); if(!is.null(names.12)) {n.12 <- names.12} }
if (is.null(cov.13)) {cov.13.mat <- cbind(rep(0, length(times1))); n.13 <- NULL} else { cov.13.mat <- cbind(cov.13); n.13 <- paste("covariate(s) on trans. 13: num", 1:ncol(data.frame(cov.13))); if(!is.null(names.13)) {n.13 <- names.13} }
if (is.null(cov.23)) {cov.23.mat <- cbind(rep(0, length(times1))); n.23 <- NULL} else { cov.23.mat <- cbind(cov.23); n.23 <- paste("covariate(s) on trans. 23: num", 1:ncol(data.frame(cov.23))); if(!is.null(names.23)) {n.23 <- names.23} }
if (is.null(ini.dist.12)) {i.12.dist<-rep(0, 1*(dist[1]=="E" & is.null(cuts.12)) + 2*(dist[1]=="W") + 3*(dist[1]=="WG") + 1*(dist[1]=="PE" & !is.null(cuts.12))*(length(cuts.12)-1))}
else {i.12.dist<-ini.dist.12}
if (is.null(ini.dist.13)) {i.13.dist<-rep(0, 1*(dist[2]=="E" & is.null(cuts.13)) + 2*(dist[2]=="W") + 3*(dist[2]=="WG") + 1*(dist[2]=="PE" & !is.null(cuts.13))*(length(cuts.13)-1))}
else {i.13.dist<-ini.dist.13}
if (is.null(ini.dist.23)) {i.23.dist<-rep(0, 1*(dist[3]=="E" & is.null(cuts.23)) + 2*(dist[3]=="W") + 3*(dist[3]=="WG") + 1*(dist[3]=="PE" & !is.null(cuts.23))*(length(cuts.23)-1))}
else {i.23.dist<-ini.dist.23}
if (!is.null(init.cov.12)) {i.12<-init.cov.12}
if (is.null(init.cov.12) & is.null(cov.12)) {i.12<-NULL}
if (is.null(init.cov.12) & !is.null(cov.12)) {i.12<-rep(0, ncol(data.frame(cov.12)))}
if (!is.null(init.cov.13)) {i.13<-init.cov.13}
if (is.null(init.cov.13) & is.null(cov.13)) {i.13<-NULL}
if (is.null(init.cov.13) & !is.null(cov.13)) {i.13<-rep(0, ncol(data.frame(cov.13)))}
if (!is.null(init.cov.23)) {i.23<-init.cov.23}
if (is.null(init.cov.23) & is.null(cov.23)) {i.23<-NULL}
if (is.null(init.cov.23) & !is.null(cov.23)) {i.23<-rep(0, ncol(data.frame(cov.23)))}
ini <- c(i.12.dist, i.13.dist, i.23.dist, i.12, i.13, i.23)
if (is.null(weights)) {w <- rep(1, length(times1))} else {w <- weights }
#parameters for contributions associated to each transition
.w1 <- w[(sequences==1 & .na)]
.times1.0 <- times0[(sequences==1 & .na)]
.times1.2 <- times2[(sequences==1 & .na)]
.c1.12 <- cov.12.mat[(sequences==1 & .na),]
.c1.13 <- cov.13.mat[(sequences==1 & .na),]
.c1.23 <- cov.23.mat[(sequences==1 & .na),]
.w13 <- w[(sequences==13 & .na)]
.times13.0 <- times0[(sequences==13 & .na)]
.times13.2 <- times2[(sequences==13 & .na)]
.c13.12 <- cov.12.mat[(sequences==13 & .na),]
.c13.13 <- cov.13.mat[(sequences==13 & .na),]
.c13.23 <- cov.23.mat[(sequences==13 & .na),]
.w123 <- w[(sequences==123 & .na)]
.times123.0 <- times0[(sequences==123 & .na)]
.times123.1 <- times1[(sequences==123 & .na)]
.times123.2 <- times2[(sequences==123 & .na)]
.c123.12 <- cov.12.mat[(sequences==123 & .na),]
.c123.13 <- cov.13.mat[(sequences==123 & .na),]
.c123.23 <- cov.23.mat[(sequences==123 & .na),]
.w12 <- w[(sequences==12 & .na)]
.times12.0 <- times0[(sequences==12 & .na)]
.times12.1 <- times1[(sequences==12 & .na)]
.times12.2 <- times2[(sequences==12 & .na)]
.c12.12 <- cov.12.mat[(sequences==12 & .na),]
.c12.13 <- cov.13.mat[(sequences==12 & .na),]
.c12.23 <- cov.23.mat[(sequences==12 & .na),]
# logV: negative log-likelihood as a function of the packed parameter vector x.
# All distribution parameters are estimated on the log scale (hence exp(x[.])).
# The running index i tracks the current position in x as parameters are
# consumed in the fixed order: trans. 12 dist params, trans. 13, trans. 23,
# then regression coefficients for 12, 13, 23.
# NOTE: assign(..., inherits = TRUE) writes sigma*/nu*/theta*/coef* into the
# enclosing frame (like <<-) so that the h12/H12/... closures defined above
# pick up the current values -- do not reorder these statements.
logV<-function(x)
{
if (dist[1]=="E" & is.null(cuts.12)) {assign("sigma12", exp(x[1]), inherits = TRUE); assign("nu12", 1, inherits = TRUE); assign("theta12", 1, inherits = TRUE); i<-1}
if (dist[1]=="W") {assign("sigma12", exp(x[1]), inherits = TRUE); assign("nu12", exp(x[2]), inherits = TRUE); assign("theta12", 1, inherits = TRUE); i<-2}
if (dist[1]=="WG") {assign("sigma12", exp(x[1]), inherits = TRUE); assign("nu12", exp(x[2]), inherits = TRUE); assign("theta12", exp(x[3]), inherits = TRUE); i<-3}
if (dist[1]=="PE" & !is.null(cuts.12)) {assign("sigma12", exp(x[1:(length(cuts.12)-1)]), inherits = TRUE); i<-(length(cuts.12)-1)}
if (dist[2]=="E" & is.null(cuts.13)) {assign("sigma13", exp(x[i+1]), inherits = TRUE); assign("nu13", 1, inherits = TRUE); assign("theta13", 1, inherits = TRUE); i<-i+1}
if (dist[2]=="W") {assign("sigma13", exp(x[i+1]), inherits = TRUE); assign("nu13", exp(x[i+2]), inherits = TRUE); assign("theta13", 1, inherits = TRUE); i<-i+2}
if (dist[2]=="WG") {assign("sigma13", exp(x[i+1]), inherits = TRUE); assign("nu13", exp(x[i+2]), inherits = TRUE); assign("theta13", exp(x[i+3]), inherits = TRUE); i<-i+3}
if (dist[2]=="PE" & !is.null(cuts.13)) {assign("sigma13", exp(x[(i+1):(i+length(cuts.13)-1)]), inherits = TRUE); i<-(i+length(cuts.13)-1)}
if (dist[3]=="E" & is.null(cuts.23)) {assign("sigma23", exp(x[i+1]), inherits = TRUE); assign("nu23", 1, inherits = TRUE); assign("theta23", 1, inherits = TRUE); i<-i+1}
if (dist[3]=="W") {assign("sigma23", exp(x[i+1]), inherits = TRUE); assign("nu23", exp(x[i+2]), inherits = TRUE); assign("theta23", 1, inherits = TRUE); i<-i+2}
if (dist[3]=="WG") {assign("sigma23", exp(x[i+1]), inherits = TRUE); assign("nu23", exp(x[i+2]), inherits = TRUE); assign("theta23", exp(x[i+3]), inherits = TRUE); i<-i+3}
if (dist[3]=="PE" & !is.null(cuts.23)) {assign("sigma23", exp(x[(i+1):(i+length(cuts.23)-1)]), inherits = TRUE); i<-(i+length(cuts.23)-1)}
# Regression coefficients: 0 when no covariates were supplied for a
# transition (the z %*% coef term then contributes exp(0) = 1).
if (is.null(cov.12)) {assign("coef12", 0, inherits = TRUE)}
else {assign("coef12", x[(i+1):(i+ncol(data.frame(cov.12)))], inherits = TRUE); i <-i+ncol(data.frame(cov.12))}
if (is.null(cov.13)) {assign("coef13", 0, inherits = TRUE)}
else {assign("coef13", x[(i+1):(i+ncol(data.frame(cov.13)))], inherits = TRUE); i <-i+ncol(data.frame(cov.13))}
if (is.null(cov.23)) {assign("coef23", 0, inherits = TRUE)}
else {assign("coef23", x[(i+1):(i+ncol(data.frame(cov.23)))], inherits = TRUE); i <-i+ncol(data.frame(cov.23))}
# Weighted sum of log contributions over the four observed sequence types
# (1, 13, 123, 12); negated because optim() minimizes.
return( -1*(
sum( .w1 * log(c1(.times1.0, .times1.2, .c1.12, .c1.13, .c1.23, cuts.12, cuts.13, cuts.23)) ) +
sum( .w13 * log(c13(.times13.0, .times13.2, .c13.12, .c13.13, .c13.23, cuts.12, cuts.13, cuts.23)) ) +
sum( .w123 * log(c123(.times123.0, .times123.1, .times123.2, .c123.12, .c123.13, .c123.23, cuts.12, cuts.13, cuts.23)) ) +
sum( .w12 * log(c12(.times12.0, .times12.1, .times12.2, .c12.12, .c12.13, .c12.23, cuts.12, cuts.13, cuts.23)) ) ) )
}
#first maximum likelihood optimization
n<-1
res<-tryCatch(optim(ini, logV, hessian=conf.int, control=list(maxit=100000)))
if(inherits(res, "error")) {
warning("Maximum likelihood optimization fails to converge", "\n")
} else {
if(silent==FALSE) {warning(-1*res$value, "\n")}
#further maximum likelihood optimizations
if(is.null(precision)) {delta <- 10^(-6)} else {delta <-precision}
while (n<=2 & !(inherits(res, "error"))) {
temp.value<-res$value
res<-tryCatch(optim(res$par, logV, hessian=conf.int, control=list(maxit=100000)))
if (!(inherits(res, "error"))) {
n<-1*((temp.value-res$value)>delta) + (n+1)*((temp.value-res$value)<=delta)
if(silent==FALSE) {warning(-1*res$value, "\n")} }
}
if(inherits(res, "error")) {
warning("Maximum likelihood optimization fails to converge", "\n")
} else {
#output
if (conf.int==TRUE) {
if (max(!is.na(tryCatch(solve(res$hessian), error=function(e) NA)),na.rm=F)==1){
table.res <- data.frame(Estimate = round(res$par, 4),
SE = round(sqrt(diag(solve(res$hessian))), 4),
Wald = round(res$par/sqrt(diag(solve(res$hessian))), 4),
Pvalue = round(2*(1-pnorm(abs(res$par/sqrt(diag(solve(res$hessian)))), 0, 1)) , 4) )
names(table.res)<-c("Estimate","Std.Error","t.value","Pr(>|t|)")
table.covariance<-solve(res$hessian)
}
else {
table.res <- data.frame(Estimate = round(res$par, 4) )
table.covariance<-NULL
warning("\n Hessian matrix not defined", "\n")
} #end else for hessian matrix condition
}
if (conf.int==FALSE) {
table.res <- data.frame(Coef = round(res$par, 4) )
table.covariance<-NULL
}
if (dist[1]=="E" & is.null(cuts.12)) { lab12<-c("log(sigma) on trans. 12")}
if (dist[1]=="W" & is.null(cuts.12)) { lab12<-c("log(sigma) on trans. 12", "log(nu) on trans. 12")}
if (dist[1]=="WG" & is.null(cuts.12)) { lab12<-c("log(sigma) on trans. 12", "log(nu) on trans. 12", "log(theta) on trans. 12")}
if (dist[1]=="PE" & !is.null(cuts.12)) {
lab12<-rep("",length(cuts.12)-1)
for (i in (1:(length(cuts.12)-1)))
{
lab12[i]<-paste("log(sigma) on trans. 12, interval [",cuts.12[i],";",cuts.12[i+1],"[",sep="")
}
}
if (dist[2]=="E" & is.null(cuts.13)) { lab13<-c("log(sigma) on trans. 13")}
if (dist[2]=="W" & is.null(cuts.13)) { lab13<-c("log(sigma) on trans. 13", "log(nu) on trans. 13")}
if (dist[2]=="WG" & is.null(cuts.13)) { lab13<-c("log(sigma) on trans. 13", "log(nu) on trans. 13", "log(theta) on trans. 13")}
if (dist[2]=="PE" & !is.null(cuts.13)) {
lab13<-rep("",length(cuts.13)-1)
for (i in (1:(length(cuts.13)-1)))
{
lab13[i]<-paste("log(sigma) on trans. 13, interval [",cuts.13[i],";",cuts.13[i+1],"[",sep="")
}
}
if (dist[3]=="E" & is.null(cuts.23)) { lab23<-c("log(sigma) on trans. 23")}
if (dist[3]=="W" & is.null(cuts.23)) { lab23<-c("log(sigma) on trans. 23", "log(nu) on trans. 23")}
if (dist[3]=="WG" & is.null(cuts.23)) { lab23<-c("log(sigma) on trans. 23", "log(nu) on trans. 23", "log(theta) on trans. 23")}
if (dist[3]=="PE" & !is.null(cuts.23)) {
lab23<-rep("",length(cuts.23)-1)
for (i in (1:(length(cuts.23)-1)))
{
lab23[i]<-paste("log(sigma) on trans. 23, interval [",cuts.23[i],";",cuts.23[i+1],"[",sep="")
}
}
lab<-c(lab12, lab13, lab23, n.12, n.13, n.23)
rownames(table.res) <- paste(1:length(lab), lab)
warning("\n Number of data rows:",nrow(.D))
warning("Number of data rows with missing values (deleted):",nrow(.D)-sum(.na),"\n")
return(list(
object="sm3ic (3-state semi-markov model with interval-censored data)",
dist=dist,
cuts.12=cuts.12,
cuts.13=cuts.13,
cuts.23=cuts.23,
covariates=c( max(0, length(n.12)), max(0, length(n.13)), max(0, length(n.23)) ),
table=table.res,
cov.matrix=table.covariance,
LogLik=(-1*res$value),
AIC=2*length(res$par)-2*(-1*res$value)))
}
}
}
else
{
nh<-NULL
#sojourn time distributions
if(dist[1]=="WG" | dist[1]=="W" | (dist[1]=="E" & is.null(cuts.12)))
{
# Cumulative hazard for transition 1->2 under the (generalized) Weibull family:
# H12(t|z) = exp(z'coef12) * ((1 + (t/sigma12)^nu12)^(1/theta12) - 1).
# sigma12/nu12/theta12 and coef12 are free variables set at call time by logV
# (via assign(..., inherits = TRUE)); per logV, dist "E" fixes nu = theta = 1
# and dist "W" fixes theta = 1.  'cuts' is accepted for interface parity with
# the piecewise-exponential version but is not used here.
H12<-function(t,z,cuts) { exp(as.matrix(z) %*% coef12) * ((((1+(t/sigma12)^nu12))^(1/theta12))-1) }
# Corresponding instantaneous hazard h12 = dH12/dt (same free variables).
h12<-function(t,z,cuts) { exp(as.matrix(z) %*% coef12)*(1/theta12*(1+(t/sigma12)^nu12)^(1/theta12-1))*(nu12*((1/sigma12)^nu12)*t^(nu12-1)) }
}
if(dist[1]=="PE" & !is.null(cuts.12))
{
cuts.12 <- sort(cuts.12)
if ((cuts.12[1] <= 0) || (cuts.12[length(cuts.12)] == Inf))
stop("'cuts.12' must be positive and finite.")
cuts.12 <- c(0, cuts.12, Inf)
# Piecewise-exponential cumulative hazard for transition 1->2.
# Within each interval [cuts[i], cuts[i+1]) the baseline rate is 1/sigma12[i];
# covariates act proportionally through exp(z'coef12).
# coef12 and sigma12 are free variables set at call time by logV
# (via assign(..., inherits = TRUE)).
# Fix vs. original: removed unreachable `rm(H)` after `return(H)` and hoisted
# the loop-invariant proportional-hazards multiplier out of the loop.
H12<-function(t,z,cuts) {
# loop-invariant covariate multiplier (identical in every iteration)
prop <- exp(as.matrix(z) %*% coef12)
H <- rep(0, length(t))
for (i in seq_len(length(cuts)-1))
{
# exposure accrued in interval i: (min(cuts[i+1], t) - cuts[i]) once t has
# entered the interval, zero before that (the indicator term)
H <- H + (1*(t>=cuts[i])) * prop * ((pmin(cuts[i+1],t)-cuts[i])/sigma12[i])
}
return(H)
}
# Piecewise-exponential instantaneous hazard for transition 1->2:
# rate 1/sigma12[i] on [cuts[i], cuts[i+1]), scaled by exp(z'coef12).
# coef12 and sigma12 are free variables set at call time by logV.
# Fix vs. original: removed unreachable `rm(h)` after `return(h)` and hoisted
# the loop-invariant covariate multiplier.
h12<-function(t,z,cuts) {
prop <- exp(as.matrix(z) %*% coef12)
h <- rep(0, length(t))
for (i in seq_len(length(cuts)-1))
{
# indicator selects the single interval containing each t
h <- h + (1*(t>=cuts[i])*(t<cuts[i+1])) * prop * (1/sigma12[i])
}
return(h)
}
}
if(dist[2]=="WG" | dist[2]=="W" | (dist[2]=="E" & is.null(cuts.13)))
{
# Cumulative hazard for transition 1->3 under the (generalized) Weibull family
# (same parameterization as H12, with sigma13/nu13/theta13/coef13 set by logV).
# 'cuts' is accepted for interface parity with the piecewise version; unused.
H13<-function(t,z,cuts) { exp(as.matrix(z) %*% coef13) * ((((1+(t/sigma13)^nu13))^(1/theta13))-1) }
# Corresponding instantaneous hazard h13 = dH13/dt.
h13<-function(t,z,cuts) { exp(as.matrix(z) %*% coef13)*(1/theta13*(1+(t/sigma13)^nu13)^(1/theta13-1))*(nu13*((1/sigma13)^nu13)*t^(nu13-1)) }
}
if(dist[2]=="PE" & !is.null(cuts.13))
{
cuts.13 <- sort(cuts.13)
if ((cuts.13[1] <= 0) || (cuts.13[length(cuts.13)] == Inf))
stop("'cuts.13' must be positive and finite.")
cuts.13 <- c(0, cuts.13, Inf)
# Piecewise-exponential cumulative hazard for transition 1->3
# (rate 1/sigma13[i] on [cuts[i], cuts[i+1]), covariate effect exp(z'coef13)).
# coef13 and sigma13 are free variables set at call time by logV.
# Fix vs. original: removed unreachable `rm(H)` after `return(H)` and hoisted
# the loop-invariant covariate multiplier.
H13<-function(t,z,cuts) {
prop <- exp(as.matrix(z) %*% coef13)
H <- rep(0, length(t))
for (i in seq_len(length(cuts)-1))
{
H <- H + (1*(t>=cuts[i])) * prop * ((pmin(cuts[i+1],t)-cuts[i])/sigma13[i])
}
return(H)
}
# Piecewise-exponential instantaneous hazard for transition 1->3.
# coef13 and sigma13 are free variables set at call time by logV.
# Fix vs. original: removed unreachable `rm(h)` after `return(h)` and hoisted
# the loop-invariant covariate multiplier.
h13<-function(t,z,cuts) {
prop <- exp(as.matrix(z) %*% coef13)
h <- rep(0, length(t))
for (i in seq_len(length(cuts)-1))
{
h <- h + (1*(t>=cuts[i])*(t<cuts[i+1])) * prop * (1/sigma13[i])
}
return(h)
}
}
if(dist[3]=="WG" | dist[3]=="W" | (dist[3]=="E" & is.null(cuts.23)))
{
# Cumulative hazard for transition 2->3, (generalized) Weibull family, with a
# non-homogeneity term: the hazard is multiplied by d12^nh = exp(nh*log(d12)),
# where d12 is the time of the 1->2 transition and nh is the non-homogeneity
# coefficient set by logV.  'cuts' is unused (interface parity with PE form).
H23nh<-function(t,z,cuts,d12) { exp(as.matrix(z) %*% coef23) * exp(nh * log(d12)) * ((((1+(t/sigma23)^nu23))^(1/theta23))-1) }
# Corresponding instantaneous hazard (same non-homogeneity factor).
h23nh<-function(t,z,cuts,d12) { exp(as.matrix(z) %*% coef23)* exp(nh * log(d12)) * (1/theta23*(1+(t/sigma23)^nu23)^(1/theta23-1))*(nu23*((1/sigma23)^nu23)*t^(nu23-1)) }
}
if(dist[3]=="PE" & !is.null(cuts.23))
{
cuts.23 <- sort(cuts.23)
if ((cuts.23[1] <= 0) || (cuts.23[length(cuts.23)] == Inf))
stop("'cuts.23' must be positive and finite.")
cuts.23 <- c(0, cuts.23, Inf)
# Piecewise-exponential cumulative hazard for transition 2->3 with a
# non-homogeneity factor d12^nh (d12 = time of the 1->2 transition, assumed
# positive; nh is the non-homogeneity coefficient set by logV).
# coef23 and sigma23 are free variables set at call time by logV.
# Fix vs. original: removed unreachable `rm(H)` after `return(H)` and hoisted
# the loop-invariant multiplier (covariates x non-homogeneity term).
H23nh<-function(t,z,cuts,d12) {
prop <- exp(as.matrix(z) %*% coef23) * exp(nh * log(d12))
H <- rep(0, length(t))
for (i in seq_len(length(cuts)-1))
{
H <- H + (1*(t>=cuts[i])) * prop * ((pmin(cuts[i+1],t)-cuts[i])/sigma23[i])
}
return(H)
}
# Piecewise-exponential instantaneous hazard for transition 2->3 with the
# non-homogeneity factor d12^nh (see H23nh).  coef23, sigma23 and nh are free
# variables set at call time by logV.
# Fix vs. original: removed unreachable `rm(h)` after `return(h)` and hoisted
# the loop-invariant multiplier.
h23nh<-function(t,z,cuts,d12) {
prop <- exp(as.matrix(z) %*% coef23) * exp(nh * log(d12))
h <- rep(0, length(t))
for (i in seq_len(length(cuts)-1))
{
h <- h + (1*(t>=cuts[i])*(t<cuts[i+1])) * prop * (1/sigma23[i])
}
return(h)
}
}
noeuds<-gauss.quad(legendre, kind="legendre", alpha=0, beta=0)$nodes
poids<-gauss.quad(legendre, kind="legendre", alpha=0, beta=0)$weights
#contributions to the log-likelihood
# Gauss-Legendre quadrature (on (binf, bsup), mapped from (-1, 1) via
# u = m*node + p) of the density of passing through state 2 at an unobserved
# time u and reaching state 3 at bsup:
#   f12(u|y12) * S12(u) * S13(u) * f23(bsup - u | y23, u-dependent via nh).
# `legendre` (number of nodes), `noeuds` and `poids` (nodes/weights from
# gauss.quad) are free variables from the enclosing scope, as are the hazard
# functions h12/H12/H13/h23nh/H23nh whose parameters logV sets before calling.
i13nh <- function(binf, bsup, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
# affine map from the reference interval (-1, 1) to (binf, bsup)
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) - H13(m * noeuds[i] + p, y13, cutimes13)) * h23nh(bsup - m * noeuds[i] - p, y23, cutimes23, m * noeuds[i] + p) * exp(-H23nh(bsup - m * noeuds[i] - p, y23, cutimes23, m * noeuds[i] + p))
}
# the Jacobian of the change of variables is m
return(m*(.temp))
}
# Likelihood contribution for an observed 1->3 sequence (absorbing state
# reached at d2 with no intermediate state observed): direct 1->3 density at
# d2 plus the integral (i13nh) over a possibly unobserved 1->2 transition in
# (d0, d2).  d0 is the entry/last-observation time in state 1.
c13nh<-function(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23)
{return( h13(d2, z13, cutimes13) * exp(- H12(d2, z12, cutimes12) - H13(d2, z13, cutimes13)) + i13nh(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23) )}
# Likelihood contribution for an observed 1->2->3 sequence where the 1->2
# transition time is interval-censored in (binf, bsup) and state 3 is reached
# at total time `tot`.  Same Gauss-Legendre scheme as i13nh, but the 2->3
# density is evaluated at tot - u instead of bsup - u.
# `legendre`, `noeuds`, `poids` and the hazard functions are free variables
# from the enclosing scope.
c123nh <- function(binf, bsup, tot, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) - H13(m * noeuds[i] + p, y13, cutimes13)) * h23nh(tot - m * noeuds[i] - p, y23, cutimes23, m * noeuds[i] + p) * exp(-H23nh(tot - m * noeuds[i] - p, y23, cutimes23, m * noeuds[i] + p))
}
return(m*(.temp))
}
# Gauss-Legendre quadrature over (binf, bsup) of the density of an unobserved
# 1->2 transition at time u followed by SURVIVAL in state 2 up to bsup
# (note: survival exp(-H23nh), not density — contrast with i13nh).
# Free variables as in i13nh.
i1nh <- function(binf, bsup, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) -H13(m * noeuds[i] + p, y13, cutimes13) ) * exp(-H23nh(bsup - m * noeuds[i] - p, y23, cutimes23, m * noeuds[i] + p))
}
return(m*(.temp))
}
# Likelihood contribution for a subject last seen in state 1 at d0 and
# censored at d2: survival in state 1 up to d2, plus the possibility (i1nh)
# of an unobserved 1->2 transition in (d0, d2) followed by survival in 2.
c1nh<-function(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23)
{return( exp(- H12(d2, z12, cutimes12) - H13(d2, z13, cutimes13)) + i1nh(d0, d2, z12, z13, z23, cutimes12, cutimes13, cutimes23) )}
# Likelihood contribution for an observed 1->2 sequence censored in state 2 at
# total time `tot`, with the 1->2 transition time interval-censored in
# (binf, bsup): like c123nh but with survival exp(-H23nh) in state 2 instead
# of the 2->3 density.  Free variables as in i13nh.
c12nh <- function(binf, bsup, tot, y12, y13, y23, cutimes12, cutimes13, cutimes23)
{
m <- 0.5*(bsup - binf)
p <- 0.5*(bsup + binf)
.temp <- 0
for(i in 1:legendre)
{
.temp <- .temp + poids[i] * h12(m * noeuds[i] + p, y12, cutimes12) * exp(-H12(m * noeuds[i] + p, y12, cutimes12) - H13(m * noeuds[i] + p, y13, cutimes13)) * exp(-H23nh(tot - m * noeuds[i] - p, y23, cutimes23, m * noeuds[i] + p))
}
return(m*(.temp))
}
#missing data
.D <- cbind(times0, times2, cov.12, cov.13, cov.23, weights)
.na <- (!is.na(apply(.D, FUN="sum", MARGIN=1)))
#initialization of the parameters
if (is.null(cov.12)) {cov.12.mat <- cbind(rep(0, length(times1))); n.12 <- NULL} else { cov.12.mat <- cbind(cov.12); n.12 <- paste("covariate(s) on trans. 12: num", 1:ncol(data.frame(cov.12))); if(!is.null(names.12)) {n.12 <- names.12} }
if (is.null(cov.13)) {cov.13.mat <- cbind(rep(0, length(times1))); n.13 <- NULL} else { cov.13.mat <- cbind(cov.13); n.13 <- paste("covariate(s) on trans. 13: num", 1:ncol(data.frame(cov.13))); if(!is.null(names.13)) {n.13 <- names.13} }
if (is.null(cov.23)) {cov.23.mat <- cbind(rep(0, length(times1))); n.23 <- NULL} else { cov.23.mat <- cbind(cov.23); n.23 <- paste("covariate(s) on trans. 23: num", 1:ncol(data.frame(cov.23))); if(!is.null(names.23)) {n.23 <- names.23} }
if (is.null(ini.dist.12)) {i.12.dist<-rep(0, 1*(dist[1]=="E" & is.null(cuts.12)) + 2*(dist[1]=="W") + 3*(dist[1]=="WG") + 1*(dist[1]=="PE" & !is.null(cuts.12))*(length(cuts.12)-1))}
else {i.12.dist<-ini.dist.12}
if (is.null(ini.dist.13)) {i.13.dist<-rep(0, 1*(dist[2]=="E" & is.null(cuts.13)) + 2*(dist[2]=="W") + 3*(dist[2]=="WG") + 1*(dist[2]=="PE" & !is.null(cuts.13))*(length(cuts.13)-1))}
else {i.13.dist<-ini.dist.13}
if (is.null(ini.dist.23)) {i.23.dist<-rep(0, 1*(dist[3]=="E" & is.null(cuts.23)) + 2*(dist[3]=="W") + 3*(dist[3]=="WG") + 1*(dist[3]=="PE" & !is.null(cuts.23))*(length(cuts.23)-1))}
else {i.23.dist<-ini.dist.23}
if (!is.null(init.cov.12)) {i.12<-init.cov.12}
if (is.null(init.cov.12) & is.null(cov.12)) {i.12<-NULL}
if (is.null(init.cov.12) & !is.null(cov.12)) {i.12<-rep(0, ncol(data.frame(cov.12)))}
if (!is.null(init.cov.13)) {i.13<-init.cov.13}
if (is.null(init.cov.13) & is.null(cov.13)) {i.13<-NULL}
if (is.null(init.cov.13) & !is.null(cov.13)) {i.13<-rep(0, ncol(data.frame(cov.13)))}
if (!is.null(init.cov.23)) {i.23<-init.cov.23}
if (is.null(init.cov.23) & is.null(cov.23)) {i.23<-NULL}
if (is.null(init.cov.23) & !is.null(cov.23)) {i.23<-rep(0, ncol(data.frame(cov.23)))}
ini <- c(i.12.dist, i.13.dist, i.23.dist, 0, i.12, i.13, i.23)
if (is.null(weights)) {w <- rep(1, length(times1))} else {w <- weights }
#parameters for contributions associated to each transition
.w1 <- w[(sequences==1 & .na)]
.times1.0 <- times0[(sequences==1 & .na)]
.times1.2 <- times2[(sequences==1 & .na)]
.c1.12 <- cov.12.mat[(sequences==1 & .na),]
.c1.13 <- cov.13.mat[(sequences==1 & .na),]
.c1.23 <- cov.23.mat[(sequences==1 & .na),]
.w13 <- w[(sequences==13 & .na)]
.times13.0 <- times0[(sequences==13 & .na)]
.times13.2 <- times2[(sequences==13 & .na)]
.c13.12 <- cov.12.mat[(sequences==13 & .na),]
.c13.13 <- cov.13.mat[(sequences==13 & .na),]
.c13.23 <- cov.23.mat[(sequences==13 & .na),]
.w123 <- w[(sequences==123 & .na)]
.times123.0 <- times0[(sequences==123 & .na)]
.times123.1 <- times1[(sequences==123 & .na)]
.times123.2 <- times2[(sequences==123 & .na)]
.c123.12 <- cov.12.mat[(sequences==123 & .na),]
.c123.13 <- cov.13.mat[(sequences==123 & .na),]
.c123.23 <- cov.23.mat[(sequences==123 & .na),]
.w12 <- w[(sequences==12 & .na)]
.times12.0 <- times0[(sequences==12 & .na)]
.times12.1 <- times1[(sequences==12 & .na)]
.times12.2 <- times2[(sequences==12 & .na)]
.c12.12 <- cov.12.mat[(sequences==12 & .na),]
.c12.13 <- cov.13.mat[(sequences==12 & .na),]
.c12.23 <- cov.23.mat[(sequences==12 & .na),]
#log-likelihood
# Negative log-likelihood for optim().  Unpacks the flat parameter vector x
# into the model parameters (sigma/nu/theta per transition, the
# non-homogeneity coefficient nh, then regression coefficients), publishing
# them into the enclosing scope with assign(..., inherits = TRUE) so that the
# hazard functions (H12, h23nh, ...) pick them up at call time.  The cursor
# `i` tracks the position consumed so far in x; the unpacking order below must
# match the order used to build `ini`.  Scale parameters are optimized on the
# log scale (hence exp()).  Returns -logLik, summing the four weighted
# sequence contributions (censored in 1; 1->3; 1->2->3; censored in 2).
logV<-function(x)
{
# sojourn-time parameters for transition 1->2 (E / W / WG / piecewise-exp)
if (dist[1]=="E" & is.null(cuts.12)) {assign("sigma12", exp(x[1]), inherits = TRUE); assign("nu12", 1, inherits = TRUE); assign("theta12", 1, inherits = TRUE); i<-1}
if (dist[1]=="W") {assign("sigma12", exp(x[1]), inherits = TRUE); assign("nu12", exp(x[2]), inherits = TRUE); assign("theta12", 1, inherits = TRUE); i<-2}
if (dist[1]=="WG") {assign("sigma12", exp(x[1]), inherits = TRUE); assign("nu12", exp(x[2]), inherits = TRUE); assign("theta12", exp(x[3]), inherits = TRUE); i<-3}
if (dist[1]=="PE" & !is.null(cuts.12)) {assign("sigma12", exp(x[1:(length(cuts.12)-1)]), inherits = TRUE); i<-(length(cuts.12)-1)}
# sojourn-time parameters for transition 1->3
if (dist[2]=="E" & is.null(cuts.13)) {assign("sigma13", exp(x[i+1]), inherits = TRUE); assign("nu13", 1, inherits = TRUE); assign("theta13", 1, inherits = TRUE); i<-i+1}
if (dist[2]=="W") {assign("sigma13", exp(x[i+1]), inherits = TRUE); assign("nu13", exp(x[i+2]), inherits = TRUE); assign("theta13", 1, inherits = TRUE); i<-i+2}
if (dist[2]=="WG") {assign("sigma13", exp(x[i+1]), inherits = TRUE); assign("nu13", exp(x[i+2]), inherits = TRUE); assign("theta13", exp(x[i+3]), inherits = TRUE); i<-i+3}
if (dist[2]=="PE" & !is.null(cuts.13)) {assign("sigma13", exp(x[(i+1):(i+length(cuts.13)-1)]), inherits = TRUE); i<-(i+length(cuts.13)-1)}
# sojourn-time parameters for transition 2->3
if (dist[3]=="E" & is.null(cuts.23)) {assign("sigma23", exp(x[i+1]), inherits = TRUE); assign("nu23", 1, inherits = TRUE); assign("theta23", 1, inherits = TRUE); i<-i+1}
if (dist[3]=="W") {assign("sigma23", exp(x[i+1]), inherits = TRUE); assign("nu23", exp(x[i+2]), inherits = TRUE); assign("theta23", 1, inherits = TRUE); i<-i+2}
if (dist[3]=="WG") {assign("sigma23", exp(x[i+1]), inherits = TRUE); assign("nu23", exp(x[i+2]), inherits = TRUE); assign("theta23", exp(x[i+3]), inherits = TRUE); i<-i+3}
if (dist[3]=="PE" & !is.null(cuts.23)) {assign("sigma23", exp(x[(i+1):(i+length(cuts.23)-1)]), inherits = TRUE); i<-(i+length(cuts.23)-1)}
# non-homogeneity coefficient (exponent of d12 in the 2->3 hazard)
assign("nh", x[i+1], inherits = TRUE); i<-i+1
# regression coefficients per transition (0 when no covariates supplied)
if (is.null(cov.12)) {assign("coef12", 0, inherits = TRUE)}
else {assign("coef12", x[(i+1):(i+ncol(data.frame(cov.12)))], inherits = TRUE); i <-i+ncol(data.frame(cov.12))}
if (is.null(cov.13)) {assign("coef13", 0, inherits = TRUE)}
else {assign("coef13", x[(i+1):(i+ncol(data.frame(cov.13)))], inherits = TRUE); i <-i+ncol(data.frame(cov.13))}
if (is.null(cov.23)) {assign("coef23", 0, inherits = TRUE)}
else {assign("coef23", x[(i+1):(i+ncol(data.frame(cov.23)))], inherits = TRUE); i <-i+ncol(data.frame(cov.23))}
# weighted negative log-likelihood over the four observed sequence types
return( -1*(
sum( .w1 * log(c1nh(.times1.0, .times1.2, .c1.12, .c1.13, .c1.23, cuts.12, cuts.13, cuts.23)) ) +
sum( .w13 * log(c13nh(.times13.0, .times13.2, .c13.12, .c13.13, .c13.23, cuts.12, cuts.13, cuts.23)) ) +
sum( .w123 * log(c123nh(.times123.0, .times123.1, .times123.2, .c123.12, .c123.13, .c123.23, cuts.12, cuts.13, cuts.23)) ) +
sum( .w12 * log(c12nh(.times12.0, .times12.1, .times12.2, .c12.12, .c12.13, .c12.23, cuts.12, cuts.13, cuts.23)) ) ) )
}
#first maximum likelihood optimization
n<-1
res<-tryCatch(optim(ini, logV, hessian=conf.int, control=list(maxit=100000)))
if(inherits(res, "error")) {
warning("Maximum likelihood optimization fails to converge", "\n")
} else {
if(silent==FALSE) {warning(-1*res$value, "\n")}
#further maximum likelihood optimizations
if(is.null(precision)) {delta <- 10^(-6)} else {delta <-precision}
while (n<=2 & !(inherits(res, "error"))) {
temp.value<-res$value
res<-tryCatch(optim(res$par, logV, hessian=conf.int, control=list(maxit=100000)))
if (!(inherits(res, "error"))) {
n<-1*((temp.value-res$value)>delta) + (n+1)*((temp.value-res$value)<=delta)
if(silent==FALSE) {warning(-1*res$value,"\n")} }
}
if(inherits(res, "error")) {
warning("Maximum likelihood optimization fails to converge", "\n")
} else {
#output
if (conf.int==TRUE) {
if (max(!is.na(tryCatch(solve(res$hessian), error=function(e) NA)),na.rm=F)==1){
table.res <- data.frame(Estimate = round(res$par, 4),
SE = round(sqrt(diag(solve(res$hessian))), 4),
Wald = round(res$par/sqrt(diag(solve(res$hessian))), 4),
Pvalue = round(2*(1-pnorm(abs(res$par/sqrt(diag(solve(res$hessian)))), 0, 1)) , 4) )
names(table.res)<-c("Estimate","Std.Error","t.value","Pr(>|t|)")
table.covariance<-solve(res$hessian)
}
else {
table.res <- data.frame(Estimate = round(res$par, 4) )
table.covariance<-NULL
warning("\n Hessian matrix not defined", "\n")
} #end else for hessian matrix condition
}
if (conf.int==FALSE) {
table.res <- data.frame(Coef = round(res$par, 4) )
table.covariance<-NULL
}
if (dist[1]=="E" & is.null(cuts.12)) { lab12<-c("log(sigma) on trans. 12")}
if (dist[1]=="W" & is.null(cuts.12)) { lab12<-c("log(sigma) on trans. 12", "log(nu) on trans. 12")}
if (dist[1]=="WG" & is.null(cuts.12)) { lab12<-c("log(sigma) on trans. 12", "log(nu) on trans. 12", "log(theta) on trans. 12")}
if (dist[1]=="PE" & !is.null(cuts.12)) {
lab12<-rep("",length(cuts.12)-1)
for (i in (1:(length(cuts.12)-1)))
{
lab12[i]<-paste("log(sigma) on trans. 12, interval [",cuts.12[i],";",cuts.12[i+1],"[",sep="")
}
}
if (dist[2]=="E" & is.null(cuts.13)) { lab13<-c("log(sigma) on trans. 13")}
if (dist[2]=="W" & is.null(cuts.13)) { lab13<-c("log(sigma) on trans. 13", "log(nu) on trans. 13")}
if (dist[2]=="WG" & is.null(cuts.13)) { lab13<-c("log(sigma) on trans. 13", "log(nu) on trans. 13", "log(theta) on trans. 13")}
if (dist[2]=="PE" & !is.null(cuts.13)) {
lab13<-rep("",length(cuts.13)-1)
for (i in (1:(length(cuts.13)-1)))
{
lab13[i]<-paste("log(sigma) on trans. 13, interval [",cuts.13[i],";",cuts.13[i+1],"[",sep="")
}
}
if (dist[3]=="E" & is.null(cuts.23)) { lab23<-c("log(sigma) on trans. 23")}
if (dist[3]=="W" & is.null(cuts.23)) { lab23<-c("log(sigma) on trans. 23", "log(nu) on trans. 23")}
if (dist[3]=="WG" & is.null(cuts.23)) { lab23<-c("log(sigma) on trans. 23", "log(nu) on trans. 23", "log(theta) on trans. 23")}
if (dist[3]=="PE" & !is.null(cuts.23)) {
lab23<-rep("",length(cuts.23)-1)
for (i in (1:(length(cuts.23)-1)))
{
lab23[i]<-paste("log(sigma) on trans. 23, interval [",cuts.23[i],";",cuts.23[i+1],"[",sep="")
}
}
lab<-c(lab12, lab13, lab23, "coef. of non-homogeneity", n.12, n.13, n.23)
rownames(table.res) <- paste(1:length(lab), lab)
warning("\n Number of data rows:",nrow(.D))
warning("Number of data rows with missing values (deleted):",nrow(.D)-sum(.na),"\n")
return(list(
object="sm3ic (3-state semi-markov model with interval-censored data)",
dist=dist,
cuts.12=cuts.12,
cuts.13=cuts.13,
cuts.23=cuts.23,
covariates=c( max(0, length(n.12)), max(0, length(n.13)), max(0, length(n.23)) ),
table=table.res,
cov.matrix=table.covariance,
LogLik=(-1*res$value),
AIC=2*length(res$par)-2*(-1*res$value)))
} #end else for maximum likelihood optimization
} #end else for first maximum likelihood optimization
}
}
|
06c24742bc5c86c33764899ede16c6d80566a73f
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.application.integration/man/mwaa_create_web_login_token.Rd
|
13493bf26c8f83dd284af5ac467eb5891532b61f
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 712
|
rd
|
mwaa_create_web_login_token.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mwaa_operations.R
\name{mwaa_create_web_login_token}
\alias{mwaa_create_web_login_token}
\title{Creates a web login token for the Airflow Web UI}
\usage{
mwaa_create_web_login_token(Name)
}
\arguments{
\item{Name}{[required] The name of the Amazon MWAA environment. For example,
\code{MyMWAAEnvironment}.}
}
\description{
Creates a web login token for the Airflow Web UI. To learn more, see \href{https://docs.aws.amazon.com/mwaa/latest/userguide/call-mwaa-apis-web.html}{Creating an Apache Airflow web login token}.
See \url{https://www.paws-r-sdk.com/docs/mwaa_create_web_login_token/} for full documentation.
}
\keyword{internal}
|
e75066bf33179a709e10318f85ccc1395cf65810
|
a442e52a01ba59da34bdca21ba4e2f3dc4f7e599
|
/man/tfpwmk.Rd
|
b537820a4ef5d23aadd6875bf507bd82b54fcd34
|
[] |
no_license
|
cran/modifiedmk
|
93f91b680e1f6a0e935eb193afb7f6d015e19507
|
87569a63622e996fd423fb872bba0ffb01ed7c2e
|
refs/heads/master
| 2023-05-25T18:17:54.465263
| 2021-06-10T06:20:02
| 2021-06-10T06:20:02
| 107,578,860
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,210
|
rd
|
tfpwmk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tfpwmk.R
\name{tfpwmk}
\alias{tfpwmk}
\title{Mann-Kendall Trend Test Applied to Trend-Free Prewhitened Time Series Data in Presence of Serial Correlation Using Yue et al. (2002) Approach}
\usage{
tfpwmk(x)
}
\arguments{
\item{x}{- Time series data vector}
}
\value{
Z-Value - Z statistic after trend-free prewhitening (TFPW)
Sen's Slope - Sen's slope for TFPW series
Old Sen's Slope - Sen's slope for original data series (x)
P-value - P-value after trend-free prewhitening
S - Mann-Kendall S statistic
Var(s) - Variance of S
Tau - Mann-Kendall's Tau
}
\description{
When the time series data are not random and are influenced by autocorrelation, the trend component is removed from the data and the series is prewhitened prior to the application of the trend test.
}
\details{
The linear trend component is removed from the original data, and the residuals are then prewhitened using the lag-1 serial correlation coefficient. The prewhitened data are then tested with the Mann-Kendall trend test.
}
\examples{
x<-c(Nile)
tfpwmk(x)
}
\references{
Kendall, M. (1975). Rank Correlation Methods. Griffin, London, 202 pp.
Kulkarni, A. and H. von Storch. 1995. Monte Carlo experiments on the effects of serial correlation on the Mann-Kendall test of trends. Meteorologische Zeitschrift N.F, 4(2): 82-85.
Mann, H. B. (1945). Nonparametric Tests Against Trend. Econometrica, 13(3): 245-259.
Salas, J.D. (1980). Applied modeling of hydrologic times series. Water Resources Publication, 484 pp.
Sen, P. K. (1968). Estimates of the Regression Coefficient Based on Kendall’s Tau. Journal of the American Statistical Association, 63(324): 1379. <doi:10.2307/2285891>
von Storch, V. H. (1995). Misuses of statistical analysis in climate research, In: Analysis of Climate Variability: Applications of Statistical Techniques, ed. von H. V. Storch and A. Navarra A. Springer-Verlag, Berlin: 11-26.
Yue, S., Pilon, P., Phinney, B., and Cavadias, G. (2002). The influence of autocorrelation on the ability to detect trend in hydrological series. Hydrological Processes, 16(9): 1807–1829. <doi:10.1002/hyp.1095>
}
|
ba95d8eaea65bae0b2d1b62478b8a4e7fbf7b4e0
|
065d5b3617507f983379e45d2c94c85a29a65667
|
/.Rprofile
|
32a1313144c07703a2ed55756df06815b8960c99
|
[] |
no_license
|
aloosefish/aloosefish_website
|
22b117275d0ee9700d2cfbb6ec88c4a61dc15673
|
74c7ead9230933bf8eb3635143f232baa2349fc2
|
refs/heads/master
| 2021-06-23T01:30:05.616889
| 2019-06-11T17:49:36
| 2019-06-11T17:49:36
| 138,337,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
rprofile
|
.Rprofile
|
# Blogdown defaults for this site: post author, source-file extension,
# destination subdirectory for new posts, and empty-YAML handling.
options(
  blogdown.author = "Ian Davis",
  blogdown.ext = ".Rmd",
  blogdown.subdir = "post",
  blogdown.yaml.empty = TRUE
)
|
d86964f9a7235848605ee75d9840ec9d660337e9
|
2363bb535bd8d73fbeaed0195dfe9abf3c0e7b77
|
/setting.R
|
ca8fe8c158fcab0d2246165329fdd9275dd52525
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/ETL---Korean-NEDIS
|
7bd5713f7ee35a445d813c3ef0ffa6511fee4efd
|
b3bbfb04e17ce9e3eed8f3dc1b11869944362336
|
refs/heads/master
| 2020-03-20T10:19:50.090905
| 2019-06-10T08:12:29
| 2019-06-10T08:12:29
| 137,366,496
| 1
| 0
|
Apache-2.0
| 2019-02-16T04:36:25
| 2018-06-14T14:07:20
| null |
UTF-8
|
R
| false
| false
| 1,940
|
r
|
setting.R
|
# Setup script for the NEDIS ETL: loads packages, reads the raw NEDIS CSV
# extracts and the condition-mapping file, and declares DB connection
# placeholders.  Fill in every "" placeholder before sourcing.
# package loading
library(dplyr)
library(lubridate)
library(SqlRender)
library(DatabaseConnector)
dataFolder <- "" ##"<set your data folder where your NEDIS file exists >"
scriptFolder <- "" ##"<set your script folder for ETL where your script file exists>"
EMIHPTMI_file <- "" ## "insert your csv file name for EMIHPTMI"
EMIHDGOT_file <- "" ## "insert your csv file name for EMIHDGOT"
condition_mapping_file <- "" ## "insert your file name for mapping_condition"
# data load — the time/id columns are forced to character so leading zeros
# and HHMM-style values are preserved
EMIHPTMI <- read.csv(file.path(dataFolder, EMIHPTMI_file),
stringsAsFactors=FALSE,
quote = "\"'",
colClasses = c("ptmiidno" = "character",
"ptmiintm" = "character",
"ptmiaktm" = "character",
"ptmiottm" = "character",
"ptmihstm" = "character",
"ptmidctm" = "character"))
# column name change to lower case
EMIHPTMI <- setNames(EMIHPTMI, tolower(names(EMIHPTMI)))
# row sequence number prepended as the first column
# (seq_len, not seq/1:n, so an empty file yields an empty sequence)
EMIHPTMI_seq <- seq_len(nrow(EMIHPTMI))
EMIHPTMI <- data.frame(EMIHPTMI_seq, EMIHPTMI)
EMIHDGOT <- read.csv(file.path(dataFolder,EMIHDGOT_file),
stringsAsFactors=FALSE,
quote = "\"'",
colClasses = c("dgotidno" = "character",
"dgotintm" = "character")
)
condition_mapping_table <-read.csv(file.path(dataFolder,condition_mapping_file),
stringsAsFactors=FALSE,
quote = "\"'"
)
# connection settings (placeholders; e.g. dbms "sql server")
dbms <- "" ##"<set your dbms>"
server <- "" ##"<set your server IP>"
schema <- "" ##"<set your schema>"
user <- "" ##"<set your username>"
password <- "" ##"<set your password>"
|
71e5b052f32e55f34c131c39450db656176d65b6
|
5aad40ba5a1eb29799c1ad1c21495faa07e99b36
|
/extern/adobe_photoshop_cs5_sdk_win/pluginsdk/samplecode/format/simpleformat/mac/SimpleFormatUI.r
|
fe0bb0a86130d4eec3930cec3bea61c900112347
|
[] |
no_license
|
q4a/navidia_proj
|
3525bcfdb12dc20dcecf08f2603324feceef7394
|
32c839744ad849f8d4d0273c635c4d601cc1980f
|
refs/heads/master
| 2023-04-16T10:44:39.466782
| 2019-11-30T10:26:47
| 2019-11-30T10:26:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,654
|
r
|
SimpleFormatUI.r
|
// ADOBE SYSTEMS INCORPORATED
// Copyright 1993 - 2002 Adobe Systems Incorporated
// All Rights Reserved
//
// NOTICE: Adobe permits you to use, modify, and distribute this
// file in accordance with the terms of the Adobe license agreement
// accompanying it. If you have received this file from a source
// other than Adobe, then your use, modification, or distribution
// of it requires the prior written permission of Adobe.
//-------------------------------------------------------------------
//-------------------------------------------------------------------------------
//
// File:
// SimpleFormatUI.r
//
// Description:
// Dialog for the SimpleFormat Mac project.
//
//-------------------------------------------------------------------------------
#include "Types.r"
/* the list box control that looks a lot like a combo box :-) */
// Menu backing the resource-list control (CNTL 16050 below).  The single
// "temp" item is a placeholder — presumably replaced at run time with the
// document's resource identifiers (cf. the "run time ..." static texts in
// the DITL); TODO confirm against the plug-in's dialog code.
resource 'MENU' (16050) {
16050,
textMenuProc,
allEnabled,
enabled,
"Resources",
{ /* array: 1 element */
/* [1] */
"temp", noIcon, noKey, noMark, plain,
}
};
// Control definition for the resource list.  The rectangle {30,10,190,160}
// matches DITL item [4]; the value 16050 ties it to MENU 16050 above.
// procID 1008 selects the control definition procedure — NOTE(review):
// verify 1008 against the CDEF used by the plug-in.
resource 'CNTL' (16050, purgeable) {
{30, 10, 190, 160},
0,
visible,
0,
16050,
1008,
0,
""
};
// Item list for the SimpleFormat dialog (DLOG 16050): Remove/Skip buttons,
// the resource-list control, four label/value static-text pairs
// (Type/ID/Name/Size — the "run time ..." strings are placeholders filled in
// when the dialog is shown), and a "Keep Resource" checkbox.
resource 'DITL' (16050, "SimpleFormat UI", purgeable) {
{ /* array DITLarray: 13 elements */
/* [1] */
{8, 270, 28, 338},
Button {
enabled,
"Remove"
},
/* [2] */
{40, 270, 60, 338},
Button {
enabled,
"Skip"
},
/* [3] */
{8, 10, 24, 190},
StaticText {
disabled,
"Resource Identifier"
},
/* [4] */
{30, 10, 190, 160},
Control {
enabled,
16050
},
/* [5] */
{90, 170, 106, 260},
StaticText {
disabled,
"Type:"
},
/* [6] */
{90, 270, 106, 338},
StaticText {
disabled,
"run time text"
},
/* [7] */
{110, 170, 126, 260},
StaticText {
disabled,
"ID:"
},
/* [8] */
{110, 270, 126, 338},
StaticText {
disabled,
"run time id"
},
/* [9] */
{130, 170, 146, 260},
StaticText {
disabled,
"Name:"
},
/* [10] */
{130, 270, 146, 338},
StaticText {
disabled,
"run time name"
},
/* [11] */
{150, 170, 166, 260},
StaticText {
disabled,
"Size:"
},
/* [12] */
{150, 270, 166, 338},
StaticText {
disabled,
"run time size"
},
/* [13] */
{170, 170, 186, 338},
CheckBox {
enabled,
"Keep Resource"
}
}
};
// Extended dialog flags: movable-modal event handling plus Appearance-theme
// controls and background for DLOG 16050.
resource 'dlgx' (16050) {
versionZero {
kDialogFlagsHandleMovableModal + kDialogFlagsUseThemeControls + kDialogFlagsUseThemeBackground
}
};
// Dialog window: movable modal box, visible, no close box, item list
// DITL 16050, centered on the parent window's screen.
resource 'DLOG' (16050, "SimpleFormat", purgeable) {
{100, 150, 300, 550},
movableDBoxProc,
visible,
noGoAway,
0x0,
16050,
"Simple Format Resource Remover",
centerParentWindowScreen
};
// end SimpleFormatUI.r
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.