blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6d443e0cdd8d3bd947a52900a1118506ab827f5f
|
6c55d0793217f79c36fe4ef59ae2a9e6907af3db
|
/Day05/day05.R
|
a4454c404ca36ba85527afd0eacce52612af9202
|
[] |
no_license
|
Darius-Jaraminas/advent_of_code_2020
|
decb06a1b9262fbf06de6ef1f92efd14d3d4f0dc
|
88242d6c185a528d2ac251094ad8908c8bc54cf6
|
refs/heads/master
| 2023-02-07T02:14:38.090605
| 2020-12-25T09:22:21
| 2020-12-25T09:22:21
| 317,453,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,162
|
r
|
day05.R
|
library(dplyr)
source("fun.R")

# Decode a vector of boarding passes (Advent of Code 2020, day 5).
# Characters 1-7 of each pass binary-partition the rows 0:127, characters
# 8-10 partition the seat columns 0:7; seat_id = row * 8 + seat.
# Relies on clean_coord() and find_binary() from fun.R.
# Returns a data.frame with columns row, seat and seat_id.
decode_passes <- function(passes) {
  n <- length(passes)
  row <- numeric(n)
  seat <- numeric(n)
  seat_id <- numeric(n)
  for (i in seq_len(n)) {
    row_coord <- clean_coord(board = passes[i], subset = c(1, 7))
    row[i] <- find_binary(set = 0:127, coord = row_coord)
    seat_coord <- clean_coord(board = passes[i], subset = c(8, 10))
    seat[i] <- find_binary(set = 0:7, coord = seat_coord)
    seat_id[i] <- row[i] * 8 + seat[i]
  }
  data.frame(row = row, seat = seat, seat_id = seat_id)
}

# part 1
# test: check decoding against the worked examples from the puzzle text
t1 <- read.csv("test1.txt", header = FALSE, stringsAsFactors = FALSE)
test_res <- decode_passes(t1[, 1])
all(test_res$row == c(44, 70, 14, 102))
all(test_res$seat == c(5, 7, 7, 4))
all(test_res$seat_id == c(357, 567, 119, 820))

# solve: the answer to part 1 is the largest seat id
inp <- read.csv("input.txt", header = FALSE, stringsAsFactors = FALSE)
seat_id <- decode_passes(inp[, 1])$seat_id
max(seat_id)

# part 2: my seat is the single missing id, i.e. the one position where
# consecutive sorted seat ids differ by 2
s_id <- sort(seat_id)
di <- diff(s_id)
my_seat <- s_id[which(di == 2)] + 1
|
d9841ad32028b0288c14d64a39b0a5c716d709f0
|
78aa2a91d46ef0f030f30bc5adf3bddf100416cd
|
/man/getXrange.Rd
|
87677595eefb352c64916bdae87dd51de0d24f7e
|
[] |
no_license
|
jrthompson54/DGE.Tools2
|
c7239ee27c1d6c328d2c860618fbd09147ae5fc5
|
f7f9badef7d94a7e637ca3716a073a1ddbf4f5d2
|
refs/heads/master
| 2021-08-06T04:30:18.547140
| 2021-05-12T13:15:13
| 2021-05-12T13:15:13
| 250,071,780
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 532
|
rd
|
getXrange.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ThemePack.R
\name{getXrange}
\alias{getXrange}
\title{Function getXrange}
\usage{
getXrange(p)
}
\arguments{
\item{p}{A ggplot object}
}
\value{
a vector of length 2 with xmin, xmax
}
\description{
Deprecated. Use xrange instead.
A simple function to spare me from looking up the syntax every time
I want to get the x range of a plot.
}
\examples{
xrange<- getXrange(Myggplot)
}
\author{
John Thompson, \email{john.thompson@bms.com}
}
\keyword{ggplot2}
|
4123863d6cc82463029e94e4c33c61a4b43907bc
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962534-test.R
|
5686f0abd114eba6218ca7645844b0fbfe16c9c5
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 574
|
r
|
1609962534-test.R
|
# Auto-generated libFuzzer/valgrind regression input for the diffrprojects
# package: a fixed integer payload captured from a fuzzing run.
testlist <- list(x = 1465341783L, y = c(-1499027802L, -1499037696L, 0L, 0L, 32934L, -1499027802L, -1499027802L, -1499027802L, -1499027802L, -1499027802L, -1499027802L, -1499027802L, -1499027802L, -1499027802L, -1499027802L, 367L, 1853030400L, -539616721L, -11788545L, -65281L, -1L, -2686977L, -134225921L, -1L, -256L, 0L, 16777215L, -1537L, -687865857L, -687865857L, -2686977L, -134225921L, -2049L, -539616721L, -11788545L, -42L, 439353343L, -42L, -2049L, -1610612737L, -1L, -1L, -687865865L))
# Replay the captured call against the unexported dist_mat_absolute wrapper.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
# Print the result structure so the test harness records shape/type.
str(result)
|
b28efcee427826cc438e01fc74ed3c23c08be675
|
db9babe7093ccc38fe3ed282974922960a3d97fa
|
/Scripts/Broken_axis.R
|
a2222fe8665f040da39583eb9bfba5e102a9a1a0
|
[
"MIT"
] |
permissive
|
cxli233/FriendsDontLetFriends
|
f6826aa7e486b55bc82cdd0edabc67db5deade6c
|
c58f9eee68088183b5dc515384e0fd7872653088
|
refs/heads/main
| 2023-03-17T06:27:18.882960
| 2023-01-16T23:28:27
| 2023-01-16T23:28:27
| 495,942,174
| 794
| 30
|
MIT
| 2022-12-31T16:52:37
| 2022-05-24T18:26:12
|
R
|
UTF-8
|
R
| false
| false
| 3,037
|
r
|
Broken_axis.R
|
library(tidyverse)
library(patchwork)
library(RColorBrewer)

# Toy data: two groups of bar heights far apart (10-30 vs 450-500), the
# classic situation that tempts people into drawing a broken axis.
a <- data.frame(
  x = c("a", "b", "c", "d", "e"),
  y = c(10, 20, 30, 450, 500)
)

# Lower panel of the vertical broken-axis plot: zoomed to the small values.
lower <- a %>%
  ggplot(aes(x = x, y = y)) +
  geom_bar(stat = "identity", aes(fill = x),
           width = 0.7, color = "black") +
  geom_text(aes(label = y), vjust = -0.2) +
  scale_fill_manual(values = brewer.pal(8, "Set2")) +
  theme_classic() +
  theme(legend.position = "none",
        axis.title.y = element_text(hjust = 1)) +
  coord_cartesian(ylim = c(0, 50))

# Upper panel builder: same bars zoomed to a given y range, with the x axis
# stripped so it stacks cleanly on top of `lower`. The original upper1 and
# upper2 were identical except for their y limits, so the duplicated code
# is factored out here.
upper_panel <- function(df, ylims) {
  df %>%
    ggplot(aes(x = x, y = y)) +
    geom_bar(stat = "identity", aes(fill = x),
             width = 0.7, color = "black") +
    geom_text(aes(label = y), vjust = -0.2) +
    scale_fill_manual(values = brewer.pal(8, "Set2")) +
    labs(x = NULL,
         y = NULL) +
    theme_classic() +
    theme(legend.position = "none",
          axis.line.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.text.x = element_blank()) +
    coord_cartesian(ylim = ylims)
}
upper1 <- upper_panel(a, c(400, 550))
upper2 <- upper_panel(a, c(450, 520))

# Two broken-axis variants: same data, different upper zoom windows.
broken1 <- wrap_plots(upper1, lower, nrow = 2) &
  theme(axis.text = element_text(color = "black"),
        text = element_text(size = 14))
broken2 <- wrap_plots(upper2, lower, nrow = 2) &
  theme(axis.text = element_text(color = "black"),
        text = element_text(size = 14))
wrap_plots(broken1, broken2, nrow = 1)
ggsave("../Results/Broken_axis.svg", height = 4, width = 6, bg = "white")
ggsave("../Results/Broken_axis.png", height = 4, width = 6, bg = "white")

# Horizontal variant: left panel shows the small values, right panel the
# large ones with the shared y axis stripped.
left <- a %>%
  ggplot(aes(x = y, y = x)) +
  geom_bar(stat = "identity", aes(fill = x),
           width = 0.7, color = "black") +
  geom_text(aes(label = y), hjust = -0.2) +
  scale_fill_manual(values = brewer.pal(8, "Set2")) +
  theme_classic() +
  theme(legend.position = "none",
        axis.title.x = element_text(hjust = 1)) +
  coord_cartesian(xlim = c(0, 50))
right <- a %>%
  ggplot(aes(x = y, y = x)) +
  geom_bar(stat = "identity", aes(fill = x),
           width = 0.7, color = "black") +
  geom_text(aes(label = y), hjust = -0.2) +
  scale_fill_manual(values = brewer.pal(8, "Set2")) +
  labs(x = NULL,
       y = NULL) +
  theme_classic() +
  theme(legend.position = "none",
        axis.line.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.text.y = element_blank()) +
  coord_cartesian(xlim = c(400, 600))
wrap_plots(left, right, nrow = 1) &
  theme(axis.text = element_text(color = "black"),
        text = element_text(size = 14))
|
d45f48cc9613aa862d9bb1b242bb434ce4def546
|
1250dace0174bc93655c08cca190bed948717f40
|
/R/cartotools-package.R
|
af71fc7e9a123e724ccf96766ea1cf16cd810d15
|
[] |
no_license
|
sumtxt/cartotools
|
80ced0ba486603b1b5848e2bb2b93c748195f5ff
|
8dd3237c80cc74344700da5d9f89d285912384c4
|
refs/heads/master
| 2021-04-27T00:16:05.426269
| 2018-03-04T11:25:00
| 2018-03-04T11:25:00
| 123,781,848
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 548
|
r
|
cartotools-package.R
|
#' cartotools: simple tools for working with GIS data in R
#'
#' Feedback is very welcome!
#'
#' @details
#' \tabular{ll}{
#' Package: \tab cartotools\cr
#' Type: \tab Package\cr
#' Version: \tab 0.0.0.9000\cr
#' Date: \tab 2018-02-11\cr
#' License: \tab GPL-3\cr
#' }
#'
#' @name cartotools-package
#'
#' @docType package
#' @aliases cartotools
#' @title Simple tools I tend to use a lot when working with GIS data in R
#' @author Moritz Marbach moritz.marbach@gess.ethz.ch
#'
#' @importFrom raster raster
#' @importFrom gdalUtils gdal_translate
#' @importFrom geometry convhulln
NULL
|
89de35d51c36f9f1c25afc887692e299980bf1fe
|
441801672f62ec4513c600e09f63305572b1b11d
|
/man/metadata.Rd
|
f5cea8666afcc22cce5710b4db850490dfafc1e8
|
[] |
no_license
|
sneumann/mzR-playground
|
d2379e6790cebf32ba3887bbb9ffad35af07d6ad
|
134104c7af6d979e41c62ea75f84cb45aa84b05d
|
refs/heads/master
| 2021-01-19T13:46:21.843142
| 2014-04-03T08:49:07
| 2014-04-03T08:49:07
| 1,159,529
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,368
|
rd
|
metadata.Rd
|
\name{metadata}
\alias{analyzer}
\alias{detector}
\alias{instrumentInfo}
\alias{ionisation}
\alias{manufacturer}
\alias{model}
\alias{runInfo}
\title{
Access the metadata from an \code{mzR} object.
}
\usage{
runInfo(object)
analyzer(object)
detector(object)
instrumentInfo(object)
ionisation(object)
manufacturer(object)
model(object)
}
\arguments{
\item{object}{An instantiated \code{mzR} object.}
}
\description{
Accessors to the analytical setup metadata of a run.
\code{runInfo} will show a summary of the experiment as a named list,
including \code{scanCount}, \code{lowMZ}, \code{highMZ}, \code{startMZ},
\code{endMZ}, \code{dStartTime} and \code{dEndTime}.
The \code{instrumentInfo} method returns a named \code{list} including
instrument manufacturer, model, ionisation technique, analyzer and
detector. These individual pieces of information can also be directly
accessed by the specific methods.
}
\seealso{
See for example \code{\link{peaks}} to access the data for the spectra
in a \code{"\linkS4class{mzR}"} class.
}
\author{
Steffen Neumann and Laurent Gatto
}
\examples{
library(msdata)
filepath <- system.file("microtofq", package = "msdata")
file <- list.files(filepath, pattern="MM14.mzML",
full.names=TRUE, recursive = TRUE)
mz <- openMSfile(file)
fileName(mz)
runInfo(mz)
close(mz)
}
|
df916869223959e9af4b25e1cc59acfc5e3275e5
|
f52a49233f0df01966a2cbf787823a19aa63d20d
|
/man/bind_rows2.Rd
|
f918c8b5b4293c2a9f4f9273d2ff2cf8a0a2eb1d
|
[] |
no_license
|
markwh/markstats
|
e09b5e3598b3ba5e26fd961e02026932f452c49f
|
b8da28f7b1975fc9bc94818a92dd644d38155a4e
|
refs/heads/master
| 2021-01-17T09:20:20.239503
| 2019-05-21T02:05:18
| 2019-05-21T02:05:18
| 40,378,676
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 363
|
rd
|
bind_rows2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{bind_rows2}
\alias{bind_rows2}
\title{A better (but slower) version of dplyr::bind_rows}
\usage{
bind_rows2(dfList, addMissing = TRUE, verbose = FALSE)
}
\arguments{
\item{dfList}{a list of data.frames}
}
\description{
A better (but slower) version of dplyr::bind_rows
}
|
0b29c94dd3520e0c0f313ae79effbc9672d3491e
|
623931d5753b1d5691e5dcb9a133c1c786b56bc5
|
/SpeciesListBreeding.R
|
a4f180bdb872a10c1829bea983cd75a348183e59
|
[] |
no_license
|
daisyduursma/CleanBreedingBirdObservations
|
e971ab54221284e84dea8bace22e717125afa9c9
|
76838c0d531d3766b8158925d024af0128647474
|
refs/heads/master
| 2021-08-26T07:08:56.928559
| 2017-11-21T20:42:18
| 2017-11-21T20:42:18
| 111,591,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,143
|
r
|
SpeciesListBreeding.R
|
# Build a master species list with breeding traits by merging several
# hand-maintained tables (species of interest, page numbers, Garnett
# traits, clutch/incubation/fledging traits) on scientific name.
# Note: the original began with rm(list = ls()), which clears the user's
# whole workspace and is dropped here; it also called str_trim() and
# capitalize() without loading their packages, which would error.
library(stringr)  # str_trim()
library(Hmisc)    # capitalize()

# prep species list: read species of interest and normalise column names
sp <- read.csv('/Users/daisy/Google Drive/PhD/BreedingTiming/tables/speciesOfInterest_2015-07-06.csv')
colnames(sp)[1] <- "ScientificName"
colnames(sp)[4] <- "CommonName"
# genus = first word of the scientific name
sp$genus <- t(as.data.frame(strsplit(as.character(sp$ScientificName),
                                     " ")))[, 1]
sp <- sp[, c("ScientificName","CommonName","genus","Family.scientific.name","Order","Freq","SpNo")]
colnames(sp) <- c("ScientificName","CommonName","genus","family","order","Freq","SpNo")

# page numbers from the incubation/fledging reference books
pg <- read.csv('/Users/daisy/Google Drive/PhD/birdTraits/IncubationFledging/speciesOfInterest.csv')[, c('Species','book','pgNu')]
sp <- merge(sp, pg, by.x = "ScientificName", by.y = "Species", all.x = TRUE)

# Garnett traits - restrict to full species breeding in Australia,
# after tidying the scientific names for matching
gnt <- read.csv("/Users/daisy/Google Drive/PhD/birdTraits/Garnett paper/NSD-Data Descriptor/Australian_Bird_Data_Version_1_0.csv")
gnt$Taxon.scientific.name <- str_trim(gnt$Taxon.scientific.name)
gnt$Taxon.scientific.name <- capitalize(tolower(gnt$Taxon.scientific.name))
gnt <- subset(gnt, Species == 1)
gnt <- subset(gnt, Population.description == "Endemic (breeding only)"
              | Population.description == "Australian"
              | Population.description == "Endemic (entirely Australian)"
              | Population.description == "Introduced")
# keep conservation status, breeding-habitat flags and clade columns
gnt <- gnt[, c("Taxon.scientific.name","Population.description","EPBC.Status.Nov..2014" ,
               "Australian.IUCN.Red.List.status.2014","Australian.IUCN.Red.List.criteria.2014",
               "Global.IUCN.status.2014","Global.IUCN.criteria.2014","Breeding.habitat..Arid.shrubland",
               "Breeding.habitat..Chenopod.shrubland","Breeding.habitat..Heath","Breeding.habitat..Triodia.hummock.grassland",
               "Breeding.habitat..Other.grassland","Breeding.habitat..Mallee","Breeding.habitat..Tropical.savanna.woodland",
               "Breeding.habitat..Temperate.dry.scleorphyll.forest.and.woodland","Breeding.habitat..Temperate.wet.scleorphyll.forest.and.woodland",
               "Breeding.habitat..Rainforest","Breeding.habitat..Mangrove","Breeding.habitat..inland.wetland",
               "Breeding.habitat..Beaches.and.sand.cays","Breeding.habitat..Rocky.coasts.and.islets",
               "Breeding.habitat..Other.non.Australian.habitats","Patch.clade", "Hackett.fine.clades","Hackett.coarse.clades")]
sp <- merge(sp, gnt, by.x = "ScientificName", by.y = "Taxon.scientific.name", all.x = TRUE)

# get traits of interest (clutch size, laying rate, incubation, fledging)
traits <- read.csv("/Users/daisy/Google Drive/PhD/BreedingTiming/tables/speciesOfInterest_2015-05-05.csv")[c("Species",
  "ClutchSizeMean","clutchDerived","RateOfLay","RateOfLayDerived","IncubationMean","IncubationDerivedHBW",
  "IncubationDerivedCloselyRelatedSpecies","FledgingMean","FledgingDerivedHBW",
  "FledgingDerivedCloselyRelatedSpecies")]
sp <- merge(sp, traits, by.x = "ScientificName", by.y = "Species", all.x = TRUE)

# write the merged table out with a new date-stamped name
write.csv(sp, '/Users/daisy/Google Drive/PhD/BreedingTiming/tables/speciesOfInterest_2015-07-15.csv', row.names = FALSE)
|
e106d8dff70ffa1be29caeabb5e0a1a780af1e89
|
71ee5477aa69fa3bbd891ed71c9739342338bb39
|
/cachematrix.R
|
71d5b52127e56cd813c39e9eba7a18ab1a9699b5
|
[] |
no_license
|
kshieh/ProgrammingAssignment2
|
bb09613a160976495d4e56de3d50fc5b455d670b
|
6aadb02aa5dba66530b077e5f99b6391df2c36ff
|
refs/heads/master
| 2021-01-18T05:36:40.255530
| 2015-02-18T17:08:30
| 2015-02-18T17:08:30
| 30,698,260
| 0
| 0
| null | 2015-02-12T10:51:33
| 2015-02-12T10:51:32
| null |
UTF-8
|
R
| false
| false
| 1,440
|
r
|
cachematrix.R
|
## Functions to cache the inverse of a matrix, which can be a time-consuming
## calculation. When the inverse is needed again, it can be looked up in the
## cache instead of recomputed. These functions take advantage of the scoping
## rules in the R language to preserve state inside of an R object.
## Creates a special "matrix," which is a list containing a function to:
## 1. Set the value of the matrix
## 2. Get the value of the matrix
## 3. Set the value of the inverse
## 4. Get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until setinverse() stores a value, and reset
  # whenever the underlying matrix is replaced.
  cached_inv <- NULL
  set <- function(y) {
    # Swap in a new matrix and drop the now-stale cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inv <<- inverse
  getinverse <- function() cached_inv
  # The "special matrix" is just this list of four accessor closures.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Calculates the inverse of the special "matrix" created with the above
## function. However, it first checks to see if the inverse has already been
## calculated. If so, it gets the inverse from the cache and skips the
## computation. Otherwise, it calculates the inverse of the data using the
## solve function and sets the value of the inverse in the cache via the
## setinverse function.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix with solve() and memoise it.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    # Cache hit: skip the computation entirely.
    message("Getting cached data")
  }
  cached
}
|
6a8113e9b43b6886730d06014ead199775acc0bd
|
381917652785e00bdf86890a30fb73f2f3a84651
|
/man/standardize.Rd
|
52d5795ee7a0a8f027a187f581746f5e71606d10
|
[] |
no_license
|
cran/SIS
|
b1625b801a9a994825531b2b72e9907b867fec84
|
711e96f624e1421ecc1a68ac3f9a1a48b6059e6d
|
refs/heads/master
| 2020-05-22T14:36:04.971738
| 2020-01-27T16:10:07
| 2020-01-27T16:10:07
| 17,693,541
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,462
|
rd
|
standardize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize.R
\name{standardize}
\alias{standardize}
\title{Standardization of High-Dimensional Design Matrices}
\usage{
standardize(X)
}
\arguments{
\item{X}{A design matrix to be standardized.}
}
\value{
A design matrix with standardized predictors or columns.
}
\description{
Standardizes the columns of a high-dimensional design matrix to mean zero
and unit Euclidean norm.
}
\details{
Performs a location and scale transform to the columns of the original
design matrix, so that the resulting design matrix with \eqn{p}-dimensional
observations \eqn{\{x_i : i=1,...,n\}} of the form
\eqn{x_i=(x_{i1},x_{i2},...,x_{ip})} satisfies \eqn{\sum_{i=1}^{n} x_{ij} =
0} and \eqn{\sum_{i=1}^{n} x_{ij}^{2} = 1} for \eqn{j=1,...,p}.
}
\examples{
set.seed(0)
n = 400; p = 50; rho = 0.5
corrmat = diag(rep(1-rho, p)) + matrix(rho, p, p)
corrmat[,4] = sqrt(rho)
corrmat[4, ] = sqrt(rho)
corrmat[4,4] = 1
corrmat[,5] = 0
corrmat[5, ] = 0
corrmat[5,5] = 1
cholmat = chol(corrmat)
x = matrix(rnorm(n*p, mean=15, sd=9), n, p)
x = x\%*\%cholmat
x.standard = standardize(x)
}
\references{
Diego Franco Saldana and Yang Feng (2018) SIS: An R package for Sure Independence Screening in
Ultrahigh Dimensional Statistical Models, \emph{Journal of Statistical Software}, \bold{83}, 2, 1-25.
}
\author{
Jianqing Fan, Yang Feng, Diego Franco Saldana, Richard Samworth, and
Yichao Wu
}
\keyword{models}
|
8187cd9b5900f9ff9470adedbbd632f7c64db867
|
7a0db46e0d8207d2e7cdb1447a2ed2029d97f47d
|
/man/fuzzy.CM.Rd
|
2aff25ec45e24aaee12aac280db1e63b8e2feaf6
|
[] |
no_license
|
fauzipandya/advfclust
|
726571a0436befde2482b2c2d002c41bd0711f14
|
ebeaa97ce8ca4e8aeea10695b0ebb7c09ede25d6
|
refs/heads/master
| 2020-09-21T19:37:09.567968
| 2016-09-24T16:45:42
| 2016-09-24T16:45:42
| 66,117,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,322
|
rd
|
fuzzy.CM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fuzzy.CM.R
\name{fuzzy.CM}
\alias{fuzzy.CM}
\title{Fuzzy C-Means}
\usage{
fuzzy.CM(X, K, m, max.iteration, threshold, member.init, RandomNumber = 0,
print.result = 0)
}
\arguments{
\item{X}{dataset (matrix/data frame)}
\item{K}{number of cluster}
\item{m}{fuzzyfier}
\item{max.iteration}{maximum iteration for convergence}
\item{threshold}{convergence criteria}
\item{member.init}{membership object or matrix that will be used for initialized}
\item{RandomNumber}{random number for start initializing}
\item{print.result}{print result (0/1)}
}
\value{
Fuzzy Clustering object
}
\description{
Fuzzy C-Means clustering Algorithm (Bezdek, 1984)
}
\details{
This function perform Fuzzy C-Means algorithm by Bezdek (1984).
Fuzzy C-Means is one of fuzzy clustering methods to clustering dataset
become K clusters. The number of clusters (K) must be greater than 1. To control the overlapping
or fuzziness of the clustering, the parameter m must be specified.
The maximum iteration and threshold are the stopping criteria used to decide convergence of the clustering.
RandomNumber is the seed used to generate the initial fuzzy membership matrix.
Clustering will produce fuzzy membership matrix (U) and fuzzy cluster centroid (V).
The greatest value of membership on data point will determine cluster label.
Centroid or cluster center can be use to interpret the cluster. Both membership and centroid produced by
calculating mathematical distance. Fuzzy C-Means calculate distance with Euclideans norm.
}
\section{Slots}{
\describe{
\item{\code{centroid}}{centroid matrix}
\item{\code{distance}}{distance matrix}
\item{\code{func.obj}}{function objective}
\item{\code{call.func}}{called function}
\item{\code{fuzzyfier}}{fuzzyness parameter}
\item{\code{method.fuzzy}}{method of fuzzy clustering used}
\item{\code{member}}{membership matrix}
\item{\code{hard.label}}{hard.label}
}}
\examples{
fuzzy.CM(iris[,1:4],K=3,m=2,max.iteration=100,threshold=1e-5,RandomNumber=1234)
}
\references{
Balasko, B., Abonyi, J., & Feil, B. (2002). Fuzzy Clustering and Data Analysis Toolbox: For Use with Matlab. Veszprem, Hungary.
Bezdek, J. C., Ehrlich, R., & Full, W. (1984). FCM: The Fuzzy C-Means Clustering Algorithm. Computers and Geosciences Vol 10, 191-203
}
|
266bea9dbab0da97c7165420fea1d399bacf442d
|
1528645b51076a3036642e54ebfc4aeb525ece92
|
/fastinR/man/simulation.Rd
|
924555765778f06a69911a43b38930f6e5e60cc7
|
[] |
no_license
|
Philipp-Neubauer/fastinR
|
e1d058bca1b4f40917cb2f4fec5cd4c41a9092ce
|
e6081d0e65348131a8f17058cc35e95cab0513b5
|
refs/heads/master
| 2021-07-10T19:23:52.889509
| 2018-06-20T10:42:13
| 2018-06-20T10:42:13
| 8,904,070
| 4
| 5
| null | 2017-02-28T01:09:18
| 2013-03-20T12:54:23
|
TeX
|
UTF-8
|
R
| false
| false
| 4,450
|
rd
|
simulation.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/Simulator.R
\name{simulation}
\alias{simulation}
\title{GUI: Simulate Fatty Acid profiles and Stable Isotope data}
\usage{
simulation()
}
\value{
\code{Write simulations to files} will produce a series of files that can be used as inputs to the fastinR gui or individual functions. The prefix of the files is entered by the user, the remainder of the filename suggests the contents:
\item{*_props}{Simulated diet proportions}
For Fatty Acids:
\item{*_FA_preys.csv}{Prey names (first column) and fatty acid profiles, with fatty acids names across the first row}
\item{*_FA_preds.csv}{Predator index (first column) and fatty acid profiles, with fatty acids names across the first row}
\item{*_fat_cont.csv}{Prey fat contents (here as mean and variance, can be specified for each prey sample for the main analysis, in that case the first column is the prey id and the second column is the individual sample's fat content)}
\item{*_FA_cc_means.csv}{Prey specific conversion coefficient means: Prey names (first column) and an n x P matrix for n preys and P fatty acids}
\item{*_FA_cc_var.csv}{Prey specific conversion coefficient variances, dimensions as for the means}
For Stable Isotopes:
\item{*_SI_preys.csv}{Prey names (first column) and stable isotope values, with SI names across the first row}
\item{*_SI_preds.csv}{Predator index (first column) and stable isotope values, with SI names across the first row}
\item{*_SI_fc_means.csv}{Prey specific SI additive fractionation coefficient means}
\item{*_SI_fc_var.csv}{Prey specific additive fractionation coefficient variance, dimensions as for the means}
For Covariates for predator proportions and grouped predators:
\item{*_Cov_n_Grp_effects.csv}{an n*p matrix of group and covariate influences on diet proportions for n preys and p (groups+covariates)}
\item{*_Groups.csv}{Group membership for each predator}
\item{*_Covariates.csv}{Covariate values for each predator}
}
\description{
GUI: Simulate Fatty Acid profiles and Stable Isotope data
}
\details{
Disclaimer: R gui facilities are somewhat unstable and have a mind of their own. Often unloading the package and re-loading it will fix glitches, but not always. Therefore, the command line is the suggested way to use the package.
The function calls a gui to simulate data, functions are of little use outside the gui facility.
The simulation is sequential, meaning one needs to first decide on the number of samples for predator and prey items (top sliders in simulation window), then simulate diet proportions (first row of buttons in window, will open additional windows), then marker data (second row), which can then be plotted and/or saved to file (third row).
Specifically, simulating data proceeds by selecting to simulate either individual diet proportions for each predator as drawn from a population level distribution for diet proportions, or by simulating grouped diet proportions (e.g., size groups, geographic groups) and/or by letting covariates influence the drawn diet proportions (e.g., size based diet shifts).
Pressing \code{Simulate Fatty Acid data} or \code{Simulate Stable Isotope data} makes sense only after proportions were simulated - conversely, when updating proportions, FA and/or SI data need to be re-simulated to have the new diet proportions alter the predator fatty acid/SI makeup.
\code{Plot current simulation} will draw an NMDS (non-metric multidimensional scaling) plot to show the simulated data. Note that there are sometimes funny interactions between the GUI and plot windows, especially in Rstudio. Sometimes you will need to press cancel in order to see the plots.
The gui will remain open to allow for many tries at simulating data. Various configurations can be written to file to allow for exploration of the model with \code{fastinR_GUI()}.
}
\examples{
\dontrun{simulation()}
}
\author{
Philipp Neubauer
}
\references{
Neubauer.P. and Jensen, O.P. (in prep)
}
\seealso{
\code{\link{fastinR_GUI}},\code{\link{add_FA}},\code{\link{add_SI}},\code{\link{run_MCMC}}
}
|
a96ef1262bf5bf080e91844cc751a92704abf539
|
d936566997935dc6f85c96f9408fe9fb3f2a0efc
|
/code/generate_stat_graphs.R
|
fd4970045d6e7020003775b21cc3c5ffb8c68dbe
|
[] |
no_license
|
suryaeada9/imsi_biocro
|
9472674be098b8aee83eebd56bec235abfbdd605
|
e1cd736b9985a4927a80aabd5b383c88db804f02
|
refs/heads/main
| 2023-07-01T09:02:09.922215
| 2021-08-06T22:13:05
| 2021-08-06T22:13:05
| 386,355,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,346
|
r
|
generate_stat_graphs.R
|
library(ggplot2)
library(dplyr)
library(patchwork)
#for sorghum graphs
setwd("C://Users/stark/OneDrive/Documents2021/biocro-dev/sorghum_stat_graphs_v3")
sorghum_stats <- read.csv("C:/Users/stark/OneDrive/Documents2021/biocro-dev/sorghum_stats.csv")
sorghum_stats['quality_stat'] <- 100 * (1-log(sorghum_stats[['Mahalanobis_normed']]))
sorghum_stats <- sorghum_stats %>% filter((grepl('all',training_set) & grepl('all',test_set)) | (grepl('final',training_set) & grepl('final',test_set)))
n <- nrow(sorghum_stats)
View(sorghum_stats)
# Classify an evaluation: "Training" when a model was scored against the
# same set it was fit to, "Validation" otherwise.
training_or_test <- function(training_set, test_set) {
  if (training_set == test_set) "Training" else "Validation"
}
# Label the sorghum training-set kind: names containing "all" used every
# observation, the rest were fit to final yield only.
final_or_all <- function(training_set) {
  if (grepl("all", training_set)) "All Data" else "Final Yield Only"
}
# Annotate each sorghum run with its training/validation status and the
# kind of training set that produced it.
# seq_len(n) (rather than 1:n) is safe when the filtered table is empty.
for (i in seq_len(n)) {
  sorghum_stats[i, 'training_test_same'] <- training_or_test(sorghum_stats[i, 'training_set'], sorghum_stats[i, 'test_set'])
  sorghum_stats[i, 'final_or_all'] <- final_or_all(sorghum_stats[i, 'training_set'])
}
#make a pdf file
pdf(
file = "RSquared.pdf",
width = 12, # inches
height = 12, # inches
useDingbats = FALSE # make sure symbols are rendered properly in the PDF
)
#graph to appear on the pdf
graph_all <- ggplot(data=sorghum_stats,aes(x=training_test_same,y=RSquared)) +
geom_point(aes(color=final_or_all,size=3,alpha=1/5)) +
xlab("Data") + #x-axis label
ylab("R-Squared") + #y-axis label
labs(title = "R-Squared Sorghum") +
theme(legend.position = "bottom") + #put legend at bottom
scale_color_manual(name = "Training Set",values = c("blue", "red")) +
guides(size = FALSE, alpha = FALSE)
print(graph_all)
dev.off()
#for miscanthus graphs
setwd("C://Users/stark/OneDrive/Documents2021/biocro-dev/miscanthus_stat_graphs_v3")
misc_stats <- read.csv("C:/Users/stark/OneDrive/Documents2021/biocro-dev/miscanthus_stats.csv")
misc_stats <- misc_stats %>% filter((grepl('Peak',training_set) & grepl('Peak',test_set)) | (grepl('Only',training_set) & grepl('Only',test_set)))
n <- nrow(misc_stats)
# Label the miscanthus training-set kind (redefines the sorghum version):
# names containing "Peak" used both harvest and peak observations,
# otherwise only harvest data were fit.
final_or_all <- function(training_set) {
  if (grepl("Peak", training_set)) "Harvest and Peak" else "Harvest Only"
}
# Annotate each miscanthus run with its training/validation status and the
# kind of training set that produced it.
# seq_len(n) (rather than 1:n) is safe when the filtered table is empty.
for (i in seq_len(n)) {
  misc_stats[i, 'training_test_same'] <- training_or_test(misc_stats[i, 'training_set'], misc_stats[i, 'test_set'])
  misc_stats[i, 'final_or_all'] <- final_or_all(misc_stats[i, 'training_set'])
}
#make a pdf file
pdf(
file = "ChiSquared.pdf",
width = 12, # inches
height = 12, # inches
useDingbats = FALSE # make sure symbols are rendered properly in the PDF
)
#graph to appear on the pdf
graph_all <- ggplot(data=misc_stats,aes(x=training_test_same,y=ChiSquared)) +
geom_point(aes(color=final_or_all,size=3,alpha=1/5)) +
xlab("Data") + #x-axis label
ylab("Chi-Squared") + #y-axis label
labs(title = "Chi-Squared Miscanthus") +
theme(legend.position = "bottom") + #put legend at bottom
scale_color_manual(name = "Training Set",values = c("blue", "red")) +
guides(size = FALSE, alpha = FALSE)
print(graph_all)
dev.off()
#make a pdf file
pdf(
file = "MAE.pdf",
width = 11, # inches
height = 30, # inches
useDingbats = FALSE # make sure symbols are rendered properly in the PDF
)

# Build one MAE line plot for a leave-one-site-out comparison.
# The original file repeated this ggplot block twelve times; the only
# differences were the filter patterns, the title and the color vector.
#   data             : the misc_stats data frame
#   training_pattern : substring selecting the training sets to keep
#   test_pattern     : substring selecting the test-set flavor to keep
#   title            : plot title
#   colors           : two colors mapped to the test_set factor levels
# Training sets are placed on the x-axis in reverse alphabetical order,
# exactly as in the original hand-written plots.
make_mae_plot <- function(data, training_pattern, test_pattern, title, colors) {
  plot_data <- data %>%
    filter(grepl(training_pattern, training_set) & grepl(test_pattern, test_set))
  ggplot(plot_data,
         aes(x = factor(training_set, levels = rev(levels(factor(training_set)))),
             y = MAE, group = test_set)) +
    geom_line(aes(color = test_set)) +
    geom_point() +
    xlab("Training Set") + #x-axis label
    ylab("MAE") + #y-axis label
    labs(title = title) +
    scale_color_manual(name = "Test Set", values = colors) +
    theme(legend.position = "bottom") #put legend at bottom
}

# One "Peak and Harvest" and one "Harvest Only" plot per left-out site.
Leave_Out_Wageningen <- make_mae_plot(misc_stats, "AdAbMPS", "Peak and Harvest",
  "MAE Peak and Harvest Fit Leaving Out Wageningen", c("blue", "red"))
H_Only_Leave_Out_Wageningen <- make_mae_plot(misc_stats, "AdAbMPS", "Harvest Only",
  "MAE Harvest Only Fit Leaving Out Wageningen", c("blue", "red"))
Leave_Out_Stuttgart <- make_mae_plot(misc_stats, "AdAbMPW", "Peak and Harvest",
  "MAE Peak and Harvest Fit Leaving Out Stuttgart", c("blue", "red"))
H_Only_Leave_Out_Stuttgart <- make_mae_plot(misc_stats, "AdAbMPW", "Harvest Only",
  "MAE Harvest Only Fit Leaving Out Stuttgart", c("blue", "red"))
Leave_Out_Potash <- make_mae_plot(misc_stats, "AdAbMSW", "Peak and Harvest",
  "MAE Peak and Harvest Fit Leaving Out Potash", c("blue", "red"))
H_Only_Leave_Out_Potash <- make_mae_plot(misc_stats, "AdAbMSW", "Harvest Only",
  "MAE Harvest Only Fit Leaving Out Potash", c("blue", "red"))
Leave_Out_Moscow <- make_mae_plot(misc_stats, "AdAbPSW", "Peak and Harvest",
  "MAE Peak and Harvest Fit Leaving Out Moscow", c("blue", "red"))
H_Only_Leave_Out_Moscow <- make_mae_plot(misc_stats, "AdAbPSW", "Harvest Only",
  "MAE Harvest Only Fit Leaving Out Moscow", c("blue", "red"))
# NOTE(review): the two Aberystwyth plots reverse the color vector relative
# to every other site (red/blue instead of blue/red). That asymmetry is
# reproduced here as-is -- confirm whether it is intentional.
Leave_Out_Aberystwyth <- make_mae_plot(misc_stats, "AdMPSW", "Peak and Harvest",
  "MAE Peak and Harvest Fit Leaving Out Aberystwyth", c("red", "blue"))
H_Only_Leave_Out_Aberystwyth <- make_mae_plot(misc_stats, "AdMPSW", "Harvest Only",
  "MAE Harvest Only Fit Leaving Out Aberystwyth", c("red", "blue"))
Leave_Out_Adana <- make_mae_plot(misc_stats, "AbMPSW", "Peak and Harvest",
  "MAE Peak and Harvest Fit Leaving Out Adana", c("blue", "red"))
H_Only_Leave_Out_Adana <- make_mae_plot(misc_stats, "AbMPSW", "Harvest Only",
  "MAE Harvest Only Fit Leaving Out Adana", c("blue", "red"))

#prints all the graphs on one page (patchwork composition: + side by side, / stacked)
print((Leave_Out_Adana + H_Only_Leave_Out_Adana) / (Leave_Out_Aberystwyth + H_Only_Leave_Out_Aberystwyth) / (Leave_Out_Moscow + H_Only_Leave_Out_Moscow)/ (Leave_Out_Potash + H_Only_Leave_Out_Potash) / (Leave_Out_Stuttgart + H_Only_Leave_Out_Stuttgart)/ (Leave_Out_Wageningen + H_Only_Leave_Out_Wageningen))
dev.off()
|
04c8f12430cd649d736fde93a57e2ed400e49d45
|
7322d471211d096da536eb3383753fd75c994807
|
/res/rol-4.r
|
f717ac95e12b9e5a6eddb1633a6938838777fd99
|
[] |
no_license
|
HeitorBRaymundo/861
|
fbe09d7d226511895a493174b90fcdfe0a7f490d
|
cfcedf9097280073d4325fc9c5d7e28ba3633a52
|
refs/heads/master
| 2020-06-28T18:22:50.598143
| 2019-11-01T21:45:39
| 2019-11-01T21:45:39
| 200,306,264
| 0
| 0
| null | 2019-11-01T21:46:10
| 2019-08-02T22:48:20
|
Assembly
|
UTF-8
|
R
| false
| false
| 1,295
|
r
|
rol-4.r
|
| pc = 0xc002 | a = 0x14 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc005 | a = 0x14 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0300] = 0x14 |
| pc = 0xc008 | a = 0x14 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00b | a = 0x14 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00e | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0300] = 0x50 |
| pc = 0xc011 | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc014 | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc017 | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc01a | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc01d | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc020 | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc023 | a = 0x50 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc026 | a = 0x3c | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0300] = 0x3c |
| pc = 0xc029 | a = 0x3c | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
|
d22fe5d5e0c37bcb7617de767eeaea028ddd070a
|
f454ace78f514cde609ff15152f347441286dd60
|
/cachematrix.R
|
3bebc205a141924e45374b978ffcfff0db20221c
|
[] |
no_license
|
jasonschaad/ProgrammingAssignment2
|
2468e66c74a16d7eab7d859c2f3e8d66418811af
|
5114635a95ab513f5209b62d71df55131e2f899b
|
refs/heads/master
| 2022-12-30T07:48:19.109990
| 2020-09-09T20:12:29
| 2020-09-09T20:12:29
| 294,206,388
| 0
| 0
| null | 2020-09-09T19:13:19
| 2020-09-09T19:13:18
| null |
UTF-8
|
R
| false
| false
| 1,616
|
r
|
cachematrix.R
|
## Taking the inverse of a matrix is computationally expensive, so when the
## same inverse is needed repeatedly we compute it once and cache it.
##
## makeCacheMatrix() wraps a matrix in a closure and returns a list of four
## accessor functions:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL if not yet computed)
## The matrix is assumed to be invertible.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # <<- assigns into makeCacheMatrix's environment, which all four
      # closures share -- this is what makes the cache work
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## If the inverse is already cached it is returned directly (after emitting
## a message); otherwise it is computed with solve(), stored in the cache,
## and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # cache miss: compute, store, return
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting the cached inverse")
  }
  cached
}
|
7a471332cd5534b1955ca1e5e7abffd77a31fdc4
|
7e6a3b7d3f0f4295e3473fa7438192fed0f27a10
|
/predict.R
|
21b880bedd11a82c182bd29d0889e4c1912a2d28
|
[] |
no_license
|
DKooijman97/MUVR_Selectivity_Ratio
|
ecb1ba3a2cc4100b781428b93e01c6b92ca05249
|
de8cbe730bc1b25370d2527493b551709134b232
|
refs/heads/master
| 2020-05-20T04:27:57.494950
| 2019-05-07T11:11:42
| 2019-05-07T11:11:42
| 185,384,887
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,359
|
r
|
predict.R
|
#' Predict pls
#'
#' Adapted and stripped down from mixOmics v 5.2.0 (https://cran.r-project.org/web/packages/mixOmics/)
#'
#' @param object a plsMUVR object
#' @param newdata new data
#' @param onlyPred Boolean for whether to report back predictions only (defaults to FALSE)
#' @param scale Boolean for whether to center/scale `newdata` using the
#'   centering/scaling attributes stored on the training data (defaults to TRUE)
#' @param ... currently unused
#'
#' @return invisibly, a list with `predict` (and, unless `onlyPred`,
#'   `variates`, `B.hat` and `betay`)
#' @export
predict.plsMUVR <- function(object, newdata, onlyPred=FALSE, scale=TRUE, ...){
  #-- argument validation --#
  if (missing(newdata)) stop("No new data available.")
  x = object$X
  y = object$Y
  q = ncol(y)
  p = ncol(x)
  # If the fit removed near-zero-variance predictors, remove the same
  # columns from newdata. (Scalar condition: && instead of elementwise &.)
  if (ncol(x) != ncol(newdata) && length(object$nzv$Position) > 0) {
    newdata = newdata[, -object$nzv$Position]
  }
  if (length(dim(newdata)) == 2) {
    if (ncol(newdata) != p)
      stop("'newdata' must be a numeric matrix with ncol = ", p,
           " or a vector of length = ", p, ".")
    if (!identical(colnames(x), colnames(newdata)))
      stop("Mismatch between columns in model and newdata.")
  }
  if (length(dim(newdata)) == 0) {
    # a bare vector is treated as a single observation
    if (length(newdata) != p)
      stop("'newdata' must be a numeric matrix with ncol = ", p,
           " or a vector of length = ", p, ".")
    dim(newdata) = c(1, p)
  }
  #-- matrix initialisation --#
  ncomp = object$ncomp
  a = object$loadings$X # X loadings
  c = object$mat.c
  newdata = as.matrix(newdata)
  if (scale) {
    # centre/scale newdata with the training-set statistics
    means.x = attr(x, "scaled:center")
    sigma.x = attr(x, "scaled:scale")
    newdata = scale(newdata, center = means.x, scale = sigma.x)
  }
  means.y = attr(y, "scaled:center")
  sigma.y = attr(y, "scaled:scale")
  ##- regression coefficients
  B.hat = array(0, dim = c(p, q, ncomp))
  ##- predictions (unused y.hat2 from the original removed)
  y.hat = array(0, dim = c(nrow(newdata), q, ncomp))
  ##- variates
  t.pred = array(0, dim = c(nrow(newdata), ncomp))
  variates.x = object$variates$X
  betay = list()
  #-- prediction, one slice per number of components h --#
  for (h in 1:ncomp) {
    # regression of y on the X variates => y loadings up to a scale factor
    dd = coefficients(lm(y ~ variates.x[, 1:h, drop=FALSE]))
    if (q == 1) betay[[h]] = (dd[-1]) else betay[[h]] = (dd[-1, ])
    W = a[, 1:h, drop=FALSE] %*% solve(t(c[, 1:h, drop=FALSE]) %*% a[, 1:h, drop=FALSE])
    B = W %*% drop(betay[[h]])
    y.temp = newdata %*% as.matrix(B)                          # prediction of centred+scaled y
    y.temp = scale(y.temp, center=FALSE, scale=1/sigma.y)      # undo the scaling
    y.temp = scale(y.temp, center=-means.y, scale=FALSE)       # undo the centring
    y.hat[, , h] = y.temp # prediction on the original y scale
    t.pred[, h] = newdata %*% W[, h]
    B.hat[, , h] = B
  } #end h
  #-- output --#
  rownames(t.pred) = rownames(newdata)
  colnames(t.pred) = paste("dim", c(1:ncomp), sep = " ")
  rownames(y.hat) = rownames(newdata)
  colnames(y.hat) = colnames(y)
  if (onlyPred) return(invisible(list(predict = y.hat))) else return(invisible(list(predict = y.hat, variates = t.pred, B.hat = B.hat, betay = betay)))
}
#' Predict plsda
#'
#' Adapted and stripped down from mixOmics v 5.2.0 (https://cran.r-project.org/web/packages/mixOmics/)
#'
#' @param object a plsdaMUVR object
#' @param newdata new data
#' @param onlyPred Boolean for whether to report back predictions only (defaults to FALSE)
#' @param scale Boolean for whether to center/scale `newdata` using the
#'   centering/scaling attributes stored on the training data (defaults to TRUE)
#' @param ... currently unused
#'
#' @return invisibly, a list with `predict` (and, unless `onlyPred`,
#'   `variates`, `B.hat` and `betay`)
#' @export
predict.plsdaMUVR <- function(object, newdata, onlyPred=FALSE, scale=TRUE, ...) {
  #-- argument validation --#
  if (missing(newdata)) stop("No new data available.")
  x = object$X
  y = object$Y
  yprim = object$ind.mat # indicator (dummy) matrix for the class outcome
  q = ncol(yprim)
  p = ncol(x)
  # If the fit removed near-zero-variance predictors, remove the same
  # columns from newdata. (Scalar condition: && instead of elementwise &.)
  if (ncol(x) != ncol(newdata) && length(object$nzv$Position) > 0) {
    newdata = newdata[, -object$nzv$Position]
  }
  if (length(dim(newdata)) == 2) {
    if (ncol(newdata) != p)
      stop("'newdata' must be a numeric matrix with ncol = ", p,
           " or a vector of length = ", p, ".")
    if (!identical(colnames(x), colnames(newdata)))
      stop("Mismatch between columns in model and newdata.")
  }
  if (length(dim(newdata)) == 0) {
    # a bare vector is treated as a single observation
    if (length(newdata) != p)
      stop("'newdata' must be a numeric matrix with ncol = ", p,
           " or a vector of length = ", p, ".")
    dim(newdata) = c(1, p)
  }
  #-- matrix initialisation --#
  ncomp = object$ncomp
  a = object$loadings$X # X loadings
  c = object$mat.c
  newdata = as.matrix(newdata)
  if (scale) {
    # centre/scale newdata with the training-set statistics
    means.x = attr(x, "scaled:center")
    sigma.x = attr(x, "scaled:scale")
    newdata = scale(newdata, center = means.x, scale = sigma.x)
  }
  means.y = attr(y, "scaled:center")
  sigma.y = attr(y, "scaled:scale")
  ##- regression coefficients
  B.hat = array(0, dim = c(p, q, ncomp))
  ##- predictions
  y.hat = array(0, dim = c(nrow(newdata), q, ncomp))
  ##- variates
  t.pred = array(0, dim = c(nrow(newdata), ncomp))
  variates.x = object$variates$X
  betay = list()
  #-- prediction, one slice per number of components h --#
  for (h in 1:ncomp) {
    # regression of y on the X variates => y loadings up to a scale factor
    dd = coefficients(lm(y ~ variates.x[, 1:h, drop=FALSE]))
    if (q == 1) betay[[h]] = (dd[-1]) else betay[[h]] = (dd[-1, ])
    W = a[, 1:h, drop=FALSE] %*% solve(t(c[, 1:h, drop=FALSE]) %*% a[, 1:h, drop=FALSE])
    B = W %*% drop(betay[[h]])
    y.temp = newdata %*% as.matrix(B)                          # prediction of centred+scaled y
    y.temp = scale(y.temp, center=FALSE, scale=1/sigma.y)      # undo the scaling
    y.temp = scale(y.temp, center=-means.y, scale=FALSE)       # undo the centring
    y.hat[, , h] = y.temp # prediction on the original y scale
    t.pred[, h] = newdata %*% W[, h]
    B.hat[, , h] = B
  } #end h
  #-- output --#
  rownames(t.pred) = rownames(newdata)
  colnames(t.pred) = paste("dim", c(1:ncomp), sep = " ")
  rownames(y.hat) = rownames(newdata)
  colnames(y.hat) = colnames(y)
  if (onlyPred) return(invisible(list(predict = y.hat))) else return(invisible(list(predict = y.hat, variates = t.pred, B.hat = B.hat, betay = betay)))
}
|
fcc7e9751ae1edd7af35adaaa737babb47136531
|
ea0904825812f1c80bedb575cb5bb5b7da7ec9c0
|
/R/function-buildPairwiseComparisonMatrix.R
|
8db157d6c73450406298a4d52bf00d3e90356fa4
|
[] |
no_license
|
cran/FuzzyAHP
|
f1cebe1e55d01956d04010250b1d189ae4e8167c
|
1e2015389867bdab2351fe7ba9a34e2b534ae331
|
refs/heads/master
| 2021-01-10T13:17:36.203197
| 2019-12-06T15:40:02
| 2019-12-06T15:40:02
| 55,608,658
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,340
|
r
|
function-buildPairwiseComparisonMatrix.R
|
#' Function that builds Pairwise Comparison Matrix based on list of Pairwise Comparison Matrices
#'
#' @description
#' This function builds a Pairwise Comparison Matrix from a list of
#' Pairwise Comparison Matrices. The resulting matrix is the element-wise
#' geometric mean of all matrices in \code{listOfMatrices}. All matrices
#' must have identical dimensions.
#'
#' @param listOfMatrices An object of \code{\linkS4class{list}} whose
#'   elements are all \code{\linkS4class{PairwiseComparisonMatrix}} objects.
#'
#' @return An object of class \code{\linkS4class{PairwiseComparisonMatrix}}
#'
#' @export
#' @rdname buildPairwiseComparisonMatrix-methods
#' @name buildPairwiseComparisonMatrix
setGeneric("buildPairwiseComparisonMatrix",
           function(listOfMatrices) standardGeneric("buildPairwiseComparisonMatrix"))

#' @rdname buildPairwiseComparisonMatrix-methods
#' @aliases buildPairwiseComparisonMatrix,list-method
setMethod(
  f="buildPairwiseComparisonMatrix",
  signature(listOfMatrices = "list"),
  definition=function(listOfMatrices)
  {
    number = length(listOfMatrices)
    size = nrow(listOfMatrices[[1]]@values)

    for(i in 1:number){
      # is() respects S4 inheritance; class(x) != "..." does not and can
      # fail when class() returns a vector
      if (!is(listOfMatrices[[i]], "PairwiseComparisonMatrix")){
        stop(paste0("Element on position ", i, " is not of class PairwiseComparisonMatrix. Its type is ", class(listOfMatrices[[i]]), "."))
      }
      # Reject when EITHER dimension differs. The original used &&, which
      # only triggered when BOTH dimensions differed, silently accepting
      # e.g. a 3x3 mixed with a 3x4 matrix.
      if (dim(listOfMatrices[[1]]@values)[1] != dim(listOfMatrices[[i]]@values)[1] ||
          dim(listOfMatrices[[1]]@values)[2] != dim(listOfMatrices[[i]]@values)[2]){
        stop(paste0("PairwiseComparisonMatrices do not have the same sizes: [", dim(listOfMatrices[[1]]@values)[1], ",",
                    dim(listOfMatrices[[1]]@values)[2], "] != [", dim(listOfMatrices[[i]]@values)[1], ",",
                    # original printed [[1]]'s column count here; [[i]]'s was intended
                    dim(listOfMatrices[[i]]@values)[2], "]."))
      }
    }

    # Element-wise geometric mean across all matrices.
    resultMatrix = listOfMatrices[[1]]@values
    for (i in 1:size){
      for (j in 1:size){
        # collect the (i, j) entry of every matrix (no c() growth in a loop)
        entries = vapply(listOfMatrices, function(m) m@values[i, j], numeric(1))
        resultMatrix[i, j] = prod(entries)^(1/number)
      }
    }

    # .textMatrixRepresentation is a package-internal helper (defined elsewhere)
    textMatrix = .textMatrixRepresentation(resultMatrix)

    return(new("PairwiseComparisonMatrix", valuesChar = textMatrix, values = resultMatrix,
               variableNames = listOfMatrices[[1]]@variableNames))
  }
)
|
8e12c65accf2f102e761913269f6dccc9b83f6d9
|
7b8478fa05b32da12634bbbe313ef78173a4004f
|
/tests/testthat/test-summarise.R
|
8dc556b4d4d40ce368bd2b5df58fcb2e20b73662
|
[] |
no_license
|
jeblundell/multiplyr
|
92d41b3679184cf1c3a637014846a92b2db5b8e2
|
079ece826fcb94425330f3bfb1edce125f7ee7d1
|
refs/heads/develop
| 2020-12-25T18:02:10.156393
| 2017-11-07T12:48:41
| 2017-11-07T12:48:41
| 58,939,162
| 4
| 1
| null | 2017-11-07T12:01:35
| 2016-05-16T14:30:38
|
R
|
UTF-8
|
R
| false
| false
| 1,961
|
r
|
test-summarise.R
|
context("summarise")
#summarise
# Shared 2-node cluster used by every test below; results come back with
# one value per node (presumably the data is partitioned across nodes --
# hence the pairs/quadruples in the expectations).
cl2 <- parallel::makeCluster(2)
# Ungrouped: each of the 2 nodes summarises its own half of the 100 rows.
test_that ("summarise() works on ungrouped data", {
dat <- Multiplyr (x=1:100, y=rep(2, 100), alloc=1, cl=cl2)
dat %>% summarise (x=length(x), y=length(y)/2, z=sum(y))
expect_equal (dat["x"], c(50, 50))
expect_equal (dat["y"], c(25, 25))
expect_equal (dat["z"], c(100, 100))
rm (dat)
})
# Grouped: one summary row per group A-D (25 rows each).
test_that ("summarise() works on grouped data", {
dat <- Multiplyr (x=1:100, y=rep(2, 100),
G=rep(c("A", "B", "C", "D"), each=25),
alloc=1, cl=cl2)
dat %>% partition_group (G)
dat %>% summarise (x=length(x), y=length(y)/2, z=sum(y))
expect_equal (dat["x"], rep(25, 4))
expect_equal (dat["y"], rep(12.5, 4))
expect_equal (dat["z"], rep(50, 4))
rm (dat)
})
# summarise() must see columns renamed/transmuted earlier in the pipe:
# y renamed to b, then doubled (2*2=4), so sum over 25 rows is 100.
test_that ("summarise() works with transmute/rename", {
dat <- Multiplyr (x=1:100, y=rep(2, 100),
G=rep(c("A", "B", "C", "D"), each=25),
alloc=1, cl=cl2)
dat %>% partition_group(G)
dat %>%
rename(a=x, b=y) %>%
transmute(b = b*2) %>%
summarise(x=length(b), y=sum(b))
expect_equal (dat["x"], rep(25, 4))
expect_equal (dat["y"], rep(100, 4))
rm (dat)
})
# Error paths: no operations given, and a plain data.frame instead of a
# Multiplyr object (second argument of expect_error matches the message).
test_that ("summarise() throws an error if no parameters or non-Multiplyr", {
dat <- Multiplyr (x=1:100, y=rep(2, 100), alloc=1, cl=cl2)
expect_error (dat %>% summarise(), "operations")
expect_error (data.frame(x=1:100) %>% summarise(N=length(x)), "Multiplyr")
rm (dat)
})
# After group_by + summarise + reduce, only the group column and the
# reduced column should remain.
test_that ("reduce() keeps the right columns", {
dat <- Multiplyr (x=1:100, y=rep(2, 100),
G=rep(c("A", "B", "C", "D"), each=25),
alloc=1, cl=cl2)
dat %>% group_by (G) %>% summarise (N=length(x)) %>% reduce(N=sum(N))
expect_equal (names(dat), c("G", "N"))
rm (dat)
})
#Attempt to stop "no function to return from, jumping to top level"
gc()
parallel::stopCluster(cl2)
|
7e1d689a7dc93e8e82ec31361db5c7ffa2c5ac85
|
f586cc3599f8685ffed9f10befa8bef0dd761cd4
|
/R/vconf.R
|
3d74d7c37926845c9e2c22531c9539ffd8a5e24f
|
[] |
no_license
|
cran/mrt
|
87bd3d0b56c73c95146ab1c1d8703f8a303e3c89
|
b2ad5f7db7432499d81f827812b2cfbf068132c1
|
refs/heads/master
| 2020-04-07T15:45:38.872572
| 2009-08-17T00:00:00
| 2009-08-17T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 676
|
r
|
vconf.R
|
vconf <-
function(ctab,clevel=.95){
if (clevel < .5)
warning("With low confidence values interval may not include sample value")
# This function ignores the warning if any E_ij < 5
suppressWarnings(chsq <- chisq.test(ctab,correct=FALSE))
df <- chsq$parameter
stat <- chsq$statistic
N <- sum(ctab)
cls <- conf.limits.nc.chisq(stat,clevel,df)
K <- min(dim(ctab)[1]-1,dim(ctab)[2]-1)
V <- sqrt(stat/(N*K)); names(V) <- "Cramer's V"
Vlb <- sqrt((cls$Lower.Limit+df)/(N*K)); names(Vlb) <- "lower bound"
Vub <- sqrt((cls$Upper.Limit+df)/(N*K)); names(Vub) <- "upper bound"
outv <- list(V,Vlb,Vub)
names(outv) <- c("V","Vlb","Vub")
return(outv)}
|
010bc8c6826a072d5a08d25941ed7905e2b0c155
|
ed3f052100d437c57c52b737d4c0f91d3b20c0af
|
/scriptsAndDatabase/detective_clean/unused/ALBERTclean.R
|
22fb0343fd697c5cb4fa4ad29a10edaf8b7ef677
|
[] |
no_license
|
timschott/dmp
|
216f1b21a95dce67f55097fccf1dce599fd7884b
|
13859bda6e9adefc1ebbf26a98a299c457b4468e
|
refs/heads/master
| 2021-10-14T11:49:54.435887
| 2021-10-05T02:59:56
| 2021-10-05T02:59:56
| 156,625,032
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,828
|
r
|
ALBERTclean.R
|
setwd("~/Documents/7thSemester/dmp/corpus")
library("RSQLite")
library("tokenizers")
library("dplyr")
library("textclean")
library(stringi)
library("stringr")
library("tm")
library(qdap)
# just do the paragraphs first then distill down.
alb.paragraphs <- read.csv("Python_Scripts/checkCorpus/ALBERT_paras.csv", stringsAsFactors = FALSE)
alb.paragraphs$X0[20]
alb.paragraphs <- alb.paragraphs[-c(1:18,2241:2244),]
colnames(alb.paragraphs) <- c("arb", "paragraphs")
alb.paragraphs$paragraphs[c(779, 1979)]
# 6, 11, 14, 17*, 22, 31, 34, 35*, 47, 53, 67, 71, 77, 78*, 94, 95
# *Illustration
alb.paragraphs$paragraphs[174] <- "The earl's face had brightened at the prospect of meeting his fiancée under the favourable conditions of Brett's presence. But he yielded with\ngood grace, and promptly sat down to write a brief note explanatory of\nthe barrister's identity and position in the inquiry."
alb.paragraphs$paragraphs[284] <- "The appearance of Winter at the door caused the gaping idlers in the\nstreet to endeavour to draw nearer to the mysterious portals. Thereupon\nthree policemen on duty outside hustled the mob back, and Brett took\nadvantage of the confusion thus created to slip to the doorway almost\nunperceived. One of the police constables turned round to make a grab at\nhim, but a signal from a confrère inside prevented this, and Brett\nquickly found himself within a spacious entrance hall with the door\nclosed and bolted behind him."
alb.paragraphs$paragraphs[288] <- "Inspector Walters assumed the role of guide."
alb.paragraphs$paragraphs[317] <- "\"I expected as much,\" he said, taking hold of the torn part of the\nscreen and giving it a vigorous pull, with the result that a small\npiece, measuring about eight inches by six, came bodily out. \"This has\nbeen cut away, as you will see, by some instrument which did not even\nbend the wire. It was subsequently replaced, whilst the fractured parts\nwere sufficiently cemented by some composition to retain this section in\nits place, and practically defy observation. There was nothing for it\nbut force to reveal it thus early. No doubt in time the composition\nwould have dried, or been washed away, and then this bit of the screen\nwould have fallen out by the action of wind and weather. Here, at any\nrate, is a hole in your defensive armour.\" He held out the pièce de conviction to the discomfited Sharpe, who surveyed it in silence."
alb.paragraphs$paragraphs[385] <- "He did not explain to his professional confrère that it was a positive\nstimulant to his abounding energy and highly-strung nerves to find that\nhe was actually following the path taken by the criminal whom he was\npursuing. The mere fact lent reality to the chase. For a mile, at any\nrate, there could be no mistake, though he might expect a check at the\nCarlton. Arrived there, Brett alighted."
alb.paragraphs$paragraphs[554] <- "On their way they captured a railway official and told him to reserve a coupè lit compartment. In the midst of their hasty meal the Frenchman\narrived, voluble, apologetic. The train was crowded. Never had there\nbeen such a rush to the South. By the exercise of most profound care he\nhad secured them two seats in a compartment, but the third had already\ntaken itself. He was sorry for it; he had done his best."
alb.paragraphs$paragraphs[621] <- "He glanced at his watch. \"It is just about time for déjeuner,\" he\ncontinued. \"What do you say if we drive to the Rue Barbette at once?\""
alb.paragraphs$paragraphs[642] <- "\"Have you had déjeuner, or have you time to join me in a cigarette?\"\nhe went on."
alb.paragraphs$paragraphs[775] <- ""
alb.paragraphs$paragraphs[872] <- "\"That is the way people live in Paris, my dear fellow. Life is an\nartificial matter here. But all this excitement has made me hungry. Let\nus have déjeuner.\""
alb.paragraphs$paragraphs[1382] <- "\"'_Vous etes un très bel Anglais, mon vieux,_' she cried, coquettishly\nsetting her head on one side and glancing first at him and then at me.\""
alb.paragraphs$paragraphs[1407] <- "Brett now deemed it advisable to take the commissary of police fully\ninto his confidence. The official promptly suggested that every\npersonage in Paris connected even remotely with the mystery--Gros Jean,\nthe Turks, the waiter at the Café Noir, and even the little thief \"Le\nVer\"--should be arrested and subjected to a procès-verbal."
alb.paragraphs$paragraphs[1422] <- ""
alb.paragraphs$paragraphs[1690] <- "It is a most curious fact that young ladies in the engaged stage regard\ntheir fiancée's male friends with extreme suspicion; the more\nenthusiastic the man, the more suspicious the woman."
alb.paragraphs$paragraphs[1793] <- "He had hardly quitted the hotel when a waiter announced that a jeune Française wished to see Mr. Brett."
alb.paragraphs$paragraphs[1949] <- "\"Voilà! Ils viennent! Venez vite!_\" cried Gros Jean."
alb.paragraphs$paragraphs[2220] <- "\"What a darling!\" cried Edith. \"I do wish he would say something. Cher Prophète, parlez avec moi!_\""
alb.paragraphs$paragraphs[2222] <- "\"_Vive Mahomet! Vive le Sultan! ¿ bas les Grecs! ‡ bas! ‡ bas!_\""
alb.paragraphs$paragraphs[779] <- ""
alb.paragraphs$paragraphs[1979] <- ""
alb.paragraphs$paragraphs[583] <- "Although Gaultier had not said as much, Brett guessed that his destination was the British Embassy in the Rue du Faubourg St. Honoré. The route followed by the cabman led straight to that well-known locality. The Frenchman in the second cab evidently thought likewise, for, at the corner of the Rue Boissy he pulled up, and Brett was just in time to give his driver instructions to go ahead and thus avoid attracting undue notice to himself."
alb.paragraphs$paragraphs[597] <- "\"Yes,\" replied the King's messenger, \"and what is more, I have discovered his residence since we parted. It seems that one of the attachés at the Embassy met him recently and thought it advisable to keep in touch with the Young Turkish party, of which Hussein-ul-Mulk is a shining light. So he asked him where he lived, and as the result I have jotted down the address in my note-book.\" Gaultier searched through his memoranda, and speedily found what he wanted."
alb.paragraphs$paragraphs[653] <- "Gaultier knew that there was more behind the apparent exchange of compliments than appeared on the surface. Having fulfilled his pledge to Brett, he said hurriedly, \"Both of you gentlemen will understand that I cannot very well take part in a political discussion. With your permission, Hussein, I will now leave my friend with you for a half-hour's chat, as I have an appointment at the Café Riche.\""
alb.paragraphs$paragraphs[855]<-"\"Thank you,\" said Brett. The two re-entered their cab, and Brett told the driver to proceed as rapidly as possible to the Rue St. Honoré."
alb.paragraphs$paragraphs[875] <- "On their way to the hotel, Brett, yielding apparently to a momentary impulse, stopped the cab at a house in the Rue du Chaussée d'Antin. Without any explanation to Lord Fairholme he disappeared into the interior, and did not rejoin his companion for nearly ten minutes."
# loop to find non UTF 8 lines. jeez.
bad_spots <-c(0)
for(i in seq(1:length(alb.paragraphs$paragraphs))){
if(all(stri_enc_isutf8(alb.paragraphs$paragraphs[i]))==FALSE){
bad_spots<-append(bad_spots, i+1)
}
}
# bad_spots
alb.paragraphs$paragraphs[1174] <- "Apologising to André with a laugh, he then sauntered towards the front café, where he purchased another drink at the counter. He assured\nhimself that he had not been mistaken. The only private door out of the\nbar led into the passage, so that the room beyond could only be reached\nby a staircase or through a trap-door."
alb.paragraphs$paragraphs[1549] <- "How much further the revelations as to Père Didon's iniquity might have\ngone, Miss Talbot could not say, but at that moment there came an\ninterruption."
alb.paragraphs$paragraphs[1924] <- "\"Ah, monsoo,\" he cried with boisterous good humour, \"permittez-moi\nintroducer un friend of mine, Monsoo Smeeth, de Londres, you know. Je ne\nsavez pas les noms de votre companiongs, but they are très bons camarades, je suis certain.\""
alb.paragraphs$paragraphs[908] <- "Soon after three o'clock a report arrived from the agent in the Rue du Chaussée d'Antin. It read--"
# alb.paragraphs$paras[950]
# alb.paragraphs$paras[grep("Caf\xfc\xbe\x8e\x96\x94\xbc Noir", alb.paragraphs$paras)]
alb.paragraphs <- alb.paragraphs %>%
transmute(paras= gsub("Caf\xfc\xbe\x8e\x96\x94\xbc Noir", "Café Noir", paragraphs) )
# 44,32,21,
colnames(alb.paragraphs) <- c("paragraphs")
alb.paragraphs <- alb.paragraphs %>%
transmute(paras= gsub("Caf\xe9\nNoir", "Café Noir", paragraphs) )
colnames(alb.paragraphs) <- c("paragraphs")
alb.paragraphs <- alb.paragraphs %>%
transmute(paras= gsub("\n", " ", paragraphs) )
alb.paragraphs$paras[959]
alb.paragraphs<- alb.paragraphs %>%
transmute(paragraphs=gsub("\\*|(?<=[A-Z])(\\.)(?=[A-Z]|\\.|\\s)", "", perl=TRUE, paras))
alb.paragraphs <- alb.paragraphs %>%
transmute(paras= gsub("Mrs\\.", "Mrs", paragraphs) )
alb.paragraphs <- alb.paragraphs %>%
transmute(paragraphs= gsub("Mr\\.", "Mr", paras))
colnames(alb.paragraphs) <- c("paras")
alb.paragraphs <- alb.paragraphs %>%
filter(paras!="")
alb.paragraphs <- alb.paragraphs %>%
filter(paras!=" ")
colnames(alb.paragraphs)
alb.paragraphs <- alb.paragraphs %>%
transmute(paragraphs = replace_abbreviation(paras))
alb.paragraphs <- alb.paragraphs %>%
transmute(paras= gsub("\"", "'", paragraphs))
print(length(alb.paragraphs$paras))
alb.paragraphs <- alb.paragraphs %>%
filter(paras!="")
alb.paragraphs <- alb.paragraphs %>%
filter(paras!=" ")
alb.paragraphs <- alb.paragraphs %>%
transmute(paragraphs= gsub("_", "", paras))
|
149a134df3964490132ff8e0d027b7929cd1dcfa
|
8844083a2d42ee4f70b6802fc41927f4775f4a60
|
/inst/doc/nVennR.R
|
93ab7e51c3958890b07a7e8764276a71203101d9
|
[] |
no_license
|
cran/nVennR
|
96450c25d9e2c9522e5845c4b3cf1498991e7d5e
|
beeadaf9fc5aabb9392077544d2c35ad034e302b
|
refs/heads/master
| 2021-04-15T14:19:38.166640
| 2021-01-24T16:00:05
| 2021-01-24T16:00:05
| 126,218,349
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,617
|
r
|
nVennR.R
|
## ----setup, F, include = FALSE------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.show='hide'
)
library(knitr)
uuid <- function() {
hex_digits <- c(as.character(0:9), letters[1:6])
hex_digits <- toupper(hex_digits)
paste(sample(hex_digits, 8), collapse='')
}
subsuid <- function(regex, strng){
l <- gregexpr(regex, strng, perl = T)
for (x in regmatches(strng, l)){
m <- regexpr('([^\\{ \\.\\#]+)', x, perl = T)
names <- regmatches(x, m)
gstr = strng
for (name in names){
nname <- paste('([^\\d\\w<>]', name, ')', sep="")
gstr <- gsub(nname, paste('\\1', '_', uuid(), sep=""), gstr, perl = T)
}
return(gstr)
}
}
knit_print.nVennR = function(x, ...) {
if (is.null(x$svg)){
x <- showSVG(x)
}
s <- subsuid('[\\.\\#](.+?)\\s*\\{', x$svg)
s <- subsuid('bl\\d+', s)
knitr::asis_output(s)
}
# register the method
registerS3method("knit_print", "nVennObj", knit_print.nVennR)
local({
hook_source <- knitr::knit_hooks$get('source')
knitr::knit_hooks$set(source = function(x, options) {
x <- x[!grepl('#noshow$', x)]
hook_source(x, options)
})
})
## ----"plotVenn"---------------------------------------------------------------
library(nVennR)
exampledf
sas <- subset(exampledf, SAS == "Y")$Employee
python <- subset(exampledf, Python == "Y")$Employee
rr <- subset(exampledf, R == "Y")$Employee
myV <- plotVenn(list(SAS=sas, PYTHON=python, R=rr), nCycles = 2000)
myV #noshow
## ----"Iterative"--------------------------------------------------------------
myV2 <- plotVenn(list(SAS=sas, PYTHON=python, R=rr, c("A006", "A008", "A011", "Unk"), c("A011", "Unk", "A101", "A006", "A000"), c("A101", "A006", "A008")))
myV2 <- plotVenn(nVennObj = myV2)
myV2 #noshow
## ----"Low-level"--------------------------------------------------------------
myV3 <- createVennObj(nSets = 5, sSizes = c(rep(1, 32)))
myV3 <- plotVenn(nVennObj = myV3, nCycles = 5000)
myT <- myV3 #noshow
myV3 <- plotVenn(nVennObj = myV3, nCycles = 5000)
myT #noshow
myV3 #noshow
## ----"setVennRegion"----------------------------------------------------------
myV3 <- setVennRegion(myV3, region = c("Group1", "Group3", "Group4"), value = 4) # region equivalent to c(1, 0, 1, 1, 0)
myV3 <- setVennRegion(myV3, region = c(0, 1, 0, 0, 1), value = 8) # region equivalent to c("Group2", "Group5")
myV3 <- plotVenn(nVennObj = myV3, nCycles = 3000)
myV3 #noshow
## ----"opacity"----------------------------------------------------------------
showSVG(nVennObj = myV3, opacity = 0.1, borderWidth = 3)
## ----"setColors"--------------------------------------------------------------
showSVG(nVennObj = myV3, setColors = c('#d7100b', 'teal', 'yellow', 'black', '#2b55b7'))
## ----"showLabels"-------------------------------------------------------------
showSVG(nVennObj = myV3, opacity = 0.1, labelRegions = F, fontScale = 3) # Avoid overlaps by hiding region labels
## ----"directPlot"-------------------------------------------------------------
myV4 <- plotVenn(list(a=c(1, 2, 3), b=c(3, 4, 5), c=c(3, 6, 1)), nCycles = 2000, setColors=c('red', 'green', 'blue'), labelRegions=F, fontScale=2, opacity=0.2, borderWidth=2)
myV4 #noshow
## ----"getVennRegion"----------------------------------------------------------
getVennRegion(myV, c("R", "SAS"))
getVennRegion(myV, c(1, 1, 1))
## ----"listVennRegions"--------------------------------------------------------
listVennRegions(myV4)
listVennRegions(myV4, na.rm = F)
|
8a2f11abde15699884a2311a966834bb751f407c
|
7735468e0344aa52827ad65c3af0535abf5c6dd6
|
/R/gjam1_PA/gjam.R
|
282520f2c13a53016bb216b5d3c696c850e330b9
|
[] |
no_license
|
tywagner/FishJSDM
|
3c5bbb9c723bebfc353dcdc8eca9d229626f16ce
|
367940382d3c3e91e6d5792d661eb0a96f5818e5
|
refs/heads/master
| 2022-01-21T03:23:24.864211
| 2019-07-25T14:36:33
| 2019-07-25T14:36:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,516
|
r
|
gjam.R
|
# rm(list=ls())
# library(gjam)
library(data.table)
library('Rcpp')
library('RcppArmadillo')
# placed cppFns.cpp in working directory and it worked
sourceCpp('cppFns.cpp')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjam.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamPlot.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamSimData.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamTrimY.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamCensorY.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamDeZero.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamReZero.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamPredict.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamSpec2Trait.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamIIE.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamIIEplot.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamOrdination.R')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamPoints2Grid.R')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamPriorTemplate.r')
source('C:/Users/txw19/Documents/Manuscripts/Joint_Fish_Modeling/gjam/R/gjamHfunctions.r')
library(MASS)
# Read in lake covariate and fish data
lakeP=fread("lake_predictors_for_joint_distribution.csv")
fishP=fread("catch_and_effort_most_recent_with_predictors.csv")
# fishP=fread("cpue_JDM.csv")
dim(lakeP) #1343 x 8
dim(fishP) #1343 x 17
lakeP
fishP
# Look at correlations among covariates
# cor(cbind(lakeP$area.hectares, lakeP$max_depth_m, lakeP$Secchi.lake.mean,lakeP$mean.gdd, lakeP$alkalinity))
# transform covariates (covariates are standardized by mean and variance, then transformed back to original scales in output in gjam)
lakeP[,log_area := log(area.hectares)]
lakeP[,log_depth := log(max_depth_m)]
# lakeP[,z_secchi := scale(Secchi.lake.mean)]
# lakeP[,z_gdd := scale(mean.gdd)]
# lakeP[, z_alk := scale(alkalinity)]
# Merge cpe and predictors
setkey(lakeP, DOW)
setkey(fishP, DOW)
dat <- merge(fishP, lakeP)
dat[,.N]
dim(dat)
# Remove column
dat[,dowlknum:=NULL]
# Remove lakes with missing Secchi
dat <- dat[!is.na(dat$Secchi.lake.mean),]
# Find some "outliers"
which(dat$black.crappie>2600)
which(dat$brown.bullhead>500)
which(dat$golden.shiner>300)
# Remove outliers
dat <- dat[dat$black.crappie < 2600 & dat$brown.bullhead < 500 & dat$golden.shiner < 300]
dim(dat)
###########################
# Read in lake lat/longs
lls <- fread("mn_lake_list.csv")
head(lls)
dim(lls)
# Grab lat/long columns
lls2 <- lls[, .(DOW, LAKE_CENTER_LAT_DD5,LAKE_CENTER_LONG_DD5)]
head(lls2)
dim(lls2)
setkey(dat, DOW)
setkey(lls2, DOW)
dat <- merge(dat, lls2)
dat[,.N]
head(dat)
summary(dat)
# Convert abundance to P/A
f1 <- function(x){ifelse(x > 0,1,0)}
dat <- dat[, 4:19 := lapply(.SD, f1), .SDcols=4:19]
head(dat)
xdat <- dat[,23:27]
ydat <- dat[,4:19]
# Convert to data frame for gjam
xdat <- as.data.frame(xdat)
ydat <- as.data.frame(ydat)
# Some gjam options for modelList:
# holdoutN = 0, number of observations to hold out for out-of-sample prediction.
# holdoutIndex = numeric(0), numeric vector of observations (row numbers) to holdout for out-of-sample prediction
# ,holdoutN=200
start.time = Sys.time() # Start timer:
ml <-list(PREDICTX = F, ng=40000,burnin=20000,typeNames=rep("PA",16))
ml$FULL=T
jdm1 = gjam(~ log_area + log_depth + Secchi.lake.mean + mean.gdd + alkalinity,
xdata=xdat, ydata=ydat,
modelList=ml)
# jdm1 = gjam(~ log_area + log_depth + Secchi.lake.mean + mean.gdd + alkalinity +
# I(log_area^2) + I(log_depth^2) + I(Secchi.lake.mean^2)+ I(mean.gdd^2) + I(alkalinity^2),
# xdata=xdat, ydata=ydat,
# modelList=ml)
end.time = Sys.time()
elapsed.time = round(difftime(end.time, start.time, units='mins'), dig = 2)
cat('Posterior computed in ', elapsed.time, ' minutes\n\n', sep='')
# jdm1 <- readRDS(file="gjamOUT1.rds")
str(jdm1)
summary(jdm1)
names(jdm1)
# Grab residual correlation
jdm1$parameters$corMu
# Plot gjam output
plotPars <- list(GRIDPLOTS=T,PLOTTALLy=F,SAVEPLOTS=T,SMALLPLOTS = F)
fit <- gjamPlot(jdm1, plotPars)
####------ BELOW IS FROM ABUNDANCE MODELING #####################
# in-sample posterior predictions
fullpostY <- jdm1$chains$ygibbs
# dim(fullpostY)
# # Calculate posterior medians
yMedian_gjam <- apply(fullpostY,2,median)
# length(yMedian_gjam)
# Posterior means
yMu <- jdm1$prediction$ypredMu
yMuv <- as.vector(yMu)
# Observed values of Y
ObsY <- jdm1$inputs$y
ObsYv <- as.vector(ObsY)
# Plot observed vs. posterior mean
plot(ObsYv, yMuv)
# Overlay posterior medians
# points(ObsYv, yMedian_gjam, col='red')
# points(ObsYv, yMean_gjam, col='green')
abline(0,1)
# In-sample predictions, i.e., "xdata" is not provided in gjamPredict
gjamPredict(jdm1, y2plot = colnames(ydat))
# Northern Pike
gjamPredict(jdm1, y2plot = colnames(ydat)[8])
# Walleye
gjamPredict(jdm1, y2plot = colnames(ydat)[12])
gjamPredict(jdm1, y2plot = colnames(ydat)[13])
# Posterior predictions for each species
sppPred <- array(fullpostY, dim=c(dim(jdm1$chains$bgibbs)[1],dim(xdat)[1],dim(jdm1$prediction$presence)[2]))
hist(sppPred[,,1])
# Figure of predictions for each species
# sppnames <- c("black bullhead","black crappie","bowfin","brown bullhead",
# "common carp","golden shiner","largemouth bass","northern pike",
# "rock bass","smallmouth bass","cisco","walleye",
# "white sucker","yellow bullhead","yellow perch","sunfish")
res <- 6
name_figure <- "ObsPredBySPP_Panel.jpg"
jpeg(filename = name_figure, height = 500*res, width = 500*res, res=72*res)
def.par <- par(no.readonly = TRUE) # save default, for resetting...
nf <- layout(matrix(c(1:16), ncol=4, nrow = 4,byrow=TRUE), respect=F)
layout.show(nf)
par(mar = c(1, 0, 0, 0) + 0.1,oma=c(2,2.5,0,0.5))
for(i in 1:16){
s1 <- apply(sppPred[,,i],2,median)
s2 <- apply(sppPred[,,i],2,mean)
plot(ObsY[,i],s2,axes=F, xlab='',ylab='',type='p', cex=0.5)
# points(ObsY[,i],s2, col="red", cex=0.5)
axis(side=1,cex.axis=0.8, mgp=c(1,0,0),tck= -0.01)
axis(side=2,cex.axis=0.8, tck= -0.005, mgp=c(0,0.3,0), las=1)
box()
abline(0,1)
mtext("Obs", line = 0.8, side = 1, cex = 1, outer=T, adj=0.5)
mtext("pred", line = 1, side = 2, cex = 1, outer=T, adj=0.5)
}
par(def.par)
dev.off()
######################################
######################################
# Out-of-sample prediction can also be done usig gjamPredict, specifying xdata
# Obtain full posterior predictive distributions using FULL=T
xdata <- xdat
newdata <- list(xdata = xdata, nsim = 1000 )
p1 <- gjamPredict(jdm1, newdata = newdata, FULL=T)
# str(p1)
plot(ObsY, p1$sdList$yMu)
abline(0,1)
### p1$ychains has the posterior predictive distributions for each observation and species
###### GRAB POSTERIOR ESTIMATES ################
# head(jdm1$chains$bgibbs) #these are all the betas
BetaOut <- jdm1$chains$bgibbs
dim(BetaOut)
hist(BetaOut[,3])
# apply(BetaOut,2,mean)
# head(jdm1$chains$sgibbs) #these are all the elements of Sigma
SigmaOut <-jdm1$chains$sgibbs
dim(SigmaOut)
saveRDS(BetaOut, file="BetaOut.rds")
saveRDS(SigmaOut, file="SigmaOut.rds")
# saveRDS(jdm1, file="gjamOUT1.rds")
|
acb6577d927a8899dbc0ffeb201102b96935a5df
|
ef3df1815a47215ade0239fa928277679d361a69
|
/functions/geospatial.R
|
50169d60c553198a420ae89ba3fa13fea83978c6
|
[] |
no_license
|
ntag1618/phenoSynth
|
72b86d3e71e6994baff0674c5987f29207439f18
|
99e36716169ea8df567f65ffdf0ba24b79542a6d
|
refs/heads/master
| 2023-01-30T10:16:40.353940
| 2020-12-14T18:40:01
| 2020-12-14T18:40:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,477
|
r
|
geospatial.R
|
#' matrix_to_polygon
#'
#' @param matrix
#' @param id
#' @param type_
#' @param crs
#' Converts the highlighted pixel coords into a spatialpolygon class
#' @return - sps
#'
matrix_to_polygon = function(matrix, id, type_, crs = '+proj=longlat +datum=WGS84'){
p = Polygon(matrix)
ps = Polygons(list(p), ID = id)
sps = SpatialPolygons(list(ps))
proj4string(sps) = CRS(crs)
return (sps)
}
#' build_pft_palette
#'
#' @param raster_
#' Builds a color palet for the modis landcover raster layer
#' @return - list of colors
#'
build_pft_palette = function(raster_, raster2_=NULL){
print ('building palet')
colors = c()
names = c()
color_list = c('#1b8a28', '#36d03e', '#9ecb30', '#a0f79f', '#91bb88', '#b99091', '#f0dfb8', '#d6ed9a',
'#f1dc07', '#ecbb5b', '#4981b1', '#fcee72', '#fd0608', '#9b9353', '#bdbec0', '#bdbec0', '#89cae3')
if (is.null(raster2_)){
v = unique(values(raster_))
}else{
v = unique(c(unique(values(raster_)), unique(values(raster2_))))
}
remove = c(NA)
v = v [! v %in% remove]
v = sort(v, decreasing = FALSE)
for (x in v){
if (x == 17 | x == 0){
colors = c(colors,color_list[17])
name = as.character(subset(pft_df, pft_df$pft_key == 0)$pft_expanded)
names = c(names, name)
}else{
colors = c(colors, color_list[x])
name = as.character(subset(pft_df, pft_df$pft_key == x)$pft_expanded)
names = c(names, name)
}
}
col_palette = colorBin(colors, bins = c(v[1] -.5 ,v + .5))
colors_ = list('colors' = colors, 'names' = names, 'pft_key' = v, 'palette' = col_palette)
return (colors_)
}
#' build_raster_grid
#'
#' @param raster_
#' @param map_
#' @param crs
#' Build grid for any input raster
#' @return - grid or sp_lines
#'
build_raster_grid = function(raster_, map_ = NULL, crs='wgs'){
r_ = raster_
xmin = xmin(extent(r_))
xmax = xmax(extent(r_))
ymin = ymin(extent(r_))
ymax = ymax(extent(r_))
nrows = nrow(r_)
ncols = ncol(r_)
resolution = res(r_)[1]
lats = c()
lons = c()
ids = c()
for (x in c(0:ncols)){
id = x
lat1 = ymax
lat2 = ymin
lon1 = xmin + (x * resolution)
lon2 = xmin + (x * resolution)
lats = c(lats, lat1, lat2)
lons = c(lons, lon1, lon2)
ids = c(ids, id, id)
}
for (xx in c(0:nrows)){
id = xx + x
lat1 = ymax - (xx * resolution)
lat2 = ymax - (xx * resolution)
lon1 = xmax
lon2 = xmin
lats = c(lats, lat1, lat2)
lons = c(lons, lon1, lon2)
ids = c(ids, id, id)
}
df.sp = data.frame(id=ids, latitude=lats, longitude=lons)
if (class(df.sp) == 'data.frame'){
coordinates( df.sp ) = c( "longitude", "latitude" )
id.list = sp::split( df.sp, df.sp[["id"]] )
id = 1
# For each id, create a line that connects all points with that id
for ( i in id.list ) {
event_lines = SpatialLines(list(Lines(Line(i[1]@coords), ID = id)),
proj4string = CRS(merc_crs))
if (id == 1){
sp_lines = event_lines
} else {
sp_lines = rbind(sp_lines, event_lines)
}
id = id + 1
}
sp_lines
}else{
print ('already a sp object')
}
is_not_null = function(x) ! is.null(x)
if (is_not_null(map_)){
print ('Adding Raster grid to map')
if (crs=='merc'){
grid = spTransform(sp_lines, crs(wgs_crs))
}else{grid = sp_lines}
leafletProxy(map_) %>% addPolylines(data = grid, weight = 1.5, opacity = 1, color = 'grey', group = '250m MODIS Grid') %>%
addLayersControl(baseGroups = c("World Imagery", "Open Topo Map"),
overlayGroups = c('MODIS Land Cover 2016', 'Vegetation Cover Agreement', '250m MODIS Grid'),
position = c("topleft"),
options = layersControlOptions(collapsed = FALSE)) %>%
hideGroup('250m Highlighted Pixels') %>%
showGroup('250m Highlighted Pixels')
return (grid)
}else{
if (crs=='merc'){
sp_lines = spTransform(sp_lines, crs(wgs_crs))
}
return (sp_lines)
}
}
#' from_crs1_to_crs2_lon_lat
#'
#' @param lon_
#' @param lat_
#' @param from_crs
#' @param to_crs
#' Function to convert any crs lat/lon coordinates into any crs lat/lon sp object (default is wgs to sinu)
#' @return - p
#'
from_crs1_to_crs2_lon_lat = function(lon_,lat_, from_crs = "+proj=longlat +datum=WGS84",
to_crs = "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs"){
xy = data.frame(matrix(c(lon_,lat_), ncol=2))
colnames(xy) = c('lon', 'lat')
coordinates(xy) = ~ lon + lat
proj4string(xy) = CRS(from_crs)
p = spTransform(xy, CRS(to_crs))
return (p)
}
#' crop_raster
#'
#' @param lat_
#' @param lon_
#' @param r_
#' @param reclassify
#' @param primary
#' @param height
#' @param width
#' @param crs_str
#' @param crop
#' Creates boundary box for clipping rasters using lat/lon from phenocam site
#' @return - rc
#'
crop_raster = function(lat_, lon_, r_, reclassify=FALSE, primary=NULL, height=.03, width=.05, crs_str = "+proj=longlat +datum=WGS84 +no_defs", crop=TRUE){
if (crop==TRUE){
e = as(extent(lon_-width, lon_ + width, lat_ - height, lat_ + height), 'SpatialPolygons')
crs(e) <- crs_str
r = raster::crop(r_, e, snap='near')
} else { r = r_}
if (reclassify == FALSE){
return (r)
}else if (reclassify == TRUE){
water = 17*2
m = c(1,2,
2,2,
3,2,
4,2,
5,2,
6,2,
7,2,
8,2,
9,2,
10,2,
11,2,
12,2,
13,2,
14,2,
15,2,
16,2,
17,2)
if(!is.null(primary)){
prim = primary*2
m[prim] = 1
}
rclmat = matrix(m, ncol=2, byrow=TRUE)
rc = raster::reclassify(r, rclmat)
if (length(unique(values(rc))) == 1){
m = c(1,NA,
2,NA,
3,NA,
4,NA,
5,NA,
6,NA,
7,NA,
8,NA,
9,NA,
10,NA,
11,NA,
12,NA,
13,NA,
14,NA,
15,NA,
16,NA,
17,NA)
rclmat = matrix(m, ncol=2, byrow=TRUE)
rc = raster::reclassify(r, rclmat)
}
return (rc)
}
}
#' get_x_y_sinu_from_wgs_pt
#'
#' @param lon_
#' @param lat_
#' Creates a reprojection of a lat/lon WGS84 point into sinusoidal Modis projection
#' @return - p
#'
get_x_y_sinu_from_wgs_pt = function(lon_,lat_){
xy = data.frame(matrix(c(lon_,lat_), ncol=2))
colnames(xy) = c('lon', 'lat')
coordinates(xy) = ~ lon + lat
proj4string(xy) = CRS("+proj=longlat +datum=WGS84")
p = spTransform(xy, CRS("+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs"))
return (p)
}
#' get_lat_lon_wgs_from_sinu_pt
#'
#' @param lon_
#' @param lat_
#' Creates a reprojection of a lat/lon WGS84 point into sinusoidal Modis projection
#' @return - p
#'
get_lat_lon_wgs_from_sinu_pt = function(lon_,lat_){
print ('Reprojecting coords to WGS84')
xy = data.frame(matrix(c(lon_,lat_), ncol=2))
colnames(xy) = c('lon', 'lat')
coordinates(xy) = ~ lon + lat
proj4string(xy) = CRS("+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs")
p = spTransform(xy, CRS("+proj=longlat +datum=WGS84"))
print (coordinates(xy))
print (coordinates(p))
return (p)
}
#' rad_to_deg
#'
#' @param rad - value in radians
#' converts radians to degrees
#'
rad_to_deg = function(rad) {
(rad * 180) / (pi)
}
#' run_add_polyline
#'
#' @param site_data_
#' @param azm_
#' Given row from sites, create points for polyline from site.
# This function uses angle for field of view and los as the
# far distance of the FOV.
#'
run_add_polyline = function(site_data_, azm_){
los = .01
lat = site_data_$lat
lon = site_data_$lon
dst = sqrt(los**2 + los**2)
c = rotate_pt(lon, lat, (azm_-25), dst)
b = rotate_pt(lon, lat, (azm_+25), dst)
cx = c[[1]]
cy = c[[2]]
bx = b[[1]]
by = b[[2]]
datalon = c(lon,cx,bx,lon)
datalat = c(lat,cy,by,lat)
camera = site_data_$site
id_ = paste('fov',camera, sep='')
add_polyline(datalon, datalat, id_ = 'azm_', .45, 'red', group = 'azm_')
}
#' rotate_pt
#'
#' @param lon
#' @param lat
#' @param azm
#' @param r
#' Rotate a point based on AZM
#' @return - list of longitude and latitude
#'
rotate_pt = function(lon, lat, azm, r){
rad = azm * (pi / 180)
lon_ = lon + (r * sin(rad))
lat_ = lat + (r * cos(rad))
return (list(lon_, lat_))
}
#' get_x_y_albers_from_wgs84
#'
#' @param lon_
#' @param lat_
#'
#' @return - p
#'
get_x_y_albers_from_wgs84 = function(lon_,lat_){
xy = data.frame(matrix(c(lon_,lat_), ncol=2))
colnames(xy) = c('lon', 'lat')
coordinates(xy) = ~ lon + lat
proj4string(xy) = CRS("+proj=longlat +datum=WGS84")
p = spTransform(xy, CRS("+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"))
return (p)
}
#' build_landsat_lc_pallet
#'
#' @param raster_
#' @param landsat_key
#'
#' @return - list of colors
#'
build_landsat_lc_pallet = function(raster_, landsat_key){
print ('building landsat landcover palette')
colors = c()
names = c()
color_list = c('#1b8a28', '#36d03e', '#9ecb30', '#a0f79f', '#91bb88', '#b99091', '#f0dfb8', '#d6ed9a',
'#f1dc07', '#ecbb5b', '#4981b1', '#fcee72', '#fd0608', '#9b9353', '#bdbec0', '#bdbec0', '#89cae3')
v = unique(values(raster_))
remove = c(NA)
v = v [! v %in% remove]
v = sort(v, decreasing = FALSE)
print (v)
count = 0
for (x in v){
count = count + 1
red = subset(landsat_key, landsat_key$ID == x)$Red/255
green = subset(landsat_key, landsat_key$ID == x)$Green/255
blue = subset(landsat_key, landsat_key$ID == x)$Blue/255
color = rgb(red,green,blue)
colors = c(colors, color)
# colors = c(colors, color_list[count])
name = as.character(subset(landsat_key, landsat_key$ID == x)$NLCD.Land.Cover.Class)
names = c(names, name)
}
colors_ = list('colors' = colors, 'names' = names)
return (colors_)
}
|
e3dedc063c83373aa958091378f7946aa343cce6
|
50f04ac4d39b12959b5635bd97ac37e659cb8e0f
|
/Analysis/MNepal_Casestudy01.R
|
f4417a309ce6d9937d23345204474a60218357d5
|
[] |
no_license
|
m-nepal/MSDS6306_CaseStudy01
|
61fe65dfe07c4d6e3309ea5e45767b338b681af6
|
bffb42324d409dbebcbd2c5c552ab04dff44a023
|
refs/heads/master
| 2021-01-21T14:40:26.290590
| 2017-07-01T04:43:39
| 2017-07-01T04:43:39
| 95,325,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,490
|
r
|
MNepal_Casestudy01.R
|
# ============================================================================
# Case study: US craft beers & breweries.
# Reads Beers.csv / Breweries.csv, counts breweries per state, merges the two
# tables, reports missingness, computes median ABV/IBU per state, and plots
# ABV vs IBU. Flat script: statements depend on execution order.
# NOTE(review): rm(list=ls()), setwd() and install.packages() inside a script
# are discouraged; paths are machine-specific (Windows user directory).
# ============================================================================
# cleaning variables
rm(list=ls())
ls()
# set the working dir and import the two csv files
setwd("C:/Users/mohan/Desktop/MSDS6306_CaseStudy01/Data")
# here we are replacing blank cells as NA
beers <- read.csv("Beers.csv", sep = ",", header = TRUE,na.strings = c("","NA"))
breweries <- read.csv("Breweries.csv", header = TRUE, sep=",",na.strings =c("","NA"))
# check the content of beers and breweries
head(beers)
head(breweries)
# used two methods to verify number of breweries per state
brewery_ps_way1 <- data.frame(table(breweries$State))
brewery_ps_way2 <- aggregate(rep(1, length(breweries$State)),by=list(breweries$State), sum)
# check the number of breweries per state by calling head method
head(brewery_ps_way1)
# In this result, Var1 represents the State and Freq represents the number of breweries in that state
head(brewery_ps_way2)
# in this method Group.1 column is State and x represents the number of breweries
# first 6 rows, we can verify both of the methods are giving the same result.
# Using dplyr package rename method to give meaningful names
install.packages("dplyr")
library(dplyr)
breweriesPerState <- rename(brewery_ps_way1,State = Var1,Breweries_Count = Freq)
head(breweriesPerState)
# We can apply the same rename method in way 2 also.
# Merging the two data sets on Brew_ID
merged_data <- merge(beers, breweries, by.x = "Brewery_id",by.y = "Brew_ID")
dim(merged_data)
# Here Beer name is listed as Name.x and Brewery name as Name.y
# Use rename method to give some meaningful names on merged data
# use rename method from "dplyr" package
merged_new <- rename(merged_data, Beer = Name.x , Brewery = Name.y)
head(merged_new)
tail(merged_new)
# Report NA's per column
missing.beer <- apply(beers,2, function(x){sum(is.na(x))})
missing.brewery <- apply(breweries,2, function(x){sum(is.na(x))})
missing.merged <- apply(merged_new, 2, function(x){ sum(is.na(x))})
# another way to report missing merge
merged_2 <- replace(merged_new, merged_new == "", NA)
missing.merged_2 <- apply(merged_2, 2, function(x){ sum(is.na(x))})
# check the missing contents
missing.beer
missing.brewery
missing.merged
missing.merged_2
# Assign the merged data set to a new variable for easy use in further analysis
cleanedData <- merged_2
# Check first 6 and last 6 rows
head(cleanedData)
tail(cleanedData)
# For computing median alcohol content and IBU for each state, used two methods for comparing the result
#install.packages("dplyr")
library(dplyr)
library(ggplot2)
# finding median ABV and IBU per state
# NOTE(review): ddply() is from the 'plyr' package, which is never attached
# (only dplyr is) — this line errors unless plyr is loaded elsewhere.
median_per_state_way1 <- ddply(cleanedData, .(State), summarize, ABV=median(ABV), IBU=median(IBU))
median_per_state_way2 <- cleanedData %>% group_by(State) %>% summarise_at(vars(ABV, IBU), median)
head(median_per_state_way1)
head(median_per_state_way2)
# the above method doesn't seem to provide correct result, so I used another method here just to verify
median_ABV_IBU <- aggregate(cbind(cleanedData$ABV,cleanedData$IBU)~State, FUN = median,cleanedData,na.rm = TRUE)
head(median_ABV_IBU)
# here ABV is named as V1 and IBU as V2, so using colnames we can rename to give meaningful names
colnames(median_ABV_IBU) <- c("State","Median_ABV","Median_IBU")
head(median_ABV_IBU)
# bar chart???
# NOTE(review): the two ggtitle() strings below look swapped — the first plot
# shows Median_ABV but is titled "IBU median per State", and vice versa.
ggplot(median_ABV_IBU,aes(median_ABV_IBU$State, y=median_ABV_IBU$Median_ABV))+
geom_bar(stat="identity")+labs(x="State",y="ABV") + ggtitle("IBU median per State")
ggplot(median_ABV_IBU,aes(median_ABV_IBU$State, y=median_ABV_IBU$Median_IBU))+
geom_bar(stat="identity")+labs(x="State",y="IBU")+ggtitle("ABV median per State")
# combined
library(reshape2)
# NOTE(review): melt() of a single factor column does not produce 'Block',
# 'value' or 'variable' columns, so the ggplot call below fails as written.
bpdata <- melt(median_ABV_IBU$State)
ggplot(bpdata,aes(Block,value,fill=variable))+
geom_bar(stat="identity",position="dodge")
# Which state has the MAX alcoholic beer?
max_ABV <- max(cleanedData$ABV,na.rm = TRUE)
max_ABV_2 <- arrange(cleanedData,desc(cleanedData$ABV))
max_ABV_3 <- head(max_ABV_2,1)
max_ABV_State <- data.frame(max_ABV_3$State,max_ABV_3$ABV)
# IBU
max_IBU <- max(cleanedData$IBU,na.rm = TRUE)
max_IBU_2 <- arrange(cleanedData,desc(cleanedData$IBU))
max_IBU_3 <- head(max_IBU_2,1)
max_IBU_State <- data.frame(max_IBU_3$State,max_IBU_3$IBU)
# let's find out which state has the max ABV and most bitter beer
max_ABV_State
max_IBU_State
# CO has max ABV and OR has most bitter beer
# summary statistics of ABV
summary(cleanedData$ABV)
# install.packages
install.packages("ggplot2")
library(ggplot2)
# Scatter plot of ABV vs IBU with a linear-regression smoother.
ggplot(beers, aes(x = ABV, y = IBU)) + geom_point(na.rm=TRUE)+geom_smooth(method=lm,se=FALSE, na.rm=TRUE)
|
d40903c0bc570bd0569d3b6b44546cd732f687ad
|
c0e87f9fc0886ccee5eeac245dc906f804ee95f6
|
/src/LesserAlleleFreq_singleSample.R
|
61d80fb3e6c2a51f9a27286fe9e1ae9b9aaca6da
|
[] |
no_license
|
davemcg/variant_prioritization
|
f4809fc1df721c39ce1a3cddfa919ee8d176232d
|
dbb2334b3bda698b2bae3bdced036aa2a89847ac
|
refs/heads/master
| 2023-04-10T12:12:18.621889
| 2020-02-04T20:46:51
| 2020-02-04T20:46:51
| 76,049,497
| 2
| 3
| null | 2020-02-23T04:23:11
| 2016-12-09T15:59:42
|
Python
|
UTF-8
|
R
| false
| false
| 2,123
|
r
|
LesserAlleleFreq_singleSample.R
|
# ============================================================================
# Plot the lesser-allele frequency (LAF) per chromosome for a single sample.
# CLI arguments:
#   args[1] = gemini TSV input path
#   args[2] = sample name (stripped from gts/gt_* column names)
#   args[3] = output image filename (saved via ggsave)
# Variants with read depth < 30 are dropped; points are colored by depth group.
# ============================================================================
args <- commandArgs(trailingOnly=TRUE)
#When testing, comment out line above and use the line below.
#args <- c("W:/Brooks_Coloboma/rd2/prioritization_freebayes/gemini_tsv/freebayes.combined.filtered.rd2.COL124_1.gemini.sorted.tsv",
# "COL124_1", "COL124_1.LAF.jpeg")
library(tidyverse)
library(RColorBrewer)
# Read everything as character first, then type_convert(), so "None"/"." sentinels
# become NA instead of breaking column type inference.
# NOTE(review): funs() is deprecated in dplyr >= 0.8; rename_all(~ str_replace(., args[2], ""))
# is the modern equivalent — confirm before upgrading dplyr.
gemini_input <- read_tsv(args[1], col_names = TRUE, na = c("NA", "", "None", "."), col_types = cols(.default = col_character())) %>%
select('chr_annovar', 'start_annovar', starts_with('gts'), starts_with('gt_'),
'panel_class', 'gene_refgenewithver_annovar') %>%
type_convert() %>%
mutate(chr_annovar = as.factor(chr_annovar)) %>%
rename_all(funs(str_replace(., args[2], ""))) %>%
filter(gt_depths. >= 30) %>%
group_by(chr_annovar) %>%
arrange(start_annovar, .by_group = TRUE) %>%
mutate(LesserAlleleFreq = ifelse(gt_alt_freqs. <= 0.5, gt_alt_freqs., 1 - gt_alt_freqs.)) %>%
mutate(variant_no = row_number()) %>%
mutate(DepthGroup = ifelse(gt_depths. >= 100, "DP>=100", "DP>=30" ) )
# Fix chromosome ordering (1..22, X, Y) so facets appear in karyotype order.
gemini_input$chr_annovar = factor(gemini_input$chr_annovar,
levels = c("1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","X","Y"))
gemini_input$DepthGroup = factor(gemini_input$DepthGroup,levels = c("DP>=100", "DP>=30"))
# One panel per chromosome; x-axis is the per-chromosome variant index.
# The plot is not assigned — ggsave() below saves the last plot displayed.
ggplot(gemini_input, aes(x= variant_no, y = LesserAlleleFreq, color = DepthGroup)) +
scale_color_brewer(palette = "Set1") +
coord_cartesian(ylim = c(0, 0.52)) +
labs(title = args[2], x= 'Variants', y = 'Lesser allele frequency') +
facet_wrap(~chr_annovar, scales = "free_x") +
geom_point(alpha = 0.5, size = 1) +
theme_bw() +
theme(axis.text.x = element_text(size=8), axis.text.y = element_text(size=8)) +
theme(axis.title.x = element_text(size=16), axis.title.y = element_text(size=16)) +
theme(legend.position = c(0.9, 0.1))
#theme(plot.title = element_text(hjust = 0.5))
ggsave(args[3], path = ".", width = 32, height = 18, units = "cm")
#write_tsv(gemini_filtered, file.path('.', args[4]))
|
b6ad4e4b37bd7492e8609b94c371be53ac704a41
|
1875ad05eafdaab03dc9fbb7f0bcee207c0b8cbc
|
/1. Descriptive stats_thesis.R
|
63aa8c87f8dc856beb9faaa0f643a502857d8c9d
|
[] |
no_license
|
sybrendeboever/Master-thesis-statistics-KUL
|
91ab784829669a357db91a37353b8e14d11833f3
|
caba87d8aba7966721df84ddbb6cd76301dafb90
|
refs/heads/main
| 2023-07-09T11:47:59.687771
| 2021-08-15T17:45:53
| 2021-08-15T17:45:53
| 378,909,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,484
|
r
|
1. Descriptive stats_thesis.R
|
# -------------------------------------------------------------- #
# -------------------- 1. DESCRIPTIVE STATS -------------------- #
# -------------------------------------------------------------- #
# Load the survey data, keep only students with a first attempt, and recode
# the parent (Q5), vocational-interest (Q15) and migration-background (SOE)
# variables. Paths are machine-specific (Windows drive).
### Final dataset
data = read.csv("D:/Unief/Master Statistiek/Jaar 2/Thesis/6. Data/all_NA_2019_2020_NoMiss_OmgedraaideScales_CorrecteQ22.csv")
data$id = paste(data$Student_ID,data$YEAR,sep="_")
data2 = read.csv("D:/Unief/Master Statistiek/Jaar 2/Thesis/6. Data/Results/CFA/dataq_NA.csv") # Dataq_NA contains the first attempt of the students
data2$id = paste(data2$Student_ID,data2$YEAR,sep="_")
# Restrict to students present in the first-attempt dataset.
data = data[data$id %in% data2$id,]; rm(data2)
# Parents: category 3 is treated as missing.
data$Q5[data$Q5==3] = NA
# Vocational interest: collapse the Q15_* indicator columns into one categorical
# Q15 (0 = none, 1/2/3 = single interests, 5-8 = combinations).
# NOTE(review): 1:nrow(data) fails for a 0-row data frame; seq_len(nrow(data))
# would be safer. Q15_4 ("none") is checked last so it overrides the others.
for (i in 1:nrow(data)){
row = data[i,c("Q15_1", "Q15_3", "Q15_2","Q15_5", "Q15_6", "Q15_7", "Q15_8")] # remark the order of Qs!
if (anyNA(row)){
data$Q15[i] = NA
} else{
if (row[1]==1){ # PL
data$Q15[i] = 1 # Number of question
}
if (row[2]==1){ # OE
data$Q15[i] = 3
}
if (row[3]==1){ # CI
data$Q15[i] = 2
}
if (row[4]==1){ # PL+OE+CI
data$Q15[i] = 5
}
if (row[5]==1){ # PL+OE
data$Q15[i] = 6
}
if (row[6]==1){ # PL+CI
data$Q15[i] = 7
}
if (row[7]==1){ # OE+CI
data$Q15[i] = 8
}
if (data[i,c("Q15_4")]==1){
data$Q15[i] = 0
}
}
}
# Migration background: recode Herkomst into SOE (1 = migration background,
# 2 = other, 3 = none / reference category).
data$SOE[data$Herkomst %in% c("1/Migratie-achtergrond (EU1)","2/Migratie-achtergrond (niet-EU1)")] = "1" # Migration background
data$SOE[data$Herkomst == "5/Overige"] = "2" # 'Other'
data$SOE[data$Herkomst %in% c("3/Geen migratie-achtergrond","4/Niet toegewezen")] = "3" # No migration background: ref
### 1. Descriptive tables of (1) survey questions & (2) used background variables
# ---------------------------------------------------------------------------------
# Builds a per-item response-count table (with totals, % missing and KMO values)
# and a frequency table for the background variables.
# NOTE(review): the loop variables below shadow base functions `c` and `list`.
########## (1) Survey questions
### a. Likert responses for the questions
## The relevant question in survey order
items = c("Q5","Q8","Q9","Q14_1","Q14_2","Q14_3","Q14_4","Q16","Q17_2","Q17_3","Q17_4","Q17_5","Q17_6",
"Q20","Q23","Q24_1","Q24_2","Q24_3","Q24_4","Q24_5","Q25_2","Q25_3","Q25_4","Q25_5",
"Q30","Q31","Q32_1","Q32_2","Q32_3","Q32_4","Q32_5","Q32_6","Q32_7")
## Create count table
# start
counts = t(table(data$Q5))
counts = cbind(counts,"3"=NA,"4"=NA,"5"=NA) # Create a table with 5 columns (= max nr of responses in the survey)
row.names(counts) = c("Q5")
# add others
for (i in items[2:length(items)]){ # Create a table for each survey item
c = t(table(data[,i]))
row.names(c) = i # indicate the item
if (ncol(c)<5){
n = ncol(c)
for (a in 1:(5-n)){ # add NA columns until 5 columns are reached
col = as.character(n+a)
c = cbind(c,col=NA)
}
}
counts = rbind(counts,c) # add the new count to the previous one
}
### b. add total number of responses per question
counts = data.frame(counts)
counts$Total = apply(counts,1,function(x){sum(x,na.rm=TRUE)})
### c. add % missingness
counts$Missing = round(100-counts$Total*100/nrow(data),2)
### d. add KMO values
# Kaiser-Meyer-Olkin sampling adequacy per item, computed on the polychoric
# correlation matrix of the survey items.
cor = psych::polychoric(data[,items[! items %in% c("Q5","Q31")]]) # remove Q5 and Q31 bc they are bgvars
kmo = data.frame(psych::KMO(cor$rho)$MSAi)
names(kmo) = c("KMO")
# join the counts and kmo dataset
counts$item = row.names(counts)
kmo$item = row.names(kmo)
counts = dplyr::left_join(counts,kmo,by="item")
counts$KMO = round(counts$KMO,2)
row.names(counts) = counts$item
counts$item = NULL
# write.csv(counts, file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/Results/1. Descriptive statistics/counts_surveyquestions.csv")
########## (2) Background variables
factors = c("Gender","Fase","Q5","Q15","SOE","Q31")
data2 = data.frame(apply(data,2,factor))
list=list()
for (i in factors){
n = nlevels(data2[,i])
groups = data.frame(var=character(n),levels=character(n),summary=character(n),percent=c(n),NAs=numeric(n))
groups$var = i
groups$levels = levels(data2[,i])
groups$summary = paste("n = ",as.numeric(table(data2[,i])),sep="")
groups$percent = round(as.numeric(table(data2[,i]))*100/sum(!is.na(data2[,i])),2)
groups$NAs = sum(is.na(data2[,i]))
groups$NA_percent = sum(is.na(data2[,i]))*100/nrow(data2)
list[[i]] = groups
}
summary_1=do.call(rbind,list)
# write.csv(summary_1,file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/1. Descriptive statistics/bgvars_table2.csv")
# Dependency between vocational interest and engineering persistence
data$voc_int = ifelse(data$Q15=="0",0,1)
data$voc_int[is.na(data$Q15)] = NA
chisq.test(table(data$Q31,data$voc_int))
fisher.test(table(data$Q31,data$Q15))
# --------------------------------------------------------------------------
# NOTE(review): the section below indexes an object called `summary`, which is
# never created in this script (only `summary_1` exists above, and `summary` is
# a base function). Presumably test/train summary tables were built in an
# earlier session — this block errors as written; confirm intent.
s = list()
s[[1]] = summary[,c("var","summary","levels","percent")]
names(summary)[names(summary)=="summary"] = c("summary_test")
names(summary)[names(summary)=="percent"] = c("percent_test")
s[[2]] = summary[,c("percent_test")]
names(summary)[names(summary)=="summary"] = c("summary_train")
names(summary)[names(summary)=="percent"] = c("percent_train")
s[[3]] = summary[,c("percent_train")]
s = do.call(cbind,s)
# write.csv(s,file="D:/Unief/Master Statistiek/Jaar 2/Thesis/6. Data/Results/Data exploration/Percent_bgvar_data_test_train.csv")
### 2. Polychoric correlations for all items (without Q5 and Q31)
# ------------------------------------------------------------------
# Polychoric correlation matrix of the ordinal survey items, rendered as a
# mixed corrplot (numbers in the lower triangle, circles in the upper).
items = c("Q8","Q9","Q14_1","Q14_2","Q14_3","Q14_4","Q16","Q17_2","Q17_3","Q17_4","Q17_5","Q17_6",
"Q20","Q23","Q24_1","Q24_2","Q24_3","Q24_4","Q24_5","Q25_2","Q25_3","Q25_4","Q25_5",
"Q30","Q32_1","Q32_2","Q32_3","Q32_4","Q32_5","Q32_6","Q32_7")
cor = psych::polychoric(data[,items])
corrplot::corrplot.mixed(cor$rho,number.cex = 0.45,
tl.cex = 0.45,lower.col="black")
#export::graph2ppt(file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/1. Descriptive statistics/corrplot.ppt",append=TRUE)
### 3. Missing data
# -------------------
# Counts missing values per student, then visualizes the unique monotone
# dropout patterns as a tile plot (plot1) next to a frequency column (plot2),
# combined on one grid page.
library(tidyverse)
library(grid)
# a. Count the number of missing values per student
# 859 completers and 181 non-completers
tel = data.frame(t(data[,items]))
names(tel) = data$id
# One row per student id with the number of NA item responses.
nas = tel %>%
as_tibble() %>%
summarize(across(everything(),
~sum(is.na(.)))) %>%
gather()
plot = data.frame(table(nas$value))
ggplot(data=plot,aes(x=factor(Var1),y=Freq))+
geom_col()+
theme_bw()+
ylim(0,75)+
ylab("Frequency")+xlab("Number of missing values")
#export::graph2ppt(file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/1. Descriptive statistics/NAs.ppt",append=TRUE)
# b. Check the dropout patterns
items = c("Q8","Q9","Q14_1","Q14_2","Q14_3","Q14_4","Q16","Q17_2","Q17_3","Q17_4","Q17_5","Q17_6",
"Q24_1","Q24_2","Q24_3","Q24_4","Q24_5","Q20","Q23","Q25_2","Q25_3","Q25_4","Q25_5",
"Q30","Q32_1","Q32_2","Q32_3","Q32_4","Q32_5","Q32_6","Q32_7") # Correct survey order
## Dropout patterns
# 1 = observed, 0 = missing; each student's pattern is encoded as a 0/1 string.
pattern = data[,items]
pattern[!is.na(pattern)] = 1
pattern[is.na(pattern)] = 0
pattern$pattern = apply(pattern,MARGIN =1,FUN = function(x){paste(x,sep="",collapse = "")}) # dropout pattern
pattern$id = data$id # get the ids to match the number of NAs per subject
pattern = left_join(pattern,nas,by=c("id"="key"))
names(pattern)[names(pattern)=="value"] = c("nas")
pattern = doBy::orderBy(~nas,pattern)
## Count the frequency per pattern (because you only want to plot the unique ones)
pat_freq = data.frame(table(pattern$pattern))
pattern = left_join(pattern,pat_freq,by=c("pattern"="Var1"))
## Only keep the observations with missingness
pattern_miss = pattern[pattern$nas>0,]
## Remove unnecessary variables (c("pattern","id","nas"))
pattern_miss[,c("pattern","id","nas")] = NULL
## Only keep the unique missingness patterns
pattern_miss =unique(pattern_miss)
plot = stack(pattern_miss[,items])
plot$id = rep(seq(nrow(pattern_miss),1)) # reverse, for plotting reasons
## plot
# plot1: one tile row per unique pattern, dark tile = missing item.
plot1 = ggplot(data=plot,aes(x=ind,y=factor(id)))+
geom_tile(aes(fill = factor(values)))+
scale_fill_manual(values=c("brown4","antiquewhite1"),guide=F)+
theme_test()+
scale_x_discrete(position = "top",guide=guide_axis(angle=75))+
theme(plot.background =element_blank()) + xlab(NULL)+ ylab("Missingness pattern")+
geom_vline(xintercept = seq(1.5,30.5,by=1),alpha=0.025)+
geom_hline(yintercept = seq(1.5,31.5,by=1),alpha=0.25)+
scale_y_discrete(labels=factor(seq(32,1))) # overwrite the numbers to have a better order
# plot2: how many students share each pattern, shown as a labelled heat column.
freqs = data.frame(pattern_miss$Freq,id=seq(nrow(pattern_miss),1))
names(freqs) = c("freq","id")
freqs$x = factor(rep(1))
plot2 = ggplot(data=freqs,aes(x=x,y=factor(id),fill=freq))+
geom_tile(col="gray45")+
scale_fill_gradient(low="white",high="brown4",guide = FALSE)+
geom_text(aes(label=freq),col="gray25",size=3)+
theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_blank(),
panel.background = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.ticks.x = element_blank())+
ggtitle("Freq \n ")+ylab(NULL)+xlab(NULL)
# Lay out plot1 (14 columns) and plot2 (1 column) side by side with grid.
pushViewport(viewport(layout=grid.layout(1,15)))
print(plot1,vp=viewport(layout.pos.row=1,layout.pos.col=c(1:14)))
print(plot2,vp=viewport(layout.pos.row=1,layout.pos.col=c(15)))
#export::graph2ppt(file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/1. Descriptive statistics/NAs.ppt",append=TRUE)
## c. MCAR
# Tests whether the missingness is MCAR: (1) Little's test, (2) logistic
# regressions of per-item missingness indicators, and (b) per-survey-part
# dropout models with separation diagnostics (brglm2 / detectseparation).
# 1. Little's MCAR test: "No MCAR (p<0.05, H0 = 'missingness is MCAR'); note: does not hold for categorical data
library(naniar)
# NOTE(review): `order_qs` is used here but only defined a few lines below —
# as a top-to-bottom script this line errors; presumably run interactively.
test = data.frame(apply(data[,order_qs],2,factor))
mcar_test(test)
# 2. Check missingness ourselves: using 'prev' (for monotone missingness; here only 28 students are non-monotone)
# a. three separate logistic regressions (sample size was large enough)
order_qs = c("Q8","Q9","Q14_1","Q14_2","Q14_3","Q14_4","Q16","Q17_2","Q17_3","Q17_4","Q17_5","Q17_6",
"Q24_1","Q24_2","Q24_3","Q24_4","Q24_5","Q20","Q23","Q25_2","Q25_3","Q25_4","Q25_5",
"Q30","Q32_1","Q32_2","Q32_3","Q32_4","Q32_5","Q32_6","Q32_7") # Correct survey order
# [ 0 = MISSING ; 1 = OBSERVED ]
# Per variable
sub = data[,c("Gender","Age","Fase","Q5",order_qs)]
sub = data.frame(apply(sub,2,factor))
sub$Q9 = relevel(sub$Q9,ref=1)
sub$Age = as.numeric(as.character(sub$Age))
# Build, for every item, a data set where that item is replaced by its
# missingness indicator and only the preceding variables are kept.
list = list()
for (i in names(sub)[! names(sub) %in% c("Q8","Gender","Age","Fase","Q5","D")]){ # exclude Q8 because this one is complete
# Copy dataset for safety reasons
a = sub
# Create missingness indicator
a[,i] = factor(ifelse(is.na(a[,i]),0,1)) # missing=0, observed=1
# Remove all variables following variable i (remark that the bgvars are the first variables so they are always included)
nr = which(names(sub)==i)
a = data.frame(a[,c(1:nr)])
list[[i]] = a
}
# Q9
fit = glm(Q9 ~ ., # "." can be used because it includes all the remaining vars in the data, which is here restrict to only the necessary ones
family = binomial(link = "logit"),
data=list[["Q9"]])
summary(fit)
car::vif(fit)
# Q14_1
fit = glm(Q14_1 ~ Gender + Age + Fase + Q5 + Q9,
family = binomial(link = "logit"),
data=list[["Q14_1"]])
alias(fit)
summary(fit)
car::vif(fit)
# Q17_2
fit = glm(Q17_2 ~ Gender + Age + Fase + Q5 + Q9 + Q14_1 +Q14_2 +Q14_3 +Q14_4 +Q16,
family = binomial(link = "logit"),
data=list[["Q17_2"]])
summary(fit)
car::vif(fit)
# b. 5 parts in the dataset
# create one dataset for each survey part that includes only students who reached that part
p1 = c("Q9", "Q14_1", "Q14_2", "Q14_3", "Q14_4") # +/- RU
p2 = c("Q16", "Q17_2","Q17_3", "Q17_4", "Q17_5", "Q17_6") # = CFC
p3 = c("Q24_1", "Q24_2", "Q24_3","Q24_4", "Q24_5") # +/- CU
p4 = c("Q20", "Q23", "Q25_2", "Q25_3", "Q25_4", "Q25_5") # +/- CC
p5 = c("Q30", "Q32_1", "Q32_2", "Q32_3", "Q32_4", "Q32_5", "Q32_6","Q32_7") # = CE
incl = c("Gender","Age","Fase","Q5","Q8")
# Per-part completion indicators: 1 = all items of the part observed.
data$p1 = apply(data[,p1],1,function(x){ifelse(anyNA(x),0,1)}) # 1=observed ; 0=NA
data$p2 = apply(data[,p2],1,function(x){ifelse(anyNA(x),0,1)})
data$p3 = apply(data[,c(p3)],1,function(x){ifelse(anyNA(x),0,1)})
data$p4 = apply(data[,c(p4)],1,function(x){ifelse(anyNA(x),0,1)})
data$p5 = apply(data[,p5],1,function(x){ifelse(anyNA(x),0,1)})
data$p3_5 = apply(data[,c(p3,p4,p5)],1,function(x){ifelse(anyNA(x),0,1)})
part1 = data[,c(incl,"p1")]
part2 = data[!(data$p1==0),c(incl,p1,"p2")] # exclude the previous students who dropped out in part1
part3 = data[!(data$p1==0 | data$p2==0),c(incl,p1,p2,"p3")]
part4 = data[!(data$p1==0 | data$p2==0 | data$p3==0),c(incl,p1,p2,p3,"p4")]
part5 = data[!(data$p1==0 | data$p2==0 | data$p3==0 | data$p4==0),c(incl,p1,p2,p3,p4,"p5")]
part3_5 = data[!(data$p1==0 | data$p2==0 ),c(incl,p1,p2,"p3_5")]
list = list(part1=part1,part2=part2,part3=part3,part4=part4,part5=part5,part3_5=part3_5)
for (i in 1:6){
list[[i]] = data.frame(apply(list[[i]],2,factor))
if (i>1){
list[[i]]$Q9 = NULL # delete this question and use Q8 instead because of singularity
}
list[[i]]$Age = as.numeric(as.character(list[[i]]$Age))
}
# Part1
fit = glm(p1 ~ .,
family = binomial(link = "logit"),
data=list[["part1"]])
summary(fit)
car::vif(fit)
# Part2
fit = glm(p2 ~ .,
family = binomial(link = "logit"),
data=list[["part2"]])
summary(fit)
car::vif(fit)
# Part3_5
a = list[["part3_5"]]
fit = glm(p3_5 ~ .,
family = binomial(link = "logit"),
data=a[,-which(names(a)=="Q17_2")], # Q17_2 showed large VIF
method="brglmFit");fit
summary(fit)
# Part3 (does not work, neither does combining part3+part4+part5)
library(brglm2)
library(detectseparation)
# - separation issue because of zero count for certain response categories
fit = glm(p3 ~ .,
family = binomial(link = "logit"),
data=list[["part3"]],
method="detect_separation");fit
# - investigate separation issue
a = list[["part3"]]
for (i in names(a)){ # separated variables
print(table(a[,c("p3",i)])) # Check whether the categories have 0 count for either completers or dropouts
}
a$Q14_2 = relevel(a$Q14_2,ref = "2") # The reference level had 0 count
a$Q17_2 = relevel(a$Q17_2,ref = "2")
a$Q17_4 = relevel(a$Q17_4,ref = "2")
fit = glm(p3 ~ .,
family = binomial(link = "logit"),
data=a,
method="detect_separation");fit
#
fit = glm(p3_5 ~ .,
family = binomial(link = "logit"),
data=list[["part3_5"]],
method="brglmFit") # Use this one
summary(fit)
fit = glm(p3 ~ .,
family = binomial(link = "logit"),
data=list[["part3"]],
method="brglmFit")
summary(fit)
fit = glm(p4 ~ .,
family = binomial(link = "logit"),
data=list[["part4"]],
method="detect_separation");fit
fit = glm(p5 ~ .,
family = binomial(link = "logit"),
data=list[["part5"]],
method="detect_separation");fit
# - ignoring the categorical nature of the responses ignores the separation problem (but also gives completely different results)
a = data.frame(apply(list[["part3"]],2,function(x){as.numeric(as.character(x))}))
a$Gender = list[["part3"]]$Gender
fit = glm(p3 ~ .,
family = binomial(link = "logit"),
data=a)
summary(fit)
car::vif(fit)
# Part4
fit = glm(p4 ~ .,
family = binomial(link = "logit"),
data=list[["part4"]],
method="detect_separation");fit
summary(fit)
a = data.frame(apply(list[["part4"]],2,function(x){as.numeric(as.character(x))}))
a$Gender = list[["part4"]]$Gender
fit = glm(p4 ~ .,
family = binomial(link = "logit"),
data=a)
summary(fit)
car::vif(fit)
# Part5
a = data.frame(apply(list[["part5"]],2,function(x){as.numeric(as.character(x))}))
a$Gender = list[["part5"]]$Gender
fit = glm(p5 ~ .,
family = binomial(link = "logit"),
data=a)
summary(fit)
car::vif(fit)
# Part3_5
a = list[["part3_5"]]
fit = glm(p3_5 ~ .,
family = binomial(link = "logit"),
data=a[,-which(names(a)=="Q17_2")], # Q17_2 showed large VIF
method="brglmFit");fit
summary(fit)
# --------------- ------------------
# Poisson regression of the number of missing item responses per student on
# the background variables, restricted to students with at least one NA.
sub = data[,c("Gender","Age","Fase","Q5",order_qs)]
sub = data.frame(apply(sub,2,factor))
sub$Age = as.numeric(as.character(sub$Age))
sub$miss = apply(sub[,order_qs],1,function(x){sum(is.na(x))})
fit = glm(miss ~ Gender + Age + Fase + Q5 + Q8,
family = poisson(link = "log"), data=sub[sub$miss>0,])
summary(fit)
# --------------- pairwise comparisons ------------------
# For each survey part, Fisher exact tests of part completion vs each
# background variable and each earlier item; p-values are then BH-adjusted.
p1 = c("Q9", "Q14_1", "Q14_2", "Q14_3", "Q14_4") # +/- RU
p2 = c("Q16", "Q17_2","Q17_3", "Q17_4", "Q17_5", "Q17_6") # = CFC
p3 = c("Q24_1", "Q24_2", "Q24_3","Q24_4", "Q24_5") # +/- CU
p4 = c("Q20", "Q23", "Q25_2", "Q25_3", "Q25_4", "Q25_5") # +/- CC
p5 = c("Q30", "Q32_1", "Q32_2", "Q32_3", "Q32_4", "Q32_5", "Q32_6","Q32_7") # = CE
incl = c("Gender","Fase","Q5","Q8")
data$p1 = apply(data[,p1],1,function(x){ifelse(anyNA(x),0,1)}) # 1=observed ; 0=NA
data$p2 = apply(data[,p2],1,function(x){ifelse(anyNA(x),0,1)})
data$p3 = apply(data[,c(p3)],1,function(x){ifelse(anyNA(x),0,1)})
data$p4 = apply(data[,c(p4)],1,function(x){ifelse(anyNA(x),0,1)})
data$p5 = apply(data[,p5],1,function(x){ifelse(anyNA(x),0,1)})
part1 = data[,c(incl,"p1")]
part2 = data[!(data$p1==0),c(incl,p1,"p2")] # exclude the previous students who dropped out in part1
part3 = data[!(data$p1==0 | data$p2==0),c(incl,p1,p2,"p3")]
part4 = data[!(data$p1==0 | data$p2==0 | data$p3==0),c(incl,p1,p2,p3,"p4")]
part5 = data[!(data$p1==0 | data$p2==0 | data$p3==0 | data$p4==0),c(incl,p1,p2,p3,p4,"p5")]
list = list(part1=part1,part2=part2,part3=part3,part4=part4,part5=part5)
for (i in 1:5){
list[[i]] = data.frame(apply(list[[i]],2,factor))
}
# 1. P-values
# NOTE(review): `$p` on the fisher.test result relies on partial matching of
# the `p.value` element of the htest list; `$p.value` would be explicit.
# p1
vars = c("Gender","Fase","Q5","Q8")
c_p1 =c()
for (i in vars){
name = paste("p1",i,sep="_")
c_p1[name] = fisher.test(table(data[,c("p1",i)]))$p
}
# p2
vars = c("Gender","Fase","Q5","Q8",p1)
c_p2 =c()
for (i in vars){
name = paste("p2",i,sep="_")
c_p2[name] = fisher.test(table(data[,c("p2",i)]))$p
}; round(c_p2,3)
# p3
vars = c("Gender","Fase","Q5","Q8",p1,p2)
c_p3 =c()
for (i in vars){
name = paste("p3",i,sep="_")
c_p3[name] = fisher.test(table(data[,c("p3",i)]))$p
}; round(c_p3,3)
# p4
vars = c("Gender","Fase","Q5","Q8",p1,p2,p3)
c_p4 =c()
for (i in vars){
name = paste("p4",i,sep="_")
c_p4[name] = fisher.test(table(data[,c("p4",i)]))$p
}; round(c_p4,3)
# p5
vars = c("Gender","Fase","Q5","Q8",p1,p2,p3,p4)
c_p5 =c()
for (i in vars){
name = paste("p5",i,sep="_")
c_p5[name] = fisher.test(table(data[,c("p5",i)]))$p
}; round(c_p5,3)
# Collect all p-values and adjust for multiple testing (Benjamini-Hochberg).
ps = c(c_p1,c_p2,c_p3,c_p4,c_p5)
pvals = data.frame(p = ps,
p_adj = p.adjust(ps,method="BH")) # or "holm"
pvals$part = substr(row.names(pvals),1,2)
pvals$Q = substr(row.names(pvals),4,10)
pvals$part = ifelse(pvals$part=="p1","part1",
ifelse(pvals$part=="p2","part2",
ifelse(pvals$part=="p3","part3",
ifelse(pvals$part=="p4","part4","part5"))))
# 2. Graph
# For each survey part, draws one grid page: a part-number title row plus one
# small bar chart per variable, comparing response distributions between
# completers and dropouts, annotated with the (adjusted) Fisher p-values.
# win.graph() is Windows-only.
win.graph(width = 14,height = 14)
nr=0 # Counter
# Number of columns in plot
cols = 6
# Total number of rows in plot
c1 = 1 + 5*ceiling(length(incl)/cols) # part1
c2 = 1 + 5*ceiling(length(c(incl,p1))/cols) # part2
c3 = 1 + 5*ceiling(length(c(incl,p1,p2))/cols) # part3
c4 = 1 + 5*ceiling(length(c(incl,p1,p2,p3))/cols) # part4
c5 = 1 + 5*ceiling(length(c(incl,p1,p2,p3,p4))/cols) # part5
rows = sum(c1,c2,c3,c4,c5)
# What percentages to plot
howplot = "Distributions_of_dropout_categories" # other: "Distributions_of_response_categories"
for (part in names(list)){
nr = nr+1
# ---------------------------- percentages----------------------------------------
if (howplot == "Distributions_of_dropout_categories"){
# Copy dataset:
t = list[[part]]
# Get the dropout variable
pi = names(t)[ncol(t)] # is always the last variable in the dataset
# sample sizes:
n0 = table(t[,pi])[1] # Number of dropouts in part
n1 = table(t[,pi])[2] # Number of completers
# Create percentages per category:
plot = list()
qs = names(t)[names(t)!=pi]
for (i in qs){
# Calculate percentages of the response categories for the completers and non-completers
a = table(t[t[,pi]==0,i])*100/n0
b = table(t[t[,pi]==1,i])*100/n1
# Number of categories
nl = length(a)
# Collect in a dataframe
plot[[i]] = data.frame(levels = rep(names(a),2),
percent = c(a,b),
drop = c(rep(0,nl),rep(1,nl)))
}
}
if (howplot == "Distributions_of_response_categories"){
# Copy dataset:
t = list[[part]]
# Create percentages per category:
qs = names(t[1:(ncol(t)-1)]) # dropout is last column
plot = list()
for (i in qs){
percents = c()
for (cats in levels(factor(t[,i]))){ # loop over the levels for the respective variable
tab = table(droplevels(t[t[,i]==cats,c(i,names(t)[length(names(t))])]))
if (ncol(tab)==1){
tab = cbind(tab,"0"=0)
tab = tab*100/sum(tab)
percents = rbind(percents,tab)
} else{
tab = tab*100/sum(tab)
percents = rbind(percents,tab)
}
}
d = cbind(stack(data.frame(percents)),level=rep(row.names(percents),2))
names(d)=c("percent","drop","levels")
d$drop = ifelse(d$drop=="X0",0,1)
plot[[i]] = d
}
}
# ------------------------------ plotting ---------------------------------------
pushViewport(viewport(layout=grid.layout( rows , cols )))
# a. Title graph
ti = ggplot(data=data.frame(x=c(1),y=c(1)),aes(x=x,y=y))+
geom_point(shape=as.character(nr),size=5)+
theme_void()
if (nr==1){print(ti,vp=viewport(layout.pos.row=1,layout.pos.col=c(1:cols)))}
if (nr==2){print(ti,vp=viewport(layout.pos.row=(1+c1),layout.pos.col=c(1:cols)))}
if (nr==3){print(ti,vp=viewport(layout.pos.row=(1+c1+c2),layout.pos.col=c(1:cols)))}
if (nr==4){print(ti,vp=viewport(layout.pos.row=(1+c1+c2+c3),layout.pos.col=c(1:cols)))}
if (nr==5){print(ti,vp=viewport(layout.pos.row=(1+c1+c2+c3+c4),layout.pos.col=c(1:cols)))}
# b. Graphs per survey item
for (i in 1:length(plot)){
# Get the pvalues from the 'pvals' dataset
item = names(plot)[i]
a1 = round(pvals$p[pvals$part==part & pvals$Q==item],3) # unadjusted pvalue
a2 = round(pvals$p_adj[pvals$part==part & pvals$Q==item],3) # BH-adjusted pvalue
# The plot
g = ggplot(data=plot[[i]],aes(x=levels,y=percent,fill=factor(drop)))+
geom_col(position="dodge")+
scale_fill_manual(values = c("brown4","antiquewhite2"),guide=FALSE)+ # dropout=0 -> brown; completer=1 -> antiquewhite
coord_flip()+
ggtitle(paste(names(plot)[i]))+
scale_x_discrete(expand=c(0,0))+scale_y_continuous(expand=c(0,0))+ # Bars start at axis (remark: should be in front of theme for overwriting reasons)
ylab(paste("p = ",a1," | ","p adj = ",a2,sep=""))+
theme(# Remove plot background:
panel.background = element_blank(),
# Remove axes & its attributes:
axis.line.y=element_line(), # draw yaxis
axis.text.x = element_blank(), # remark: coord_flip --> x becomes y
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.title.y = element_blank(),
axis.title.x = element_text(size=11),
# Remove facet_grid boxes
strip.text.x = element_blank(),
# Centre title:
plot.title = element_text(hjust = 0.5,vjust=1,color = "Gray15"))
# Position of the plot
# NOTE(review): the copy-pasted branches below differ subtly — nr==3 reuses
# offset "+1+2*5" for the (6*cols+1):(7*cols) case, nr==4 skips the
# (5*cols+1):(6*cols) range and contains "c2++c3" (unary plus, harmless but
# likely a typo). Verify the row offsets against the intended layout.
if (nr==1){
if (i %in% c(1:cols)){print(g,vp=viewport(layout.pos.row=1+c(1:5),layout.pos.col=i))}
if (i %in% c((cols+1):(2*cols))){ # when there are more variables to plot then columns specified
start = max(1+c(1:5))+1
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-cols)))}
if (i %in% c((2*cols+1):(3*cols))){
start = max(1+c(1:5))+1+5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-2*cols)))}
if (i %in% c((3*cols+1):(4*cols))){
start = max(1+c(1:5))+1+2*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-3*cols)))}
if (i %in% c((4*cols+1):(5*cols))){
start = max(1+c(1:5))+1+3*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-4*cols)))}
if (i %in% c((5*cols+1):(6*cols))){
start = max(1+c(1:5))+1+4*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-5*cols)))}
if (i %in% c((6*cols+1):(7*cols))){
start = max(1+c(1:5))+1+5*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-6*cols)))}
}
if (nr==2){
if (i %in% c(1:cols)){print(g,vp=viewport(layout.pos.row=c1+1+c(1:5),layout.pos.col=i))}
if (i %in% c((cols+1):(2*cols))){
start = max(c1+1+c(1:5))+1
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-cols)))}
if (i %in% c((2*cols+1):(3*cols))){
start = max(c1+1+c(1:5))+1+5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-2*cols)))}
if (i %in% c((3*cols+1):(4*cols))){
start = max(c1+1+c(1:5))+1+2*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-3*cols)))}
if (i %in% c((4*cols+1):(5*cols))){
start = max(c1+1+c(1:5))+1+3*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-4*cols)))}
if (i %in% c((5*cols+1):(6*cols))){
start = max(c1+1+c(1:5))+1+4*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-5*cols)))}
if (i %in% c((6*cols+1):(7*cols))){
start = max(c1+1+c(1:5))+1+5*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-6*cols)))}
}
if (nr==3){
if (i %in% c(1:cols)){print(g,vp=viewport(layout.pos.row=1+c(1:5)+c1+c2,layout.pos.col=i))}
if (i %in% c((cols+1):(2*cols))){
start = max(1+c(1:5)+c1+c2)+1
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-cols)))}
if (i %in% c((2*cols+1):(3*cols))){
start = max(1+c(1:5)+c1+c2)+1+5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-2*cols)))}
if (i %in% c((3*cols+1):(4*cols))){
start = max(1+c(1:5)+c1+c2)+1+2*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-3*cols)))}
if (i %in% c((4*cols+1):(5*cols))){
start = max(1+c(1:5)+c1+c2)+1+3*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-4*cols)))}
if (i %in% c((5*cols+1):(6*cols))){
start = max(1+c(1:5)+c1+c2)+1+4*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-5*cols)))}
if (i %in% c((6*cols+1):(7*cols))){
start = max(1+c(1:5)+c1+c2)+1+2*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-6*cols)))}
}
if (nr==4){
if (i %in% c(1:cols)){print(g,vp=viewport(layout.pos.row=1+c(1:5)+c1+c2+c3,layout.pos.col=i))}
if (i %in% c((cols+1):(2*cols))){
start = max(1+c(1:5)+c1+c2++c3)+1
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-cols)))}
if (i %in% c((2*cols+1):(3*cols))){
start = max(1+c(1:5)+c1+c2++c3)+1+5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-2*cols)))}
if (i %in% c((3*cols+1):(4*cols))){
start = max(1+c(1:5)+c1+c2++c3)+1+2*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-3*cols)))}
if (i %in% c((4*cols+1):(5*cols))){
start = max(1+c(1:5)+c1+c2++c3)+1+3*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-4*cols)))}
if (i %in% c((6*cols+1):(7*cols))){
start = max(1+c(1:5)+c1+c2++c3)+1+4*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-5*cols)))}
if (i %in% c((7*cols+1):(8*cols))){
start = max(1+c(1:5)+c1+c2++c3)+1+5*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-6*cols)))}
}
if (nr==5){
if (i %in% c(1:cols)){print(g,vp=viewport(layout.pos.row=1+c(1:5)+c1+c2+c3+c4,layout.pos.col=i))}
if (i %in% c((cols+1):(2*cols))){
start = max(1+c(1:5)+c1+c2+c3+c4)+1
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-cols)))}
if (i %in% c((2*cols+1):(3*cols))){
start = max(1+c(1:5)+c1+c2+c3+c4)+1+5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-2*cols)))}
if (i %in% c((3*cols+1):(4*cols))){
start = max(1+c(1:5)+c1+c2+c3+c4)+1+2*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-3*cols)))}
if (i %in% c((4*cols+1):(5*cols))){
start = max(1+c(1:5)+c1+c2+c3+c4)+1+3*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-4*cols)))}
if (i %in% c((5*cols+1):(6*cols))){
start = max(1+c(1:5)+c1+c2+c3+c4)+1+4*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-5*cols)))}
if (i %in% c((6*cols+1):(7*cols))){
start = max(1+c(1:5)+c1+c2+c3+c4)+1+5*5
print(g,vp=viewport(layout.pos.row=c(start:(start+4)),layout.pos.col=(i-6*cols)))}
}
}
}
export::graph2ppt(file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/1. Descriptive statistics/Missingness_parts_hist_p_2.ppt",
width=100,height=200,append=TRUE)
|
c38eed1095373b687eed3cafc6117719cb1f0b58
|
66539bfe3ccea078550d60b55b1d20b8b484dc90
|
/R/ind_to_day.R
|
ce4af8cca16ac009cfcac3d61f89623c90ab6378
|
[] |
no_license
|
xinyue-L/PML
|
ee1e189f32ecbaad4f24d710d517e818d9a231b6
|
79f67a8d3443b514e4536daa4579524881096292
|
refs/heads/master
| 2020-07-05T17:48:39.537834
| 2020-05-07T03:59:40
| 2020-05-07T03:59:40
| 202,718,263
| 0
| 1
| null | 2020-05-07T04:01:11
| 2019-08-16T11:46:35
|
R
|
UTF-8
|
R
| false
| false
| 1,103
|
r
|
ind_to_day.R
|
#' Trelliscope auxiliary function
#'
#' Take individual characteristics and map them to individual-day observations.
#' For example, individual A/B is 40/45 years old and has 4/2 day observations
#' in dataset D. Therefore, mapping age to dataset D generates
#' 40, 40, 40, 40, 45, 45 for day 1,2,3,4/1,2 observation of A/B respectively.
#'
#' @param x Covariate data for the individuals to be merged: either a vector
#'   (one value per individual, ordered like \code{split(df$ID, df$ID)}) or a
#'   list of such vectors.
#' @param df Day observation dataset; must contain an \code{ID} column.
#'
#' @keywords internal
#'
#' @return Covariates expanded to individual-day observations: a numeric
#'   vector when \code{x} is a vector, a list of per-day columns when
#'   \code{x} is a list.
ind_to_day <- function(x,df) {
  if(is.list(x)) {
    # Number of day observations per individual (split() orders by ID).
    temp <- unlist(lapply(split(df$ID,df$ID),length))
    temp2 <- do.call("cbind",x)
    re <- list()
    # FIX: seq_along()/seq_len() instead of 1:length()/1:ncol(), which
    # would iterate over c(1, 0) for zero-length input.
    for(i in seq_along(temp)) {
      # Replicate individual i's column once per day observation.
      re[[i]] <- temp2[,rep(i,temp[i])]
    }
    re <- do.call("cbind",re)
    temp <- list()
    for (i in seq_len(ncol(re))) temp[[i]] <- re[,i]
    return(temp)
  } else if (is.vector(x)) {
    temp <- unlist(lapply(split(df$ID,df$ID),length))
    temp <- cbind(temp,x)
    # For each individual: repeat the covariate value (column 2) as many
    # times as there are day observations (column 1).
    temp <- unlist(apply(temp,1,function(x) rep(x[2],x[1])))
    names(temp) <- NULL
    temp <- as.numeric(temp)
    return(temp)
  }
}
|
419de38ae893a15df04dac09bb11e011af97a232
|
3fc3964396f8010aae9345d37f551c4431c52ff9
|
/man/nii2mnc.help.Rd
|
1aefd804ded0aba4783af7898a2355a42924c442
|
[] |
no_license
|
muschellij2/freesurfer
|
ff96f465ebbfbb0b7ce18644be5f4c5ea753fc45
|
7d70f616e760d8d3a453a652d98756e34877fed7
|
refs/heads/master
| 2021-06-24T00:57:12.644687
| 2020-12-08T18:41:34
| 2020-12-08T18:41:34
| 67,370,835
| 9
| 8
| null | 2020-11-15T23:42:38
| 2016-09-04T22:12:47
|
R
|
UTF-8
|
R
| false
| true
| 283
|
rd
|
nii2mnc.help.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nii2mnc.R
\name{nii2mnc.help}
\alias{nii2mnc.help}
\title{Convert NIfTI to MNC Help}
\usage{
nii2mnc.help()
}
\value{
Result of \code{fs_help}
}
\description{
This calls Freesurfer's \code{nii2mnc} help
}
|
75de42b4d0439eb87a5688fc4dc62591bf544e4a
|
8105d46b2ae06b7bb76d3c0ab0fc195b687bd750
|
/R/hydrographTEST.R
|
73bc4887e4a9000c6072e6b9aa09e3e900d1512c
|
[] |
no_license
|
tnkocis/stReamflowstats
|
c8f0d8b905afccd40fc5a280f17378de4ba800bf
|
0fc1c7ff1eb024e8434ee5898884e02e95fa7b51
|
refs/heads/master
| 2020-04-12T02:25:09.302694
| 2017-07-01T01:43:56
| 2017-07-01T01:43:56
| 34,279,048
| 0
| 3
| null | 2015-04-24T21:38:07
| 2015-04-20T18:37:04
|
R
|
UTF-8
|
R
| false
| false
| 3,207
|
r
|
hydrographTEST.R
|
# Exploratory plotting script for year-type / decade hydrographs.
# NOTE(review): `blah`, `blah2` and `m` are assumed to be created elsewhere
# before sourcing this file (lists of mean hydrographs keyed by year type /
# decade, and a long-format volume table) -- TODO confirm against the caller.
#
# Author: tiffn_000
###############################################################################
# Base-graphics overlay: one line per decade, drawn on an empty canvas.
plot(blah$All$Continuous, type="n")
lines(blah$All$`1960s`, col="red")
lines(blah$All$`1970s`, col="orange")
lines(blah$All$`1980s`, col="yellow")
lines(blah$All$`1990s`, col="green")
lines(blah$All$`2000s`, col="blue")
lines(blah$All$`2010s`, col="purple")
# Overlay by water-year type (C/D/BN/AN/W) for the first dataset.
plot(blah$W$Continuous, type="n")
lines(blah$C$Continuous, col="red")
lines(blah$D$Continuous, col="orange")
lines(blah$BN$Continuous, col="yellow")
lines(blah$AN$Continuous, col="green")
lines(blah$W$Continuous, col="blue")
# NOTE(review): legend lists only 4 of the 5 plotted series ("W" missing) --
# possibly intentional for this test plot; verify.
legend(x=as.Date("0001-10-01"), y=.0020, legend=c("C", "D", "BN", "AN"),
fill=c("red","orange","yellow","green"))
dev.new()
# Same year-type overlay for the second dataset (blah2), legend includes W.
plot(blah2$W$Continuous, type="n")
lines(blah2$C$Continuous, col="red")
lines(blah2$D$Continuous, col="orange")
lines(blah2$BN$Continuous, col="yellow")
lines(blah2$AN$Continuous, col="green")
lines(blah2$W$Continuous, col="blue")
legend(x=as.Date("0001-10-01"), y=.015, legend=c("C", "D", "BN", "AN","W"),
fill=c("red","orange","yellow","green","blue"))
dev.new()
par( mfrow=c(7,1))
# Blank Dec-Feb axis (dummy water-year dates 0001/0002), then decade overlays.
date.seq <- seq(from=as.Date("0001-12-01"), to= as.Date("0002-02-28"), by="day")
val.seq <- rep(NA, length(date.seq))
plot(date.seq,val.seq,
#xlim=c(as.Date("0001-12-01"),as.Date(("0002-02-28"))),
ylim=c(0.01,0.08),
ylab="Discharge (maf)",
xlab="Date")
lines(blah2$All$`1960s`, col="red")
lines(blah2$All$`1970s`, col="orange")
lines(blah2$All$`1980s`, col="yellow")
lines(blah2$All$`1990s`, col="green")
lines(blah2$All$`2000s`, col="blue")
lines(blah2$All$`2010s`, col="purple")
legend(x=as.Date("0001-11-01"), y=.00010,
legend=c("1960s", "1970s", "1980s", "1990s","2000s", "2010s"),
fill=c("red","orange","yellow","green","blue","purple"))
# ggplot2 version of the year-type hydrograph with a manual color scale.
ggplot(blah2$W$Continuous, aes(x = Date, y = Discharge_maf_avg)) +
geom_line(data = blah2$C$Continuous, aes(color = "Critical")) +
geom_line(data = blah2$D$Continuous, aes(color = "Dry")) +
geom_line(data = blah2$BN$Continuous, aes(color = "Below Normal")) +
geom_line(data = blah2$AN$Continuous, aes(color = "Above Normal")) +
geom_line(data = blah2$W$Continuous, aes(color = "Wet")) +
scale_color_manual(values = c( "Critical" = "red", "Dry" = "orange",
"Below Normal" = "yellow", "Above Normal" = "green", "Wet" = "blue"), name="Year Type")+
labs(title = "Mean Year Type Hydrograph for San Joaquin River Near Newman, CA (11274000)",
x = "Month", y="Discharge (MAF)")+
scale_x_date(labels = date_format("%b"), breaks=date_breaks("months"))
# Faceted bar chart of total high-flow volume per year type and gauge.
ggplot(m, aes(x = yeartype, y = Volume, width=0.9, fill=yeartype)) + geom_bar(stat="identity") + facet_wrap(~gauge)+
theme(axis.text.x=element_text(size=10)) + scale_fill_brewer(palette = "YlGnBu") +
scale_x_discrete(labels=c("C", "D", "BN", "AN","W")) + guides(fill=guide_legend(title="Year Type", reverse=TRUE))+
labs(title="Total Flows Above 90th Percentile For Average Year Type (100 Years of Data)
November to January",
x="Year Type", y="Magnitude of Average Year Type Total Flow (TAF)")+
scale_y_continuous(labels = comma, breaks=pretty_breaks(n=10))
|
d50f5d92ef6f6c93164bf46229b1d388c7cb71f6
|
1567bf78cd76fd6e5909e6e0dbcb71e4a4f69621
|
/man/DaMiR.FSelect.Rd
|
46ee4a3ead2d33fe5d2ab333dd1a9dea8c445eef
|
[] |
no_license
|
BioinfoMonzino/DaMiRseq
|
734a20159b09ce37b0f54524d0be4bca0e435cc5
|
318c280f23f009f27f1d3841f63bb916e75a46c4
|
refs/heads/master
| 2021-11-21T12:30:59.022190
| 2021-08-19T13:06:35
| 2021-08-19T13:06:35
| 81,440,079
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,451
|
rd
|
DaMiR.FSelect.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Feature_Selection.R
\name{DaMiR.FSelect}
\alias{DaMiR.FSelect}
\title{Feature selection for classification}
\usage{
DaMiR.FSelect(
data,
df,
th.corr = 0.6,
type = c("spearman", "pearson"),
th.VIP = 3,
nPlsIter = 1
)
}
\arguments{
\item{data}{A transposed data frame or a matrix of normalized expression
data.
Rows and Cols should be,
respectively, observations and features}
\item{df}{A data frame with known variables; at least one column
with
'class' label must be included}
\item{th.corr}{Minimum threshold of correlation between class and
PCs; default is 0.6. Note. If df$class has more than two levels,
this option is disabled and the number of PCs is set to 3.}
\item{type}{Type of correlation metric; default is "spearman"}
\item{th.VIP}{Threshold for \code{bve_pls} function, to remove
non-important variables; default is 3}
\item{nPlsIter}{Number of times that \link{bve_pls} has to run.
Each iteration produces a set of selected features, usually similar
to each other but not exactly the same! When nPlsIter is > 1, the
intersection between each set of selected features is performed;
so that, only the most robust features are selected. Default is 1}
}
\value{
A list containing:
\itemize{
\item An expression matrix with only informative features.
\item A data frame with class and optional variables information.
}
}
\description{
This function identifies the class-correlated principal
components (PCs)
which are then used to implement a backward variable elimination
procedure for the removal of non informative features.
}
\details{
The function aims to reduce the number of features to obtain
the most informative variables for classification purpose. First,
PCs obtained by principal component analysis (PCA) are correlated
with "class". The correlation threshold is defined by the user
in \code{th.corr} argument. The higher is the correlation, the
lower is the number of PCs returned. Importantly, if df$class has
more than two levels, the number of PCs is automatically set to 3.
In a binary experimental setting, users should pay attention to
appropriately set the \code{th.corr} argument because it will also
affect the total number of selected features that ultimately
depend on the number of PCs. The \code{\link{bve_pls}} function
of \code{plsVarSel} package is, then, applied.
This function exploits a backward variable elimination procedure
coupled to a partial least squares approach to remove those variables
which are less informative with respect to class. The returned
vector of variables is further reduced by the following
\code{\link{DaMiR.FReduct}} function in order to obtain a subset of
non correlated putative predictors.
}
\examples{
# use example data:
data(data_norm)
data(df)
# extract expression data from SummarizedExperiment object
# and transpose the matrix:
t_data<-t(assay(data_norm))
t_data <- t_data[,seq_len(100)]
# select class-related features
data_reduced <- DaMiR.FSelect(t_data, df,
th.corr = 0.7, type = "spearman", th.VIP = 1)
}
\references{
Tahir Mehmood, Kristian Hovde Liland, Lars Snipen and
Solve Saebo (2011).
A review of variable selection methods in Partial Least Squares
Regression. Chemometrics and Intelligent Laboratory Systems
118, pp. 62-69.
}
\seealso{
\itemize{
\item \code{\link{bve_pls}}
\item \code{\link{DaMiR.FReduct}}
}
}
\author{
Mattia Chiesa, Luca Piacentini
}
|
777fb84f8cb24ee0527a4283c924841854da9015
|
e29dd0b088d8e898a1382b8cd59a3048807ccaa1
|
/R/dbscan.R
|
78eea39d2d31c257ea55ba62bf176e0afabd66fa
|
[] |
no_license
|
fcen-amateur/tp5-aed
|
df58bedd027a2bd02b8c1534b0a862a48993fec6
|
f15ec8d8a1b0e1a0694e7914bc2732741fca66af
|
refs/heads/master
| 2021-09-18T17:09:05.169836
| 2018-07-17T11:06:34
| 2018-07-17T11:06:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,049
|
r
|
dbscan.R
|
dbscan <- function(X, epsilon = 0.5, min_pts = 5, fun_dist = distancia_euclidea) {
  # DBSCAN density-based clustering.
  # Returns one integer label per row of X: consecutive cluster ids (1..k)
  # or -1 for noise. `epsilon` is the neighbourhood radius, `min_pts` the
  # density threshold, `fun_dist` the pairwise distance function.
  if (is.data.frame(X)) { X <- data.matrix(X) }
  N <- dim(X)[1]
  etiquetas <- rep(0, N)  # 0 = unlabelled, -1 = noise, >0 = cluster id
  clase <- 0              # cluster counter
  for (punto in seq(N)) {
    # if the point is already labelled, skip it
    if (etiquetas[[punto]] != 0) { next }
    # compute the direct neighbours of the current point
    vecinos_directos <- vecinos(X, punto, epsilon, fun_dist)
    # with fewer than min_pts neighbours, label as noise and continue
    # (may later be promoted to a border point of some cluster)
    if (length(vecinos_directos) < min_pts) {
      etiquetas[[punto]] <- -1
      next
    }
    # otherwise, open a new cluster and label the point with it
    clase <- clase + 1
    etiquetas[[punto]] <- clase
    # seed the expansion set with the direct neighbours
    # (NOTE: this local vector shadows the `vecinos()` function; the calls
    # below still resolve correctly because R skips non-function bindings
    # when looking up a name in call position)
    vecinos <- vecinos_directos
    # while any neighbour of the point is unlabelled, explore its vicinity
    while (any(etiquetas[vecinos] == 0)) {
      for (punto_vecino in vecinos) {
        # promote noise points to border points of the current cluster
        if (etiquetas[[punto_vecino]] == -1) { etiquetas[[punto_vecino]] <- clase }
        # ignore points that are already labelled
        if (etiquetas[[punto_vecino]] != 0) { next }
        # unlabelled point: label it and fetch its own neighbours
        # ("second-order neighbours" of the original point)
        etiquetas[[punto_vecino]] <- clase
        vecinos_segundos <- vecinos(X, punto_vecino, epsilon, fun_dist)
        # if the neighbour has enough "second-order neighbours" of its own,
        # merge them into the expansion set of the original point
        # (the `for` iterates over a copy, so additions take effect on the
        # next pass of the `while`)
        if (length(vecinos_segundos) >= min_pts) {
          vecinos <- unique(c(vecinos, vecinos_segundos))
        }
      }
    }
  }
  return(etiquetas)
}
vecinos <- function(X, i, epsilon, fun_dist) {
  # Indices of the rows of X lying strictly within `epsilon` of row i,
  # measured with `fun_dist` (the reference point is its own neighbour).
  punto_ref <- X[i, ]
  dist_a_ref <- apply(X, 1, function(fila) fun_dist(punto_ref, fila))
  which(dist_a_ref < epsilon)
}
distancia_euclidea <- function(x, y) {
  # L2 (Euclidean) distance between two equal-length numeric vectors.
  # Input validation (length/type checks via assertthat) is deliberately
  # omitted: it slowed execution down more than tenfold.
  dif <- x - y
  sqrt(sum(dif * dif))
}
# DBSCAN(DB, distFunc, eps, minPts) {
# C = 0 /* Cluster counter */
# for each point P in database DB {
# if label(P) ≠ undefined then continue /* Previously processed in inner loop */
# Neighbors N = RangeQuery(DB, distFunc, P, eps) /* Find neighbors */
# if |N| < minPts then { /* Density check */
# label(P) = Noise /* Label as Noise */
# continue
# }
# C = C + 1 /* next cluster label */
# label(P) = C /* Label initial point */
# Seed set S = N \ {P} /* Neighbors to expand */
# for each point Q in S { /* Process every seed point */
# if label(Q) = Noise then label(Q) = C /* Change Noise to border point */
# if label(Q) ≠ undefined then continue /* Previously processed */
# label(Q) = C /* Label neighbor */
# Neighbors N = RangeQuery(DB, distFunc, Q, eps) /* Find neighbors */
# if |N| ≥ minPts then { /* Density check */
# S = S ∪ N /* Add new neighbors to seed set */
# }
# }
# }
# }
#
# RangeQuery(DB, distFunc, Q, eps) {
# Neighbors = empty list
# for each point P in database DB { /* Scan all points in the database */
# if distFunc(Q, P) ≤ eps then { /* Compute distance and check epsilon */
# Neighbors = Neighbors ∪ {P} /* Add to result */
# }
# }
# return Neighbors
# }
|
78e2c63734bf26af743f71d587d8de22923d88a0
|
ebfe6574ca88dcf7aebd887adee0f2969e8b971a
|
/plot3.R
|
d62e3428541701fc15dbfe9db296866b8227d7c3
|
[] |
no_license
|
hermanmeijer/ExData_Plotting1
|
fbdb61b22a57b4888734f11f10a2632339b74833
|
932856f62f38dd4a7a51edb92245495d5cbcbffd
|
refs/heads/master
| 2020-12-28T19:11:25.002136
| 2016-01-09T10:49:53
| 2016-01-09T10:49:53
| 49,283,332
| 0
| 0
| null | 2016-01-08T16:32:46
| 2016-01-08T16:32:46
| null |
UTF-8
|
R
| false
| false
| 866
|
r
|
plot3.R
|
## Set working directory and read the full dataset ("?" marks missing values).
setwd("D:/Data Science Course/Assignments/Irvine")
dat<-read.csv("household_power_consumption.txt", sep=";", na.strings = "?",
stringsAsFactors = FALSE)
## Restrict to 1-2 Feb 2007 and parse the date/time columns.
## (Time must be parsed together with Date so each row gets a full timestamp.)
dat<-dat[dat$Date == "1/2/2007" | dat$Date=="2/2/2007",]
dat$Time <- strptime(paste(dat$Date, dat$Time) , "%d/%m/%Y %H:%M:%S")
dat$Date <- as.Date(dat$Date , "%d/%m/%Y")
## Draw the three sub-metering series on the png device and close it.
png(file="plot3.png")
plot(dat$Time,dat$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
points(dat$Time,dat$Sub_metering_1,type="l")
points(dat$Time,dat$Sub_metering_2,type="l",col="red")
points(dat$Time,dat$Sub_metering_3,type="l",col="blue")
## BUG FIX: legend labels were misspelled ("Sub_metring_*"); they now match
## the plotted column names.
legend("topright", col=c("black","red","blue"), lwd=1, lty=1,
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3") )
dev.off()
|
759ffd1ddaf6d049c18327fa7a34b6c91a5d6525
|
9bdc9b33874e5ccef6cad2ef015f4ad08414f45f
|
/Laks/Rscripts/ExtractExonicSNV.R
|
10b8dda604f838830ba5fd6510aa36f01bcd0f09
|
[] |
no_license
|
junseonghwan/ScRNACloneEvaluation
|
fd891a821c28382cbc11beba5e0923e6b05e454b
|
8a78fca4b473ab2a69e4dbd44acc3e420f4dbfe7
|
refs/heads/master
| 2022-12-14T06:19:51.041374
| 2020-08-16T20:13:49
| 2020-08-16T20:13:49
| 271,387,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,477
|
r
|
ExtractExonicSNV.R
|
# Extract SNVs that fall inside annotated exons and export them as CSV + BED.
# Depends on Bioconductor's GenomicRanges and the project package ScRNAClone
# (which provides ConstructGranges).
library(GenomicRanges)
library(ScRNAClone)
# TODO: Location to GTF file to be provided as an argument.
gtf <- read.table("/Users/seonghwanjun/data/references/Homo_sapiens.GRCh37.75.gtf", header=F, sep="\t")
# Standard 9 GTF columns.
names(gtf) <- c("seqname", "source", "feature", "start", "end", "score", "strand", "frame", "attribute")
exon.gtf <- subset(gtf, feature == "exon")
dim(exon.gtf)
# Load clone SNVs and de-duplicate by chrom:coord locus key.
laks_snv <- read.table("data/Laks/ov2295_clone_snvs.csv", header=T, sep=",")
laks_snv$loc <- paste(laks_snv$chrom, laks_snv$coord, sep=":")
laks_snv <- laks_snv[!duplicated(laks_snv$loc),]
dim(laks_snv)
# Build GRanges for SNVs (point ranges) and exons, then intersect them.
laks_snv.gr <- ConstructGranges(laks_snv$chrom, laks_snv$coord, width = 0)
exon.gtf.gr <- ConstructGranges(exon.gtf$seqname, exon.gtf$start, width = exon.gtf$end - exon.gtf$start)
overlaps <- findOverlaps(laks_snv.gr, exon.gtf.gr)
# Keep SNVs hitting at least one exon; a SNV may overlap several exons,
# so de-duplicate by locus again.
laks_snv_exon <- laks_snv[overlaps@from,]
laks_snv_exon <- laks_snv_exon[!duplicated(laks_snv_exon$loc),]
length(unique(laks_snv_exon$loc))
head(laks_snv_exon)
# Output laks_snv_exon.
# We will get read counts from the bulk and scRNA-seq at these locations.
exonic_snv_file <- "data/Laks/ov2295_clone_exonic_snvs.csv"
write.table(laks_snv_exon, exonic_snv_file, quote = F, row.names = F, col.names = T, sep=",")
# Generate BED file for Strelka SNV.
head(laks_snv_exon)
bed <- laks_snv_exon[,c("chrom", "coord")]
colnames(bed) <- c("chrom", "chromStart")
# NOTE(review): chromEnd == chromStart gives zero-length BED intervals
# (BED is half-open); confirm this is what Strelka expects.
bed$chromEnd <- bed$chromStart
write.table(bed, "data/Laks/ov2295_clone_exonic_snvs.bed", quote=F, row.names = F)
|
c80c6251e8dfdf53fa9272c9f587bc9944e5b778
|
23ac665552c844602528be7702c74c86d80bcff2
|
/prdct2.R
|
f02913a5b5dbda9238fc2a7ce9b9f42650c4322b
|
[] |
no_license
|
sahaditya/sentiment-analysis-model
|
330e83045d6384dbc4e1cc5f6e20dfeb8a4347f7
|
f0e7dfa91caf56921a95018c72730fa9eebd5bef
|
refs/heads/master
| 2021-07-14T04:49:42.991956
| 2017-10-14T13:40:04
| 2017-10-14T13:40:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,038
|
r
|
prdct2.R
|
# Sentiment analysis of product reviews: clean the corpus, draw a word cloud,
# score NRC sentiments, persist them to MongoDB and plot the totals.
library(ggplot2)
library(tm)
library(wordcloud)
library(syuzhet)
# BUG FIX: mongo() below comes from the mongolite package, which was never
# attached; load it explicitly.
library(mongolite)
setwd("D:/Rproject/New folder")
texts<-readLines("pReview2.txt")
# Build and clean the corpus: strip separators, lowercase, drop numbers,
# stopwords and punctuation, collapse whitespace, stem.
docs<-Corpus(VectorSource(texts))
trans<-content_transformer(function(x,pattern) gsub(pattern," ", x))
docs<-tm_map(docs,trans,"/")
docs<-tm_map(docs,trans,",")
docs<-tm_map(docs,trans,"\\|")
docs<-tm_map(docs,content_transformer(tolower))
docs<-tm_map(docs,removeNumbers)
docs<-tm_map(docs,removeWords,stopwords("english"))
docs<-tm_map(docs,removePunctuation)
docs<-tm_map(docs,stripWhitespace)
docs<-tm_map(docs,stemDocument)
# Term frequencies, sorted descending, for the word cloud.
dtm<-TermDocumentMatrix(docs)
mat<-as.matrix(dtm)
v<-sort(rowSums(mat),decreasing = TRUE)
d<-data.frame(word = names(v), freq = v)
head(d,10)
set.seed(1056)
wordcloud(words = d$word, freq = d$freq, min.freq = 1, max.words = 200, random.order = FALSE, rot.per = 0.35, colors = brewer.pal(8,"Dark2"))
# Per-line NRC sentiment scores (8 emotions + negative/positive).
sentimentP2<-get_nrc_sentiment(texts)
#inserting in database. the freq of words of p2
m<-sentimentP2 #######
con<-mongo("SentiP2", url = "mongodb://localhost")#####
# BUG FIX: `con$drop` only referenced the drop function without calling it,
# so the collection was never emptied before the insert.
if(con$count()>0) con$drop() #####
con$insert(m) #####
######################################################
# Column sums over the 10 sentiment columns, reshaped for plotting.
text<-cbind(texts,sentimentP2)
TotalsentimentP2<- data.frame(colSums(text[,c(2:11)]))
names(TotalsentimentP2)<-"count"
TotalsentimentP2<-cbind("sentimentP2"= rownames(TotalsentimentP2),TotalsentimentP2)
rownames(TotalsentimentP2)<-NULL
ggplot(data = TotalsentimentP2, aes(x = sentimentP2, y = count)) + geom_bar(aes(fill = sentimentP2), stat = "identity") + theme(legend.position = "none") + xlab("Sentiment") + ylab("Total count") + ggtitle("Total sentiment Score")
p2<-ggplot(data = TotalsentimentP2, aes(x = sentimentP2, y = count)) + geom_bar(aes(fill = sentimentP2), stat = "identity") + theme(legend.position = "none") + xlab("Sentiment") + ylab("Total count") + ggtitle("Total sentiment Score")
|
b46ba24100d64da8674d1ea0815b7bb3d3c5fccc
|
61b173db3da0d949dbc9741ec86b02eda23ad591
|
/binomial/R/auxiliary.R
|
8dc22e7ef255d909f792baea2b355584e9edaa5e
|
[] |
no_license
|
stat133-sp19/hw-stat133-luciachenxl
|
bf4a8ec9ec9f5c324f57b3288cd10932ce8ebd27
|
fa2f2d4b9588544001bfe5d0e4af1e6cc1a3de12
|
refs/heads/master
| 2020-04-28T03:18:35.019804
| 2019-05-01T03:38:20
| 2019-05-01T03:38:20
| 174,931,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 925
|
r
|
auxiliary.R
|
# Private auxiliary function: mean of a Binomial(trials, prob)
# distribution, i.e. n * p.
aux_mean<-function(trials,prob){
  trials * prob
}
# Private auxiliary function: variance of a Binomial(trials, prob)
# distribution, i.e. n * p * (1 - p).
aux_variance<-function(trials,prob){
  trials*prob*(1-prob)
}
# Private auxiliary function: most likely number of successes in n
# independent trials with success probability p. Computes m = (n + 1) * p;
# when m is an integer both m and m - 1 are modes and both are returned,
# otherwise the single mode floor(m) is returned.
aux_mode<-function(trials,prob){
  m <- trials*prob+prob
  if (m - as.integer(m) == 0) {
    c(m, m - 1)
  } else {
    floor(m)
  }
}
# Private auxiliary function: skewness of a Binomial(trials, prob)
# distribution, i.e. (1 - 2p) / sqrt(n p (1 - p)).
aux_skewness<-function(trials,prob){
  (1-2*prob)/sqrt(trials*prob*(1-prob))
}
# Private auxiliary function: excess kurtosis of a Binomial(trials, prob)
# distribution, i.e. (1 - 6 p (1 - p)) / (n p (1 - p)).
aux_kurtosis<-function(trials,prob){
  (1-6*prob*(1-prob))/(trials*prob*(1-prob))
}
|
8e734fc71828cad5392c3e5d20f80b5b62deadb9
|
edf85e1521d59deb8eae765cba6f484125f63426
|
/R/miRtest.R
|
5c77608ac428679ce623cbfd1462dbe2723cd024
|
[] |
no_license
|
cran/miRtest
|
73657e65bbab2f680797d461d85663a00ef21137
|
e3394c8f727304c25780d237e17c459ee1bd94ff
|
refs/heads/master
| 2022-06-04T23:57:19.189079
| 2022-05-23T19:50:02
| 2022-05-23T19:50:02
| 17,697,463
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,893
|
r
|
miRtest.R
|
#' Contingency table. Necessary for Fisher test.
#'
#' Counts significant and non-significant p-values inside and outside a gene
#' set, producing the 2x2 table used by Fisher's exact test.
#'
#' @param gene.set Vector of indices of the genes belonging to the gene set.
#' @param p.val Vector with p-values.
#' @param sign Significane threshold.
#' @return 2x2 matrix: rows = significant / not significant, columns =
#'   inside / outside the gene set.
#' @author Stephan Artmann
contingency.table = function (gene.set,p.val,sign=0.05) {
 # BUG FIX: the threshold was hard-coded to 0.05 and ignored `sign`;
 # the default (sign = 0.05) behaves exactly as before.
 sign.gs = length(which(p.val[gene.set] <= sign));
 sign.not.gs = length(which(p.val[-gene.set] <= sign));
 cont.tab = c(sign.gs,length(gene.set)-sign.gs,sign.not.gs,length(p.val)-length(gene.set)-sign.not.gs);
 dim(cont.tab) = c(2,2)
 cont.tab;
}
#' Turn a data.frame indicating gene sets into the allocation matrix.
#' @param df data.frame with mRNAs in its first and miRNAs in its second column.
#' @param X Expression matrix of miRNAs whose row names will be used to generate the list of miRNAs.
#' @param Y Expression matrix of mRNAs whose row names will be used to generate the list of mRNAs.
#' @param verbose Logical. Shall progress be printed?
#' @return Allocation matrix A necessary for "miR.test" function.
#' @author Stephan Artmann
#' @examples
##MAINEXAMPLE
generate.A = function (df,X=NULL,Y=NULL,verbose=TRUE) {
mRNA = unique(df[,1]);
miRNA = unique(df[,2]);
if (!is.null(X)) {
miRNA = unique(rownames(X));
if (length(miRNA) != nrow(X)) {
print("Some miRNAs in X occur more often than once!")
warning("Some miRNAs in X occur more often than once!")
}
}
if (!is.null(Y)) {
mRNA = unique(rownames(Y));
if (length(mRNA) != nrow(Y)) {
print("Some mRNAs in Y occur more often than once!")
warning("Some mRNAs in Y occur more often than once!")
}
}
colnames(df) = c("mRNA","miRNA")
if (verbose) {
progress = seq(1,length(miRNA),length.out=20);
}
A = rep(0,length(miRNA)*length(mRNA));
dim(A) = c(length(mRNA),length(miRNA));
colnames(A) = miRNA;
rownames(A) = mRNA;
for (i in 1:length(miRNA)) {
if (verbose && i %in% round(progress)) print(paste("miRNA",i,"of",length(miRNA),"at",Sys.time()));
x = df[df$miRNA == miRNA[i],];
A[match(x$mRNA,rownames(A)),i] = 1;
}
A
}
#' Internal algorithm: Make limma test one-sided
#' @param fit Result of "lmFit" and "eBayes" functions in "limma" package.
#' @param lower Shall one-sided p-value indicate down-regulation?
#' @return Vector of one-sided p-values for the second coefficient column
#'   of \code{fit$t}.
limma.one.sided = function (fit,lower=FALSE) {
 # FIX: removed dead code -- `se.coef` (sqrt(s2.post) * stdev.unscaled)
 # was computed but never used.
 # Total degrees of freedom of the moderated t-statistic.
 df.total <- fit$df.prior + fit$df.residual
 # NOTE(review): assumes fit$t has at least two columns (coefficient 2
 # is the group effect) -- confirm against callers.
 pt(fit$t, df=df.total, lower.tail=lower)[,2]
}
#' Internal convenience wrapper: fit a linear model with the limma package.
#' Exactly one of `group` or `design` may be supplied.
#' @param X Expression matrix.
#' @param group Group membership of replicates (turned into a design matrix).
#' @param design Design as specified in limma (design matrix, see model.matrix).
#' @author Stephan Artmann
limma.test = function (X,group=NULL,design=NULL) {
  if (!is.null(group) & !is.null(design)) {
    stop ("Just specify group *or* design in limma.test()")
  }
  # Derive the design matrix from the group factor when no design is given.
  if (!is.null(group)) {
    design = model.matrix(~group)
  }
  # Linear model fit followed by empirical-Bayes moderation.
  eBayes(lmFit(X, design))
}
#' Internal function for gene set testing.
#' @param A Allocation matrix as in "miR.test" function.
#' @param X miRNA expression matrix as in `miR.test' function. Only necessary when allocation.matrix=TRUE.
#' @param Y mRNA expression matrix as in "miR.test" function.
#' @param group group as in `miR.test' function
#' @param tests Test applied, see gene.set.tests
#' @param permutation Shall permutation procedure for global tests be applied? Put 'FALSE' to use approximate results or give a number for the number of permutations.
#' @param nrot Number of rotations of rotation tests. Defaults to 1000 to be able to show p-values as low as 10^-3.
#' @param design If specified, group will be ignored. Design matrix as used in `limma' package. Cannot be used with global tests.
#' @param allocation.matrix Logical, is A an allocation matrix with mRNAs in its columns and miRNAs in its rows, or is it an allocation data.frame?
#' @param verbose Defaults to FALSE. If TRUE, progress is printed.
#' @return List of the following, for up- and for down-regulation: Matrix with testing results for every gene set in its rows and the applied gene set test in its columns.
#' @references
#' Artmann, Stephan and Jung, Klaus and Bleckmann, Annalen and Beissbarth, Tim (submitted).
#' Detection of simultaneous group effects in microRNA expression and
#' related functional gene sets.
#'
#' Brunner, E. (2009) Repeated measures under non-sphericity.
#' Proceedings of the 6th St. Petersburg Workshop on Simulation,
#' 605-609.
#'
#' Jelle J. Goeman, Sara A. van de Geer, Floor de Kort, Hans C. van
#' Houwelingen (2004) A global test for groups of genes: testing
#' association with a clinical outcome. Bioinformatics 20, 93-99.
#'
#' Jung, Klaus and Becker, Benjamin and Brunner, Edgar and Beissbarth, Tim (submitted).
#' Comparison of Global Tests for Functinoal Gene Sets in
#' Two-Group Designs and Selection of Potentially
#' Effect-causing Genes.
#'
#' Majewski, IJ, Ritchie, ME, Phipson, B, Corbin, J, Pakusch, M,
#' Ebert, A, Busslinger, M, Koseki, H, Hu, Y, Smyth, GK, Alexander,
#' WS, Hilton, DJ, and Blewitt, ME (2010). Opposing roles of polycomb
#' repressive complexes in hematopoietic stem and progenitor cells.
#' _Blood_, published online 5 May 2010.
#'
#' Mansmann, U. and Meister, R., 2005, Testing differential gene
#' expression in functional groups, _Methods Inf Med_ 44 (3).
#'
#' Smyth, G. K. (2004). Linear models and empirical Bayes methods for
#' assessing differential expression in microarray experiments.
#' _Statistical Applications in Genetics and Molecular Biology_,
#' Volume *3*, Article 3.
#'
#' Wu, D, Lim, E, Francois Vaillant, F, Asselin-Labat, M-L, Visvader,
#' JE, and Smyth, GK (2010). ROAST: rotation gene set tests for
#' complex microarray experiments. _Bioinformatics_, published online
#' 7 July 2010.
#'
#' @author Stephan Artmann
gs.test = function(A,X=NULL,Y,group=NULL,tests,permutation=FALSE,nrot=1000,design=NULL,allocation.matrix=FALSE,verbose=FALSE) {
# Runs the requested gene set tests for every gene set (miRNA target set).
# Returns list(low=..., high=...): one p-value matrix per regulation
# direction, gene sets in rows, tests in columns.
# Load required libraries
#library(limma)
#library(globaltest)
#library(GlobalAncova)
#library(RepeatedHighDim)
# Permutation configuration for `GlobalAncova' (ga.*) and `globaltest'
# (gt.perm); approximate p-values are used when permutation == FALSE.
ga.method = "approx";
ga.perm = 0;
gt.perm = 0;
if (permutation) {
ga.method="perm";
gt.perm = permutation;
ga.perm = permutation;
}
if (length(tests) == 0) stop ("No gene set tests specified in gs.test!");
if (!is.null(design) && !is.null(group)) stop ("Group and design specified, aborting")
# `GA' and `globaltest' only support the two-group setting, not design matrices.
if (!is.null(design)) tests = tests[tests != "GA" & tests != "globaltest"]
if (length(tests) == 0) stop ("Only competitive gene set tests and `ROAST' can be applied to design matrices! This is not yet implemented for the remaining tests.");
# Prepare the p-value matrices: one row per gene set, one column per test.
testNo = length(tests);
miRno = ncol(A);
if (!allocation.matrix) {
miRs = rownames(X);
miRno = length(miRs);
}
P.l = rep(NA,miRno*(testNo));
dim(P.l) = c(miRno,(testNo));
P.h = P.l;
# Gene-wise limma moderated t-tests: one-sided p-values for down- (fit.l)
# and up-regulation (fit.h), BH-adjusted versions, and gene ranks.
gt.options(transpose=TRUE);
if (is.null(design)) design = model.matrix(~group);
# BUGFIX: `design' is guaranteed non-NULL here, so the former
# `if (is.null(design)) limma.test(Y, group=group) else ...' branch was
# dead code; only the design branch could ever run.
fit = limma.test(Y,design=design);
fit.l = limma.one.sided(fit,lower=TRUE);
fit.h = 1 - fit.l;
fit.l.adj = p.adjust(fit.l,method="BH");
fit.h.adj = p.adjust(fit.h,method="BH");
rank.low = rank(fit.l,ties.method="random");
rank.high = rank(1-fit.l,ties.method="random");
M = fit$coef[,2];   # per-gene effect estimate; its sign splits the set for GA/globaltest
L.romer = list();   # gene set index lists accumulated for romer
L.roast = list();   # gene set index lists accumulated for mroast
if (verbose) {
progress = seq(1,miRno,length.out=20);
}
for (j in 1:miRno) {
if (verbose && j %in% round(progress)) print (paste("Gene Set",j,"of",miRno,"Gene Sets at",Sys.time()));
# Build the logical gene index of set j, either from the allocation
# matrix or by matching the allocation data.frame against rownames(Y).
if (allocation.matrix) {
index =(A[,j] == 1);
} else {
if (is.null(rownames(Y))) stop ("Please specify the gene names as the row names of Y. Otherwise it is impossible to match the data in A to the genes in Y.")
index = match(A[A[,2] == miRs[j],1],rownames(Y));
ind = rep(FALSE,nrow(Y));
ind[index] = TRUE;
index = ind;
}
# BUGFIX (applies to KS, W, Fisher, roast and romer below): the guard was
# written `length(which(index) > 2)', which is the length of a logical
# vector and is non-zero for any non-empty set. The intended guard is
# "more than 2 genes in the set", i.e. sum(index) > 2.
if ("KS" %in% tests) {
if (sum(index) > 2) {
ks.rank.low = ks.test(rank.low[index],"punif",min=1,max=max(rank.low),alternative="greater")$p.value;
ks.rank.high = ks.test(rank.high[index],"punif",min=1,max=max(rank.high),alternative="greater")$p.value;
} else {
ks.rank.low = NA;
ks.rank.high = NA;
}
P.l [j,match("KS",tests)] = ks.rank.low;
P.h [j,match("KS",tests)] = ks.rank.high;
}
if ("W" %in% tests) {
if (sum(index) > 2) {
w.rank.low = wilcox.test(rank.low[index],rank.low[-which(index)],mu=0,paired=FALSE,alternative="less")$p.value;
w.rank.high = wilcox.test(rank.high[index],rank.high[-which(index)],mu=0,paired=FALSE,alternative="less")$p.value;
} else {
w.rank.low = NA;
w.rank.high = NA;
}
P.l [j,match("W",tests)] = w.rank.low;
P.h [j,match("W",tests)] = w.rank.high;
}
if ("Fisher" %in% tests) {
if (sum(index) > 2) {
f.adj.low = fisher.test(contingency.table(gene.set=which(index),p.val=fit.l.adj),alternative="greater")$p.value;
f.adj.high = fisher.test(contingency.table(gene.set=which(index),p.val=fit.h.adj),alternative="greater")$p.value;
} else {
f.adj.low = NA;
f.adj.high = NA;
}
P.l [j,match("Fisher",tests)] = f.adj.low;
P.h [j,match("Fisher",tests)] = f.adj.high;
}
if ("globaltest" %in% tests) {
# Self-contained tests split the set by the sign of the effect estimate M.
if (length(which(index & (M<0))) > 2) {
gt.low = gt(group,Y[index & M<0,],permutations=gt.perm)@result[,1];
} else {
gt.low = NA;
}
P.l [j,match("globaltest",tests)] = gt.low;
if (length(which(index & (M>0))) > 2) {
gt.high = gt(group,Y[index & M>0,],permutations=gt.perm)@result[,1];
} else {
gt.high = NA;
}
P.h [j,match("globaltest",tests)] = gt.high;
}
if ("GA" %in% tests) {
# sink() silences GlobalAncova's console chatter.
if (length(which(index & (M<0))) > 2) {
sink("ga.out")
ga.low = GlobalAncova(Y[index & M<0,],group=group,method=ga.method,perm=ga.perm)$test.result[2];
sink();
} else {
ga.low = NA;
}
P.l [j,match("GA",tests)] = ga.low;
if (length(which(index & (M>0))) > 2) {
sink("ga.out")
ga.high = GlobalAncova(Y[index & M>0,],group=group,method=ga.method,perm=ga.perm)$test.result[2];
sink()
} else {
ga.high = NA;
}
P.h [j,match("GA",tests)] = ga.high;
}
if ("roast" %in% tests) {
if (sum(index) > 2) {
L.roast[[length(L.roast) + 1]] = which(index);
} else {
L.roast[[length(L.roast) + 1]] = rep(FALSE,nrow(Y));
}
}
if ("romer" %in% tests) {
if (sum(index) > 2) {
L.romer[[length(L.romer) + 1]] = which(index);
} else {
L.romer[[length(L.romer) + 1]] = rep(NA,nrow(Y));
}
}
}
# Rotation tests run once over all collected gene sets.
if ("roast" %in% tests) {
if(verbose) print("Starting ROAST procedure...")
Roast = mroast(L.roast,y=Y,design=design,nrot=nrot,adjust.method="none")$P.Value;
P.l[,match("roast",tests)] = Roast[,3];
P.h[,match("roast",tests)] = Roast[,2];
if(verbose) print("Finished ROAST procedure...")
}
if ("romer" %in% tests) {
if(verbose) print("Starting romer procedure...")
Romer = romer(index=L.romer,y=Y,design=design,nrot=nrot);
P.l[,match("romer",tests)] = Romer[,3];
P.h[,match("romer",tests)] = Romer[,2];
if(verbose) print("Finished romer procedure...")
}
list(low=P.l,high=P.h);
}
#' Combine two one-sided p-values with Fisher's method.
#'
#' The statistic -2*(log(p1) + log(p2)) follows a chi-squared distribution
#' with 2k = 4 degrees of freedom under the null hypothesis.
#' @param p1,p2 one-sided p-values that shall be combined.
#' @param check.range If set to "TRUE" values above 1 will be set to 1.
#' @return Combined p-value.
#' @author Stephan Artmann
fisher.combination = function (p1,p2,check.range=FALSE) {
# Optionally clamp inputs that exceed 1 (e.g. from doubled one-sided p-values).
if (check.range) {
p1 <- pmin(p1, 1)
p2 <- pmin(p2, 1)
}
chisq.stat <- -2 * (log(p1) + log(p2))
1 - pchisq(chisq.stat, df = 4)
}
#' Combine two one-sided p-values via the inverse-normal (Stouffer) method.
#'
#' The p-values are mapped to standard-normal quantiles, summed and
#' rescaled by sqrt(2); the result is reported as a two-sided p-value.
#' @param p1,p2 one-sided p-values that shall be combined.
#' @return Two-sided combined p-value.
#' @author Stephan Artmann
inverse.normal.combination = function(p1,p2) {
z.sum <- qnorm(p1) + qnorm(p2)
combined.stat <- z.sum / sqrt(2)
2 * (1 - pnorm(abs(combined.stat)))
}
#' Internal helper: apply a two-argument function to a matrix and a vector.
#'
#' Treats M column-wise (mapply recycles v along the elements of M) and
#' restores the matrix shape of the result.
#' @param M The matrix for whose column vectors mapply shall be used.
#' @param v The vector.
#' @param FUN The function.
#' @param ... Further arguments to be given to FUN.
#' @author Stephan Artmann
m.combine = function(M,v,FUN,...) {
combined <- mapply(FUN, M, v, ...)
# mapply flattens the result; reshape it back to M's dimensions.
matrix(combined, nrow = nrow(M), ncol = ncol(M))
}
#' Main Function of miRtest package.
#' @author Stephan Artmann
#' @param X miRNA expression matrix with genes in rows and replicates in columns
#' @param Y mRNA expression matrix with genes in rows and replicates in columns
#' @param A Allocation data.frame or Allocation matrix. An allocation data.frame contains the mRNAs in its first column and the miRNAs in its second column. See vignette `miRtest' for information on Allocation matrices.
#' @param group.miRNA Vector of miRNA group membership, being either numeric or a factor (**this makes a difference**). E. g. if you have four replicates in a control group and three replicates in a treated group, you may choose c(1,1,1,1,2,2,2)
#' @param design.miRNA If specified, group.miRNA will be ignored. Here you can specify a design matrix as it is returned from the model.matrix `limma' function.
#' @param design.mRNA If specified, group.mRNA will be ignored. Here you can specify a design matrix as it is returned from the model.matrix `limma' function.
#' @param group.mRNA Vector of mRNA group membership, being either numeric or a factor (**this makes a difference**).E. g. if you have four replicates in a control group and three replicates in a treated group, you may choose c(1,1,1,1,2,2,2)
#' @param gene.set.tests Test to be applied for gene set testing. Can be one or more of the following: `globaltest', `GA', `KS', `W', `Fisher', `roast', `romer', or `all' if you want to do all tests.
#' @param adjust Muliple hypothesis testing adjustment. Same options as in "p.adjust" function.
#' @param permutation Number of permutations for `globaltest' or `GlobalAncova' gene set tests. Put to "FALSE" to use the approximate p-values instead of permutation ones.
#' @param nrot Number of rotations for rotation tests `ROAST' and `romer'
#' @param allocation.matrix Logical, is A an allocation matrix with mRNAs in its columns and miRNAs in its rows, or is it an allocation data.frame?
#' @param verbose Defaults to FALSE. If TRUE, output on progress is printed.
#' @param errors Defaults to TRUE. If set to FALSE, some errors checking correct sizes of matrices are turned into warning messages.
#' @return Matrix with testing results for every miRNA in its rows and the applied gene set test in its columns. Note that result will depend on whether multiple hypothesis testing correction was applied or not.
#' @references
#' Artmann, Stephan and Jung, Klaus and Bleckmann, Annalen and Beissbarth, Tim (submitted).
#' Detection of simultaneous group effects in microRNA expression and
#' related functional gene sets.
#'
#' Brunner, E. (2009) Repeated measures under non-sphericity.
#' Proceedings of the 6th St. Petersburg Workshop on Simulation,
#' 605-609.
#'
#' Jelle J. Goeman, Sara A. van de Geer, Floor de Kort, Hans C. van
#' Houwelingen (2004) A global test for groups of genes: testing
#' association with a clinical outcome. Bioinformatics 20, 93-99.
#'
#' Jung, Klaus and Becker, Benjamin and Brunner, Edgar and Beissbarth, Tim (submitted).
#' Comparison of Global Tests for Functional Gene Sets in
#' Two-Group Designs and Selection of Potentially
#' Effect-causing Genes.
#'
#' Majewski, IJ, Ritchie, ME, Phipson, B, Corbin, J, Pakusch, M,
#' Ebert, A, Busslinger, M, Koseki, H, Hu, Y, Smyth, GK, Alexander,
#' WS, Hilton, DJ, and Blewitt, ME (2010). Opposing roles of polycomb
#' repressive complexes in hematopoietic stem and progenitor cells.
#' _Blood_, published online 5 May 2010.
#'
#' Mansmann, U. and Meister, R., 2005, Testing differential gene
#' expression in functional groups, _Methods Inf Med_ 44 (3).
#'
#' Smyth, G. K. (2004). Linear models and empirical Bayes methods for
#' assessing differential expression in microarray experiments.
#' _Statistical Applications in Genetics and Molecular Biology_,
#' Volume *3*, Article 3.
#'
#' Wu, D, Lim, E, Francois Vaillant, F, Asselin-Labat, M-L, Visvader,
#' JE, and Smyth, GK (2010). ROAST: rotation gene set tests for
#' complex microarray experiments. _Bioinformatics_, published online
#' 7 July 2010.
#'
#' @examples
##MAINEXAMPLE
miR.test = function (X,Y,A,group.miRNA=NULL,group.mRNA=NULL,gene.set.tests="romer",design.miRNA=NULL,design.mRNA=NULL,adjust="none",permutation=FALSE,nrot=1000,allocation.matrix=FALSE,verbose=FALSE,errors=TRUE) {
# Main entry point: miRNA-wise limma tests are combined with gene set
# tests on the mRNA targets; returns a p-value matrix (miRNAs x tests).
# library(limma)
### Convert data.frames into matrices
X = as.matrix(X);
Y = as.matrix(Y);
### Check Data Input ###
if (length(gene.set.tests) == 0) stop("Please provide gene.set.tests")
if (allocation.matrix) {
if (!(ncol(A) == nrow(X))) stop("Number of columns of A must equal number of rows of X")
# BUGFIX: typo "ocurring" -> "occurring" in the error message.
if (errors && !(nrow(A) == nrow(Y))) stop("Number of rows of A must equal number of rows of Y. Check that Y does not have duplicate row names. You can disable this error message with errors=FALSE. Note that then genes occurring more often than once will have a larger weight in the gene set test.")
}
if (length(gene.set.tests) == 1 && gene.set.tests == "all") gene.set.tests = c("globaltest","GA","KS","W","Fisher","roast","romer")
if (!(all(gene.set.tests %in% c("globaltest","GA","KS","W","Fisher","roast","romer")))) stop("Check gene.set.tests and enter only one or more of the following tests: globaltest, GA, KS, W, Fisher, roast, romer or all if you want to do all tests")
if (!is.null(group.miRNA) & !is.null(design.miRNA)) warning("group.miRNA will be ignored as design.miRNA is specified")
if (!is.null(group.miRNA) & !is.null(group.mRNA)) {
if (!all(levels(group.miRNA) == levels(group.mRNA))) stop ("Group names of miRNA samples must be the same as of the mRNA samples. Aborting");
print("Assuming that group names of miRNA samples are the same as of mRNA samples!");
}
# ROAST is disabled in this release for compatibility reasons.
if("roast" %in% gene.set.tests) {
print("Note: For compatibility reasons ROAST is not available in this version.");
print("      It will be added to the next version of miRtest.");
print("      To use miRtest with ROAST refer to older versions.");
gene.set.tests = gene.set.tests[gene.set.tests != "roast"];
}
if (length(gene.set.tests) == 0) stop("Please provide gene.set.tests")
# Self-contained tests GA/globaltest do not support design matrices.
if (!is.null(design.mRNA)) gene.set.tests = gene.set.tests[gene.set.tests != "GA" & gene.set.tests != "globaltest" ]
### Order the allocation matrix ###
if(allocation.matrix) {
if (!any(is.null(colnames(A)),is.null(rownames(X)))) {
# BUGFIX: the sorted matrices were previously discarded (bare
# expressions without assignment), so the reordering never took effect.
A <- A[,order(colnames(A))];
X <- X[order(rownames(X)),];
if (!all(colnames(A) == rownames(X))) warning("Column names of A are not equal to rownames of X")
} else {
warning("Allocation matrix A has no colnames and/or miRNA matrix X has no rownames. Assuming that columns in A are in the same order as rows in X.")
}
if (!any(is.null(colnames(A)),is.null(rownames(Y)))) {
# BUGFIX: same missing assignments for the row ordering of A and Y.
A <- A[order(rownames(A)),];
Y <- Y[order(rownames(Y)),];
if(!all(rownames(A) == rownames(Y))) warning("Row names of A are not equal to rownames of Y")
} else {
warning("Allocation matrix A and/or miRNA matrix X has no rownames. Assuming that rows in A are in the same order as rows in Y.")
}
} else {
if (is.null(rownames(X))) stop("Please specify row names of X");
if (is.null(rownames(Y))) stop("Please specify row names of Y");
}
# (A duplicate GA/globaltest filter line was removed here; the same
# filtering already happened above before the ordering step.)
### Do miRNA-wise testing ###
if (!is.null(design.miRNA)) {
miR = limma.test(X,design=design.miRNA);
} else {
miR = limma.test(X,group=group.miRNA);
}
miR.l = limma.one.sided(miR,lower=TRUE);
miR.h = 1-miR.l;
### Do gene set testing ###
tests = gene.set.tests;
if(!is.null(design.mRNA)) {
GS = gs.test(A,X,Y,group=NULL,tests,permutation=permutation,nrot=nrot,design=design.mRNA,allocation.matrix=allocation.matrix,verbose=verbose);
} else {
GS = gs.test(A,X,Y,group.mRNA,tests,permutation=permutation,nrot=nrot,allocation.matrix=allocation.matrix,verbose=verbose);
}
### Combine the results ###
# rot.tests marks tests combined with the inverse-normal method
# (rank/rotation based: roast, romer, W); the rest use Fisher's method.
rot.tests = rep(FALSE,length(tests));
if ("roast" %in% tests) {
rot.tests[match("roast",tests)] = TRUE;
}
if ("romer" %in% tests) {
rot.tests[match("romer",tests)] = TRUE;
}
if ("W" %in% tests) {
rot.tests[match("W",tests)] = TRUE;
}
P.l = GS$low;
P.h = GS$high;
P = rep(NA,nrow(P.l)*ncol(P.l));
dim(P) = dim(P.l); # two-sided p-values
if (length(which(!rot.tests)) > 0) {
P.l[,!rot.tests] = m.combine(P.l[,!rot.tests],miR.h,fisher.combination) ### p-value for up-regulation in miRNA
P.h[,!rot.tests] = m.combine(P.h[,!rot.tests],miR.l,fisher.combination) ### p-value for down-regulation in miRNA
P[,!rot.tests] = apply(2*pmin(P.l[,!rot.tests,drop=FALSE],P.h[,!rot.tests,drop=FALSE]),c(1,2),min,1);
}
if (length(which(rot.tests)) > 0) {
P.l[,rot.tests] = m.combine(P.l[,rot.tests],miR.h,inverse.normal.combination);
P.l[,rot.tests] [abs(P.l[,rot.tests] - miR.h) == 1] = 1; ### remove NaN that results from combining 0 and 1
P.h[,rot.tests] = m.combine(P.h[,rot.tests],miR.l,inverse.normal.combination);
# BUGFIX: this NaN clean-up must target P.h; it previously wrote into P.l.
P.h[,rot.tests] [abs(P.h[,rot.tests] - miR.l) == 1] = 1; ### remove NaN that results from combining 0 and 1
P[,rot.tests] = pmax(P.l[,rot.tests],P.h[,rot.tests]); ### as rotation tests can return 1 we have to take the maximum here
}
P = apply(P,2,p.adjust,method=adjust);
colnames(P) = tests;
if (!is.null(colnames(A))) {
if(allocation.matrix) {
rownames(P) = colnames(A)
} else {
rownames(P) = rownames(X)
}
}
if (is.null(dim(P)) || ncol(P) == 1) colnames(P) = "miRtest"
P;
}
|
ebb5dfc48bfd5c70084f6a478e3265df7c962642
|
c33af3f2dd80f61baef3be46eb25421ec38af4bc
|
/man/nntsloglikSymmetric.Rd
|
c4d2b5de4d4c87ba41398c8b63f53253e8d9f876
|
[] |
no_license
|
cran/CircNNTSR
|
51cb9cf39ab32a5476e252e3711ee0d89e35fdd5
|
ee53f05ee5c5e98fddd0791d452c6b622127e7c0
|
refs/heads/master
| 2021-01-18T21:47:10.362545
| 2020-02-18T04:10:02
| 2020-02-18T04:10:02
| 17,678,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,616
|
rd
|
nntsloglikSymmetric.Rd
|
\name{nntsloglikSymmetric}
\Rdversion{0.1}
\alias{nntsloglikSymmetric}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{NNTS symmetric log-likelihood function}
\description{Computes the log-likelihood function with NNTS symmetric density for the data}
\usage{nntsloglikSymmetric(cpars = c(0, 0), M = 0, data)}
\arguments{
\item{cpars}{Vector of real numbers of dimension M+1. The first M numbers are the squared moduli of the c parameters.
The sum must be less than 1/(2*pi). The last number is the mean of symmetry}
\item{M}{Number of components in the NNTS}
\item{data}{Vector with angles in radians. The first column is used if data are a matrix}
}
%\details{
%% ~~ If necessary, more details than the description above
%}
\value{The function returns the value of the log-likelihood function for the data}
\references{
Fernandez-Duran, J.J., Gregorio-Dominguez, M.M. (2009) Symmetric Circular Distributions Based on Nonnegative Trigonometric Sums. Working Paper, DE-C09.12, Department of Statistics, ITAM, Mexico
}
\author{
Juan Jose Fernandez-Duran and Maria Mercedes Gregorio-Dominguez
}
\note{The default values provide the Uniform circular log-likelihood for the data
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
nntsloglikSymmetric(c(.01,.02,2),2,t(c(pi,pi/2,2*pi,pi)))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
44eb938dd73bd2c34e22bd0be15944db87cb47da
|
c58a0407c213283cc8e32aa97f8fe167f7c416ff
|
/plot1.R
|
4d91c27b79a7e8cd22e630fe14c357991020737a
|
[] |
no_license
|
HeberTU/ExData_Plotting1
|
38a04c46374f586ee147f8e9834e278950b1c843
|
57576d515b2a5a18b197ff91eff867f1aa19bc13
|
refs/heads/master
| 2020-03-29T07:59:17.137446
| 2017-06-18T06:34:13
| 2017-06-18T06:34:13
| 94,664,782
| 0
| 0
| null | 2017-06-18T04:23:26
| 2017-06-18T04:23:26
| null |
UTF-8
|
R
| false
| false
| 489
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
library(dplyr)
# NOTE(review): hard-coded working directory; assumes the data file lives here.
setwd("D:/Data/Academic/Coursera/R/02.-Data Scients with R/04.-Exploratory Data Analysis/W1/10.-Course Project")
# '?' marks missing values in the source data.
db = read.table("household_power_consumption.txt",header = TRUE,sep=";",
                na.strings = "?")
# Keep only the two target dates (d/m/yyyy format).
db_fltr<-db%>%
  filter(Date=="2/2/2007"|Date=="1/2/2007")
png(file="plot1.png", width = 480, height = 480)
# BUGFIX: plot title typo "Global Active Powe" -> "Global Active Power".
hist(db_fltr$Global_active_power,col="Red",main = "Global Active Power"
     ,xlab="Global Active Power (kilowatts)" )
dev.off()
|
e508528ca107b80e07e60708032b2e93d4f1e3f2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qiimer/examples/dist_groups.Rd.R
|
6f62bba19f7d9bb7f1542bc7dbdadc9e6575f7a8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 238
|
r
|
dist_groups.Rd.R
|
library(qiimer)
### Name: dist_groups
### Title: Create a data frame of distances between groups of items.
### Aliases: dist_groups
### ** Examples
data(relmbeta_dist)
data(relmbeta)
head(dist_groups(relmbeta_dist, relmbeta$Diet))
|
70dafe8b50816e0805579ad56c25f21664b5431c
|
a0549180a550c7241130ebc84b2693b0935c011a
|
/man/ti_projected_monocle.Rd
|
05562e4e0e962207954473d9f08fe493c8343e16
|
[] |
no_license
|
Feigeliudan01/dynmethods
|
7cc2bde206e1c0489d3bd0ab3cd0505ddfeae3c5
|
53c6d1d486887692e513cc8537c9caf9ca107160
|
refs/heads/master
| 2020-08-30T21:50:40.163847
| 2019-07-03T09:32:38
| 2019-07-03T09:32:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,846
|
rd
|
ti_projected_monocle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ti_projected_monocle.R
\name{ti_projected_monocle}
\alias{ti_projected_monocle}
\title{Projected Monocle}
\usage{
ti_projected_monocle(reduction_method = "DDRTree", max_components = 2L,
norm_method = "vstExprs", auto_param_selection = TRUE,
filter_features = TRUE, filter_features_mean_expression = 0.1)
}
\arguments{
\item{reduction_method}{A character string specifying the algorithm to use for
dimensionality reduction. Domain: {DDRTree}. Default: DDRTree. Format:
character.}
\item{max_components}{The dimensionality of the reduced space. Domain: U(2,
20). Default: 2. Format: integer.}
\item{norm_method}{Determines how to transform expression values prior to
reducing dimensionality. Domain: {vstExprs, log, none}. Default: vstExprs.
Format: character.}
\item{auto_param_selection}{When this argument is set to TRUE (default), it
will automatically calculate the proper value for the ncenter (number of
centroids) parameters which will be passed into DDRTree call. Default: TRUE.
Format: logical.}
\item{filter_features}{Whether to include monocle feature filtering. Default:
TRUE. Format: logical.}
\item{filter_features_mean_expression}{Minimal mean feature expression, only
used when \code{filter_features} is set to TRUE. Domain: U(0, 10). Default: 0.1.
Format: numeric.}
}
\value{
A TI method wrapper to be used together with
\code{\link[dynwrap:infer_trajectories]{infer_trajectory}}
}
\description{
Will generate a trajectory using \href{https://github.com/cole-trapnell-lab/monocle-release}{Projected Monocle}.
This method was wrapped inside a
\href{https://github.com/dynverse/ti_projected_monocle}{container}.
The original code of this method is available
\href{https://github.com/cole-trapnell-lab/monocle-release}{here}.
}
\keyword{method}
|
ae545957923f2cc753b22cca92d393473b73d4e3
|
0613b060b9128653edbc1ba023f23fad3cb264f8
|
/man/checkped.Rd
|
0d35ce58491a0e8c1f8a2844a04695f1c8827931
|
[] |
no_license
|
luansheng/visPedigree
|
dc3526b2567b0ffb18b3f259eb89a6fc6603b534
|
ae4692d0d52d3f3b3d4cb7c1f945d5538c3eadd4
|
refs/heads/master
| 2023-01-23T07:35:21.733015
| 2023-01-12T09:05:45
| 2023-01-12T09:05:45
| 149,978,617
| 14
| 8
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,067
|
rd
|
checkped.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkped.R
\name{checkped}
\alias{checkped}
\title{Check a pedigree}
\usage{
checkped(ped, addgen = TRUE)
}
\arguments{
\item{ped}{A data.table or data frame including the pedigree, which includes the first three columns: \strong{individual}, \strong{sire} and \strong{dam} IDs. More columns, such as sex and generation, can be included in the pedigree file. Names of the three columns can be assigned as you would like, but their order must not be changed in the pedigree. Individual ID should not be coded as "", " ", "0", asterisk, or "NA", otherwise these individuals will be deleted from the pedigree. Missing parents should be denoted by "NA", "0", or asterisk. Space and "" will also be recoded as missing parents, but this is not recommended.}
\item{addgen}{A logical value indicates whether individual generation number will be generated. The default values is TRUE, then a new column named \strong{Gen} will be added in the returned data.table.}
}
\value{
A data.table including the checked pedigree is returned. Individual, sire and dam ID columns are renamed as \strong{Ind}, \strong{Sire} and \strong{Dam}. Missing parents are replaced with the default missing value \strong{NA}. The column \strong{Sex} includes individuals' sex (male or female, NA for unknown sex). The column \strong{Gen} will be included when the parameter \emph{addgen} is TRUE. Ind, Sire, Dam and Sex columns are character; the Gen column is integer.
}
\description{
\code{checkped} function checks a pedigree.
}
\details{
This function takes a pedigree, detects missing parents, checks for duplicated and bisexual individuals, adds missing founders, and sorts the pedigree. All individuals' sexes will be inferred if there is no sex information in the pedigree. If the pedigree includes the column \strong{Sex}, then individuals' sexes need to be recoded as "male", "female", or NA (unknown sex). Missing sexes will be identified from the pedigree structure and added if possible.
}
\keyword{internal}
|
20515fadcca1ffee6a0d69ce0857e40f9549dae1
|
75a8669174b81d336147dc34f38c716c00064ac6
|
/Reporting/SynopticEventsAnimation/RainResponse.R
|
b3125cf60e783693fa4ee5024611932aabf6036b
|
[] |
no_license
|
feralindia/CWC
|
da54305dabc973014c94ce762db9f27627f98741
|
36148bc90b5d86b7f7d6dc467ee769171fda6c19
|
refs/heads/master
| 2021-01-17T08:20:24.750716
| 2018-04-09T05:57:05
| 2018-04-09T05:57:05
| 38,206,355
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,990
|
r
|
RainResponse.R
|
## Get "n" highest periods of rainfall for all raingauges
## Merge across timestamps keeping raingauge IDs
## Merge with spatial attributes
## Merge with wind direction and speed data
## Merge with ground level and 0.6m gauge
##---- Pull out period of X highest rainfall events defined in function read.max.rain
## merge the topographic data with the file
## NOTE(review): this script relies on objects defined elsewhere before it
## is sourced: `site`, `max.event`, the "*RainFiles*" objects, and the
## helpers remove.logger(), read.max.rain(), read.othermax.rain(),
## add.topoinfo(), plus sp/gstat functions (coordinates, spsample, idw).
## Confirm they are loaded first.
## Find the per-site file lists created earlier in the workflow.
in.files <- ls(pattern = "RainFiles")
in.files <- in.files[grep(pattern=site, x=in.files)]
## NOTE(review): 1:length(in.files) misbehaves when in.files is empty;
## seq_along() would be safer.
for(n in 1: length(in.files)){
in.filename <- in.files[n]
y <- get(in.files[n])
## loggers giving trouble can be removed here
## NOTE(review): remove.logger() appears to act on `y` via shared state;
## verify it returns `y` without the named logger.
y <- remove.logger("tbrg_125a")
## Period label is the third "_"-separated token of the object name.
prd <- unlist(strsplit(in.filename, split="_"))[3]
outfldr <- paste("sel_", gsub(pattern = " ", replacement = "", prd), sep="")
## For each logger: read its top rainfall events, then pull matching
## timestamps from every other logger and stack them into tmp1.
for(m in 1:nrow(y)){
full.filename <- as.character(y$fn.full[m])
short.filename <-as.character(y$fn.short[m])
tmp <- read.max.rain(full.filename, short.filename)
tmp <- add.topoinfo(tmp)
tmp1 <- tmp
z <- y[-m,]
## get data for all other stations for max rain timestamps of this logger
for(o in 1:nrow(z)){
full.flnm <- as.character(z$fn.full[o])
short.flnm <-as.character(z$fn.short[o])
tmp2 <- read.othermax.rain(full.flnm, short.flnm)
tmp2 <- add.topoinfo(tmp2)
tmp1 <- rbind(tmp1, tmp2)
}
## Drop rows lacking coordinates before spatial interpolation.
tmp1 <- tmp1[complete.cases(tmp1$x),]
##--- name output files for each logger
outfile.csv <- paste(site, "/", outfldr, "/", short.filename, ".csv", sep = "")
outfile.png <- paste(site, "/", outfldr, "/",short.filename, ".png", sep = "")
##--- plot
## One panel per ranked rainfall event, IDW-interpolated rainfall surface.
png(filename = outfile.png, width = 1200, height = 800)
OP <- par( mar=c(0,0,0,0), mfrow = c(2,round(max.event/1.9)))##
for(l in 1:max.event){
dat <- subset(tmp1, subset=(tmp1$Rank==l))
if(nrow(dat)>0){
## modified from:
## <http://personal.colby.edu/personal/m/mgimond/Spatial/Interpolation.html>
coordinates(dat) <- c("x","y")
## plot(dat, pch=16, , cex=( (dat$mm/10)))
## text(dat, as.character(dat$mm), pos=3, col="grey", cex=0.8)
dat <- dat[complete.cases(dat$mm),]
## Create an empty grid where n is the total number of cells
grd <- as.data.frame(spsample(dat, "regular", n=10000))
names(grd) <- c("x", "y")
coordinates(grd) <- c("x", "y")
gridded(grd) <- TRUE # Create SpatialPixel object
fullgrid(grd) <- TRUE # Create SpatialGrid object
## Interpolate the surface using a power value of 2 (idp=2.0)
dat.idw <- idw(mm~1,dat,newdata=grd,idp=2.0)
## Plot the raster and the sampled points
par(cex.main = 1, col.main = "red", mar=c(0,0,1.2,0))
image(dat.idw,"var1.pred",col=terrain.colors(20))
contour(dat.idw,"var1.pred", add=TRUE, nlevels=10, col="#656565")
plot(dat, add=TRUE, pch=16, cex=0.5, col="blue")
## Highlight the logger whose top events define this panel.
points(dat[dat$Unit_ID==tmp$Unit_ID[1] , ],
cex=1.5, pch=16, col="red")
title(paste("Rank", l, tmp$dt.tm[l]), line = 0.15)
text(coordinates(dat), as.character(round(dat$mm,1)),
pos=4, cex=0.8, col="blue")
box(lty = 'solid',col="gray")
}
}
par(OP)
martxt <- paste(tmp$Unit_ID[1], prd, sep="--")
mtext(martxt, side = 3, cex = 1, line=3.25)
dev.off()
## Keep the stacked table in the workspace and write it to disk.
assign(as.character(y$fn.short[m]), tmp1)
write.csv(file=outfile.csv, tmp1)
cat(paste("Top", max.event, "rainfall events for", as.character(y$fn.short[m]), "processed."),"\n")
}
cat(paste("Files for ", in.files[n], "processed."),"\n")
}
|
a2e8e34589ab7ec553ac981fa58ede6b1d8e483b
|
5e9bde2328f48b94e97bc00af73f85b3c2a7d358
|
/scripts/join_cities_boundaries.R
|
3aacc6fea5eb009ef1eac7d27b5cf005e1e6347f
|
[] |
no_license
|
chrisleboa/sc-evictions
|
974f150ab359db5062af5368d1019b6edc62e6e4
|
8ec491c4e79575ecc7198aeacb1727b53f75fb6f
|
refs/heads/master
| 2020-12-24T02:03:02.811021
| 2020-01-31T04:42:01
| 2020-01-31T04:42:01
| 237,344,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 790
|
r
|
join_cities_boundaries.R
|
# This script joins the data for 2016 city evictions and city boundary data
# for south carolina
# Author: Chris LeBoa
# Version: 2020-01-30
# Libraries
library(tidyverse)
# Parameters
file_path_input_cities <- here::here("data/sc_cities_2016.rds")
file_path_input_boundaries <- here::here("data/sc_boundaries.rds")
file_path_output <-
  here::here("data/sc_joined_cities_2016_boundaries.rds")
#===============================================================================
# BUGFIX: the bare word `Code` was a syntax error when sourcing this
# script (object 'Code' not found); it is a section header and must be a comment.
# Code ----
cities <- read_rds(file_path_input_cities)
boundaries <- read_rds(file_path_input_boundaries)
# Join boundary geometry onto each city record and persist the result.
cities %>%
  left_join(boundaries, by = "city_id") %>%
  write_rds(file_path_output)
|
60fb1208e72df87e54ecb28c6fb3a569d88dbc84
|
95e8a55975508f823758eccdbc6c8e041491724f
|
/man/percent.Rd
|
1f3598712072a5e9d75b3372c76659bb5c62ee70
|
[] |
no_license
|
rickdonnelly/pcvmodr
|
f2a1f4306efde60997039e0c6309dc48d7b1e451
|
af3a333174c5a0fc1afb93684ae5fe5aa6fa5513
|
refs/heads/master
| 2020-09-15T12:56:00.432031
| 2019-05-02T18:07:23
| 2019-05-02T18:07:23
| 94,478,198
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,123
|
rd
|
percent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/percent.R
\name{percent}
\alias{percent}
\title{Calculate the percent value of a vector (stunning this isn't built-in to R)}
\usage{
percent(x, y = sum(x), places = 1)
}
\arguments{
\item{x}{Value or vector of number to calculate as percentage of y}
\item{y}{The total to calculate percent x from, by default the sum of the
value(s) of x}
\item{places}{Number of places after the decimal to round the result to
(default value is 1)}
}
\description{
Calculate the percent value of a vector (stunning this isn't built-in to R)
}
\details{
This function calculates percentages, generally on values in a data
frame column. If you'd like the values to be normalized, simply divide the
result by 100. To use this function to calculate percent change, pass the
current value as \code{x} and the difference between current and prior values as \code{y}.
}
\examples{
df$pct_varname <- percent(df$varname) # Simple percentage
df$pct_varname <- percent(df$varname)/100.0 # Normalized value
df$pct_change <- percent(df$current, (df$current - df$prior), 3) # \% change
}
|
4aa8159c5cc32aa727a0fb0416b49892a0e3b56a
|
96e944ee2b65cf15aee1772265a9134ed8ed9640
|
/R/caller.R
|
8e08e93945ecfea169f7922abde3558be65308f3
|
[] |
no_license
|
rcodo/nseval
|
3b906346a0c5691224595552d9d8c7752440f74d
|
3384910f8403f4cf5839da7ba947100f6843917e
|
refs/heads/master
| 2021-09-20T09:46:51.200827
| 2018-08-08T04:57:37
| 2018-08-08T04:57:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,206
|
r
|
caller.R
|
#' Find the caller of a given environment.
#'
#' Given an environment that is currently on the stack, `caller`
#' determines the calling environment.
#'
#' For example, in the code:
#'
#' ```
#' X <- environment()
#' F <- function() {
#' Y <- environment()
#' caller(Y)
#' }
#' F()
#' ```
#'
#' the environment called `Y` was created by calling `F()`, and that
#' call occurs in the environment called `X`. In this case `X` is the
#' calling environment of `Y`, so `F()` returns the same environment
#' as `X()`.
#'
#' `caller` is intended as a replacement for [parent.frame], which
#' returns the next environment up the calling stack -- which is
#' sometimes the same value, but differs in some cases such as when
#' lazy evaluation re-activates an environment. `parent.frame()` can
#' return different things depending on the order in which arguments
#' are evaluated, and without warning. `caller` will by default throw
#' an error if the caller cannot be determined.
#'
#' In addition, `caller` tries to do the right thing when the
#' environment was instantiated by means of `do.call`, [eval] or
#' [do] rather than an ordinary function call.
#'
#' @param env The environment whose caller to find. The default is
#' `caller`'s caller; that is, `caller()` should return the the same
#' value as `caller(environment())`.)
#' @param ifnotfound What to return in case the caller cannot be
#' determined. By default an error is raised.
#' @return The environment which called `env` into being. If that
#' environment cannot be determined, `ifnotfound` is returned.
#'
#' @export
#'
#' @examples
#' E <- environment()
#' F <- function() {
#' Y <- environment()
#' caller(Y)
#' }
#' identical(F(), E) ## TRUE
caller <- function(env = caller(environment()),
                   ifnotfound = stop("caller: environment not found on stack")) {
  ## Strategy: find the activation record for the *earliest* invocation of
  ## `env` on the stack, then ask which frame called it. This is tricky from
  ## the R side of Rinternals.h because sys.calls() and sys.frame() elide
  ## some activation records (the small "stacktrace" package was written to
  ## help figure this out).
  where <- which_frame(env, ifnotfound)
  if (is.primitive(sys.function(where))) {
    # Primitives carry no closure environment, so a caller cannot be derived.
    if (is_default(ifnotfound)) {
      stop("caller: calling function is a primitive, which has no environment")
    } else ifnotfound
    ## NOTE(review): when a non-default `ifnotfound` is supplied, its value is
    ## computed here but NOT returned -- control falls through to the code
    ## below. Confirm whether a `return(ifnotfound)` was intended.
  }
  whichparent <- sys.parents()[where]
  if (whichparent == where) {
    # sys.parents() reports a frame as its own parent when the real parent
    # was elided from the stack. The true answer lives in the frame's
    # sysparent field, which is not directly reachable from R -- but
    # parent.frame() consults sysparent, and do.call() creates a stack frame
    # with the right sysparent, so invoking parent.frame via do.call from
    # `env` yields the value we need.
    result <- do.call(parent.frame, list(), envir=env)
    # Open question from the author: is do.call really required here, or is
    # there a way for NSE to call a builtin with a chosen sysparent directly?
  } else if(whichparent == 0) {
    # Parent index 0 means the call originated at top level.
    result <- globalenv()
  } else {
    result <- sys.frame(whichparent)
  }
  result
}
#' Making function calls, with full control of argument scope.
#'
#' The functions `do` and `do_` construct and invoke a function call.
#' In combination with [dots] and [quotation] objects they allow you
#' to control the scope of the function call and each of its arguments
#' independently.
#'
#' For `do_` all arguments should be `quotation` or `dots` objects, or
#' convertible to such using `as.quo()`. They will be concatenated
#' together by [c.dots] to form the call list (a `dots` object).
#' For `do` the first argument is quoted literally, but the
#' rest of the arguments are evaluated the same way as do_.
#'
#' The first element of the call list represents the function, and it
#' should evaluate to a function object. The rest of the call list is
#' used as that function's arguments.
#'
#' When a quotation is used as the first element, the call is evaluated
#' from the environment given in that quotation. This means that calls
#' to [caller()] (or `parent.frame()`) from within that function
#' should return that environment.
#'
#' `do` is intended to be a replacement for base function [do.call].
#'
#' @note Special builtins, such as ( [`<-`], or [`for`])
#' may require that they are called from the same environment as
#' their args.
#' @seealso get_call do.call match.call
#'
#' @param ... All arguments are concatenated using `c.dots()`. The
#' first element of the resulting list is taken as a function to
#' call, the rest as its arguments.
#'
#' @return The return value of the call.
#' @export
do <- function(...) {
  # Capture all arguments as unforced promises.
  d <- dots(...)
  # Re-wrap the first element (the function to call): per the author's note,
  # this unwraps it and then insulates it from the forcing step below, so it
  # survives as a quotation carrying its environment.
  d[[1]] <- forced_quo(arg(..1)) #unwrap and then insulate from forcing
  # Force the remaining dots and concatenate everything into one call list.
  d <- do_(quo(c.dots), d) #force dots and concatenate
  # Dispatch the assembled call list to the C-level invoker.
  do__(d)
}
#' @rdname do
#' @useDynLib nseval _do
#' @export
do_ <- function(...) {
  # Concatenate all quotation/dots arguments into a single call list
  # (first element = function, rest = arguments), then invoke it.
  d <- c.dots(...)
  do__(d)
}
# Internal: hand a fully assembled call list (a `dots` object) to the
# package's C implementation, which performs the actual invocation.
do__ <- function(d) {
  .Call("_do", d)
}
#' Get information about currently executing calls.
#'
#' `get_call(env)`, given an environment associated with a currently
#' executing call, returns the function call and its arguments, as a
#' [dots] object. To replicate a call, the [dots] object returned can
#' be passed to [do].
#'
#' `get_call` is meant to replace [`match.call`] and [`sys.call`];
#' its advantage is that it captures the environments bound to
#' arguments in addition to their written form.
#'
#' @return `get_call` returns a [dots] object, the first element of
#' which represents the function name and [caller] environment.
#' @seealso do dots caller
#' @export
#' @param env An environment belonging to a currently executing
#' function call. By default, the [caller] of get_call itself
#' (so `get_call()` is equivalent to `get_call(environment())`.)
#' @param ifnotfound What to return if the call is not found. By
#' default an error is thrown.
#' @examples
#' # We might think of re-writing the start of [lm] like so:
#' LM <- function(formula, data, subset, weights, na.action, method = "qr",
#' model = TRUE, x = FALSE, y = FALSE, qr = TRUE, singular.ok = TRUE,
#' contrasts = NULL, offset, ...) {
#' cl <- get_call()
#' mf <- do(model.frame,
#' arg_list(formula, data, subset, weights, na.action, offset))
#'
#' z <- get_call()
#'
#' class(z) <- c("LM", class(z))
#' z$call <- cl
#' z
#' }
#'
#' # and `update` like so:
#' update.LM <- function(object, formula., ...) {
#' call <- object$call
#' extras <- dots(...)
#' call$formula <- forced_quo(update.formula(formula(object), formula.))
#' do(call)
#' }
get_call <- function(env = caller(environment()),
                     ifnotfound = stop("get_call: environment not found on stack")) {
  # Locate the stack frame whose evaluation environment is `env`.
  frame <- which_frame(env, ifnotfound)
  # The environment the call was made from; used as the scope of the head.
  rho <- caller(env)
  call <- sys.call(frame)
  head <- call[[1]]  # the function name/expression as written at the call site
  fn <- sys.function(frame)
  argnames <- names(formals(fn))
  # Result: first element is the callee quoted in its caller's environment;
  # the rest are the argument bindings captured from `env` as a dots object.
  c.dots(quo_(head, rho),
         env2dots(env, argnames));
}
#' `get_function(env)` finds the function object associated with a
#' currently executing call.
#'
#' `get_function` is similar to [`sys.function`], but is keyed by
#' environment rather than number.
#' @return `get_function` returns a closure.
#' @rdname get_call
#' @export
get_function <- function(env = caller(environment()),
                         ifnotfound = stop("get_function: environment not found on stack")) {
  # Map the environment to its stack frame index, then fetch the function
  # object executing in that frame.
  sys.function(which_frame(env, ifnotfound))
}
which_frame <- function(env, ifnotfound) {
  # Scan the call stack for the first frame identical to `env`.
  # `ifnotfound` is evaluated lazily, only when no frame matches, so a
  # default of stop(...) does not fire on the success path.
  stack <- sys.frames()
  for (idx in seq_along(stack)) {
    if (identical(stack[[idx]], env)) {
      return(idx)
    }
  }
  ifnotfound
}
|
80deecb0f6e9fdf30baa6ba37eda339313422f5e
|
b824dbf13f3d7c39a26a552e7f070402e720cfcc
|
/plot3.R
|
2123549895700ff19c9dcaab33bce3a2f01430c0
|
[] |
no_license
|
szamehlg/ExData_Plotting1
|
117699e445b83b375047df38a6ef223667ab59e3
|
8e818b86dbc0ffd554e7681b907cfe33ed8c5bd2
|
refs/heads/master
| 2020-12-26T11:15:50.316082
| 2014-11-08T21:29:40
| 2014-11-08T21:29:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,143
|
r
|
plot3.R
|
###
## Exploratory Data Analysis, Course Project 1: Generation of Plot 3.
##
## File 'household_power_consumption.txt' should be in the working directory.
##
## If not, this script contains code which will download and unzip
## the raw data into 'household_power_consumption.txt' - on a
## Windows machine only due to the required call to 'setInternet2'.
###
#
# Set the working directory. Adjust to your needs, if necessary...
# NOTE(review): a hard-coded setwd() ties the script to one machine; kept
# as-is because each plot<x>.R is required to run standalone for the course.
#
setwd("~/coursera/exploratory_data_analysis/project1")
#
# Remove (almost) everything in the working environment.
# Each 'plot<x>.R' script runs completely separate as requested.
# NOTE(review): rm(list = ls()) wipes the user's session; acceptable only in
# this standalone-script context.
#
rm(list = ls())
#
# Try to download and unzip the raw data if not present in the
# working directory - on a windows machine only.
# NOTE(review): setInternet2() has been deprecated/made defunct in modern R
# (>= 3.3); this branch only works on older Windows installations -- confirm
# the target R version before relying on it.
#
zip_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip_name <- "exdata_data_household_power_consumption.zip"
file_name <- "./household_power_consumption.txt"
if (!file.exists(file_name)) {
  if (Sys.info()["sysname"] == "Windows") {
    # Prepare the download.
    setInternet2(use=TRUE) ## Windows only!
    # zip files must be downloaded in binary mode.
    download.file(zip_url, destfile = zip_name, mode = "wb")
    # Unzip to create the txt file containing the raw data.
    unzip(zip_name)
  }
  else
    stop("Raw data is not found!")
}
#
# Load the 'Individual household electric power consumption Data Set' (see README.md for
# details: Dataset, Description, descriptions of the 9 variables and the hint that
# missing values are coded as '?'s).
#
# According to the instructions in section 'Loading the data', only data from the dates
# 2007-02-01 and 2007-02-02 will be used. Because the dataset contains 2,075,259 rows only
# 5,000 lines are read from the file so that the date varies from 30/1/2007 in the first
# line to 3/2/2007 in the last line thus including 2007-02-01 and 2007-02-02 and keeping
# the resulting dataset handy.
# The file contains date and time values as strings.
# NOTE(review): skip = 65000 assumes a fixed file layout -- the window only
# lands on the right dates for this exact data snapshot.
#
dt <- read.table(file_name, header = TRUE, sep = ";", na.strings = "?",
                 skip = 65000, nrows = 5000, stringsAsFactors = FALSE)
#
# Provide proper variable names (all in lower case, to be tidy).
# (header = TRUE above consumed a data row as names, so names are reassigned.)
#
colnames(dt) <- tolower(c("Date", "Time", "Global_active_power", "Global_reactive_power",
                          "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                          "Sub_metering_3"))
#
# Reduce the dataset to rows containing data about the first and second of February, 2007.
#
dt <- dt[dt$date == "1/2/2007" | dt$date == "2/2/2007",] # 2,880 rows
#
# Convert the Date and Time variables to POSIXlt class instances
# (where date and time are combined) resulting in an additional
# dataset's column.
# - Date is given in format dd/mm/yyyy
# - Time is given in format hh:mm:ss
# NOTE(review): strptime() yields POSIXlt; storing POSIXlt in a data.frame
# column is generally discouraged (POSIXct is preferred) -- works here for
# plotting, but verify if dt is processed further.
dt$datetime <- strptime(paste(dt$date, dt$time), "%d/%m/%Y %T")
#
# Check for NAs.
#
# Please note that dt (i.e. the dataset used for the plots) does not
# contain NAs so that no further action is required.
#
if (all(colSums(is.na(dt))==0)) {
  print("dt is NA free.")
} else {
  print("dt contains NAs!!!")
  print(colSums(is.na(dt))) # Each column with a value > 0 contains at least one NA.
}
#
# The plot shows abbreviated weekdays - in English.
# Remember the language settings. Then set them to English.
#
my_locale <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English") # To print 'Thu/Fri/Sat' instead
                                    # of German 'Do/Fr/Sa'.
#
# Create plot3 file: the three sub-metering series over time, with a legend.
#
png(file = "plot3.png")
par(mar = c(3.45, 4.1, 4.1, 0.4))
with(dt, plot(datetime, sub_metering_1, type = "n", xlab="",
              ylab="Energy sub metering"))
with(dt, lines(datetime, sub_metering_1, col = "black"))
with(dt, lines(datetime, sub_metering_2, col = "#FB0007"))
with(dt, lines(datetime, sub_metering_3, col = "#0000FF"))
legend("topright", lwd = 1, col = c("black", "#FB0007", "#0000FF"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
#
# Restore language settings.
#
Sys.setlocale("LC_TIME", my_locale)
|
d304a6d0fd6d634ed1926fe635f859e608c46b8d
|
1b9b0984597d406c4729ba4e28a6c396ead15f1e
|
/man/download_24hr_average.Rd
|
c17be85733567a55e8271b27693b4e26bdfcb5a7
|
[
"BSD-3-Clause"
] |
permissive
|
diegovalle/aire.zmvm
|
d660ed8c8359d9d650dca3cce935a5c750e83b89
|
2fa88498496cf93023faa3d20f2d345a1ef6d065
|
refs/heads/master
| 2021-11-18T20:51:18.515842
| 2021-08-13T20:52:14
| 2021-08-13T20:52:14
| 54,172,869
| 10
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,123
|
rd
|
download_24hr_average.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_archives.R
\name{download_24hr_average}
\alias{download_24hr_average}
\title{Download archives of the 24 hour averages of pollutants}
\usage{
download_24hr_average(type, year, progress = interactive())
}
\arguments{
\item{type}{type of data to download.
\itemize{
\item{"SO2"}{ - Sulfur Dioxide (parts per billion)}
\item{"PS"}{ - Suspended solids}
}}
\item{year}{a numeric vector containing the years for which to download data
(the earliest possible value is 1986 for SO2 and 1995 for PS)}
\item{progress}{whether to display a progress bar (TRUE or FALSE).
By default it will only display in an interactive session.}
}
\value{
A data.frame with pollution data.
}
\description{
Data comes from
\href{http://www.aire.cdmx.gob.mx/default.php?opc=\%27aKBhnmI=\%27&opcion=ag==}{Promedios de 24 horas de partículas suspendidas(PM10 Y PM2.5)} and
\href{http://www.aire.cdmx.gob.mx/default.php?opc=\%27aKBhnmI=\%27&opcion=aQ==}{Promedios de 24 horas de Dióxido azufre}
}
\examples{
\dontrun{
head(download_24hr_average("PS", 2017))
}
}
|
f22d71f5d7015db334c8864cf5aa109c146960bb
|
778e03320cd7cc4e0b17e829ff3ece07725f1d86
|
/R/bubbleplot-package.R
|
8357ac7cabc8128798ca1986ed367fb1bf3e814a
|
[] |
no_license
|
arni-magnusson/bubbleplot
|
abb3c809a5ebd4d44c9d754d312c70350b4c3d9d
|
8a0f49b11ed7c626b3ec94a571ced259c9da7c16
|
refs/heads/master
| 2022-05-01T09:50:01.332412
| 2022-03-20T12:18:18
| 2022-03-20T12:18:18
| 222,415,625
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 709
|
r
|
bubbleplot-package.R
|
#' @docType package
#'
#' @name bubbleplot-package
#'
#' @title Bubble Plot, a Scatter Plot with Varying Symbol Sizes and Colors
#'
#' @description
#' Draw a bubble plot, a scatterplot with varying symbol sizes and colors, or
#' add points to existing plots. A variety of input formats are supported,
#' including vectors, matrices, data frames, formulas, etc.
#'
#' @details
#' \emph{Plot:}
#' \tabular{ll}{
#'   \code{\link{bubbleplot}} \tab Bubble plot\cr
#' }
#' \emph{Examples:}
#' \tabular{ll}{
#'   \code{\link{catch.d}} \tab Catch at age data frame\cr
#'   \code{\link{catch.m}} \tab Catch at age matrix\cr
#'   \code{\link{catch.r}} \tab Catch at age residuals
#' }
#'
#' @author Arni Magnusson.
# `NA` is a dummy object that gives roxygen2 something to attach the
# package-level documentation above to.
# NOTE(review): current roxygen2 recommends the string sentinel "_PACKAGE"
# instead of `@docType package` + NA -- consider migrating.
NA
|
3854ae2e0ad83625adb3ef583bc8e05683c272b8
|
6e910000ab0b80f097ba60724834c2ca46830e9f
|
/supervised /classification_code/bikeshare/models_nongrid.R
|
a907db395ce0ce5975f9e4098b837d30ab751992
|
[
"BSD-3-Clause"
] |
permissive
|
taposh/mlearning
|
4d65f10ceb57ac13ba45bb357dfa4d0401190020
|
1f33c4223c0a33589530a16bac32a1016231737b
|
refs/heads/master
| 2022-11-01T16:47:42.781101
| 2022-10-18T20:12:52
| 2022-10-18T20:12:52
| 28,143,470
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,022
|
r
|
models_nongrid.R
|
#--------------------------------------------------------
# Non-Grid based models for kaggle-bike-sharing
# Taposh Roy
# @taposh_dr
#--------------------------------------------------------
# NOTE(review): this section assumes `bike`, `countresult`, `causal`,
# `registered` and `test` already exist in the session (built by an earlier
# pipeline script) -- confirm the run order before sourcing standalone.
# NOTE(review): `causal` appears to mean the Kaggle "casual" rider count;
# the misspelling is kept because downstream code uses it -- verify.

# Training frame for total counts: response bound as the first column.
#For counts
train_factor <- cbind(countresult,bike)
colnames(train_factor)[1] <- "count"
colnames(train_factor)
# Training frame for casual riders.
#For causal
train_factor_causal <- cbind(causal,bike)
colnames(train_factor_causal)[1] <- "causal"
colnames(train_factor_causal)
# Training frame for registered riders.
#For registered
train_factor_registered <- cbind(registered,bike)
colnames(train_factor_registered)[1] <- "registered"
colnames(train_factor_registered)
#Columns Created
colnames(bike)
colnames(test)
#write.table(train_factor,file="train_factors_h2o.csv",row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
#write.table(test,file="test_factors_h2o.csv",row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
###################
###Result Analysis
##################
# Accumulators for per-model predictions on train/test; columns are appended
# by each modeling section below.
result.train<-c()
result.train <- data.frame(countresult)
head(result.train)
colnames(result.train) <- c("actual")
result.stat<-c()
result.test<-c()
# Model formulas: the explicit variable lists are kept commented out in favor
# of the all-predictors shorthand `~ .`.
# NOTE(review): "forumla" is a typo for "formula" in these variable names;
# left unchanged because later sections reference them by this spelling.
#forumla_count <-count ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + hour + month + dayof + temp2 + atemp2 + humid2 + windspeed2
forumla_count <-count ~.
#forumla_causal <-causal ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + hour + month + dayof + temp2 + atemp2 + humid2 + windspeed2
forumla_causal <-causal ~.
#forumla_registered <-registered ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + hour + month + dayof + temp2 + atemp2 + humid2 + windspeed2
forumla_registered <-registered ~.
#times<-paste(test[,"datetime"])
# Method 1 : randomForest
##################################
#####RANDOM FOREST STARTS HERE#########
#variables
myNtree = 600
myMtry = 15
myImportance = TRUE
#set the random seed
set.seed(415)
#Counts
countFit <- randomForest(forumla_count, data=train_factor, ntree=myNtree, mtry=myMtry, importance=myImportance,na.action = na.omit)
train_count_predict_1 <- predict(countFit,bike)
test_count_predict_1 <- predict(countFit,test)
compare.rf.bike.count <- cbind(train_factor[,1],train_count_predict_1)
head(compare.rf.bike.count,10)
auc_Counts <-auc(train_factor[,1],train_count_predict_1)
rmsle_Counts_rf<-rmsle(train_factor[,1],train_count_predict_1)
rmsle_Counts_rf
myroc <- roc(train_factor[,1],train_count_predict_1)
myci <- ci(train_factor[,1],train_count_predict_1)
plot(myroc)
####create output file from dataset test with predictions
test_dt<-read.csv("test.csv")
test_dt<-(as.data.frame(test_dt))
submit <- data.frame (datetime = test_dt$datetime, count =test_count_predict_1 )
head(submit)
curtime <- Sys.time()
timestamp <- strftime(curtime,"%Y-%m-%d-%H-%M-%S")
timestamp2 <-paste("Submission_rf_method1_count",timestamp,".csv", sep="_")
write.table(submit,file=timestamp2,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
#fit and predict casual
casualFit <- randomForest(forumla_causal, data=train_factor_causal, ntree=myNtree, mtry=myMtry, importance=myImportance,na.action = na.omit)
train_causal_predict <- predict(casualFit,bike)
test_causal_predict <- predict(casualFit, test)
#fit and predict registered
registeredFit <- randomForest(forumla_registered, data=train_factor_registered, ntree=myNtree, mtry=myMtry, importance=myImportance,na.action = na.omit)
train_registered_predict <- predict(registeredFit, bike)
test_registered_predict <- predict(registeredFit, test)
#add both columns into final count, round to whole number
#### RESULT ##############
result.train <- cbind(result.train,rf=train_count_predict_1)
result.test <- cbind(result.test,rf=test_count_predict_1)
result.train <- cbind(result.train,rf=train_causal_predict)
result.test <- cbind(result.test,rf=test_causal_predict)
result.train <- cbind(result.train,rf=train_registered_predict)
result.test <- cbind(result.test,rf=test_registered_predict)
head(test_causal_predict)
head(test_registered_predict)
test_count_predict <- cbind(test_causal_predict+test_registered_predict)
train_count_predict <- cbind(train_causal_predict+train_registered_predict)
compare.rf.bike <- cbind(train_factor[,1],train_count_predict)
head(compare.rf.bike,10)
auc_Counts <-auc(train_factor[,1],train_count_predict)
rmsle_Counts<-rmsle(train_factor[,1],train_count_predict)
rmsle_Counts
myroc3 <- roc(train_factor[,1],train_count_predict)
myci2 <- ci(train_factor[,1],train_count_predict)
plot(myroc3)
test_result <- cbind(test_causal_predict+test_registered_predict)
head(test_result)
#testplot
#plot(test_result)
#plot(test_result)
test_dt<-read.csv("test.csv")
test_dt<-(as.data.frame(test_dt))
####create output file from dataset test with predictions
submit <- data.frame (datetime = test_dt$datetime, count =test_result )
head(submit)
curtime <- Sys.time()
timestamp <- strftime(curtime,"%Y-%m-%d-%H-%M-%S")
timestamp2 <-paste("Submission_rf_method1",timestamp,".csv", sep="_")
write.table(submit,file=timestamp2,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
##################################
# Method 2 : randomForest
##################################
require(randomForest)
set.seed(17)
mdl_rf <- randomForest(count ~., data=train_factor, mtry = 16,ntree=600, importance = TRUE,na.action = na.omit)
varImpPlot(mdl_rf)
#training data
prd.rf <- predict(mdl_rf, data=bike,type='response')
prd.rf <- as.data.frame(prd.rf)
compare.rf.bike <- cbind(train_factor[,1],prd.rf)
head(compare.rf.bike,10)
auc_Counts <-auc(train_factor[,1],prd.rf)
rmsle_Counts<-rmsle(train_factor[,1],prd.rf)
rmsle_Counts
myroc21 <- roc(train_factor[,1],prd.rf)
myci2 <- ci(train_factor[,1],prd.rf)
plot(myroc21)
#test data
prd.test.rf.bike <- predict(mdl_rf, newdata=as.data.frame(test),type='response')
bike.test.rf <- as.data.frame(prd.test.rf.bike)
head(bike.test.rf)
nrow(prd.rf)
head(prd.rf)
error<-sqrt((sum((train_factor[,1]-prd.rf)^2))/nrow(bike))
error
######################
# #Count Predictions
datetimes_test = as.data.frame(test_dt[,"datetime"])
colnames(datetimes_test) <- c("datetime")
nrow(datetimes_test)
nrow(prd.test.rf.bike)
Predictions<-cbind(datetimes_test,prd.test.rf.bike)
colnames(Predictions)<-c("datetime","count")
head(Predictions)
#### RESULT ##############
result.train <- cbind(result.train,rf=prd.rf)
result.test <- cbind(result.test,rf=prd.test.rf.bike)
head(result.train)
########################################################
## Output
########################################################
curtime <- Sys.time()
timestamp <- strftime(curtime,"%Y-%m-%d-%H-%M-%S")
timestamp2 <-paste("Submission_rf",timestamp,".csv", sep="_")
write.table(Predictions,file=timestamp2,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
x<-paste(timestamp2,"AUC",auc_Counts,"Logloss",rmsle_Counts,collapse=" ")
print(x)
write(x, file = "Results_compare.txt",append = TRUE, sep = " ")
########################################################
## Forumula
########################################################
#All factors determined
#forumla <- toString(paste("count ~",paste(unique(colnames(as.data.frame(bike))), collapse=" + ")))
forumla <-count ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + hour + month + dayof + temp2 + atemp2 + humid2 + windspeed2
#forumla <- count ~ season + holiday + workingday + weather + temp + atemp + humidity + windspeed + hour + month + dayof + temp2 + atemp2 + humid2 + windspeed2 + temp3 + atemp3 + humid3 + windspeed3 + logtemp + logatemp + sinhumid + coshumid + sinhumid2 + coshumid2 + sinhumid3 + coshumid3 + sinwind + coswind + sinwind2 + coswind2 + sinwind3 + coswind3 + btsaweather + btsaweathertemp + Number
# t1 + t3 + t5 + t6 + t8 + t9 + t11 + t13 + t14 + t15 + t16 + tt +
###################################
# Algorithms
##################################
head(bike)
##################################
# Method 1: GBM
##################################
library(gbm)
#Gradient Boosted Method
modelgbm<-gbm(count ~., data=as.data.frame(train_factor),distribution='poisson',n.trees =1500,train.fraction = 1.0,cv.folds=25,shrinkage = 0.001,interaction.depth=5)
#Build generalized boosted model (gradient boosted macine).
modelgbm
# check performance using an out-of-bag estimator
# OOB underestimates the optimal number of iterations
best.iter <- gbm.perf(modelgbm,plot.it = TRUE,overlay=TRUE,method="OOB",oobag.curve=TRUE)
print(best.iter)
#show(fit.gbm)
# gbmVarImp<-varImp(best.iter)
# plot(gbmVarImp)
# plot the performance # plot variable influence
#summary(modelgbm,n.trees=800) # based on the first tree
summary(modelgbm,n.trees=best.iter) # based on the estimated best number of trees
# compactly print the first and last trees for curiosity
#print(pretty.gbm.tree(modelgbm,1))
#print(pretty.gbm.tree(modelgbm,gbm$n.trees))
#bst <- gbm.perf(modelgbm,method="OOB") #Show plot of performance and store best
colnames(bike)
#colnames(train_factor)
predict.train.gbm <- predict(modelgbm, data=as.data.frame(bike), type="response")
comparegbm <- cbind(countresult,predict.train.gbm)
head(comparegbm)
#auc_gbm<-auc(countresult,predict.train.gbm)
rmsle_gbm <-rmsle(countresult,predict.train.gbm)
myrocgbm <- roc(countresult,predict.train.gbm)
plot(myrocgbm)
#Get prediction.
predict.test.gbm <- predict(modelgbm, data=test, type="response")
head(predict.test.gbm)
#### RESULT ##############
result.train <- cbind(result.train,gbm=predict.train.gbm)
result.test <- cbind(result.test,gbm=predict.test.gbm)
# curtime <- Sys.time()
# timestampgbm <- strftime(curtime,"%Y-%m-%d-%H-%M-%S")
# timestampgbm1 <-paste("Submission_gbm",timestampgbm,".csv", sep="_")
# write.table(predict.test.gbm ,file=timestampgbm1,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
#
# xgbm<-paste(timestampgbm1,"AUC",auc_gbm,"Logloss",rmsle_gbm,collapse=" ")
# print(xgbm)
# write(xgbm, file = "Results_compare.txt",append = TRUE, sep = " ")
##################################
# Method 3: Neural Net1
##################################
require(neuralnet)
## --- Neural network (nnet) section -------------------------------------
## Fits a single-hidden-layer network on `train_factor` (response `count`),
## scores train/test, records metrics, and writes timestamped submissions.
## NOTE(review): this chunk reads objects defined earlier in the script
## (train_factor, bike, test, result.train, result.test, Predictions,
## Predictions_comb, auc_Counts, rmsle_Counts) -- it cannot run standalone.
mdl_nnet <- nnet( count~., data=train_factor, size=10,decay=5e-4,rang = 0.1, maxit=1000) #Build neural net.
mdl_nnet
## NOTE(review): compute() is the neuralnet-package scorer; for an nnet fit
## predict() is the usual API -- confirm this line runs as intended.
train.prob.nnet<- compute(mdl_nnet,bike)
compare.bike <- cbind(train_factor[1],train.prob.nnet)
head(compare.bike)
test.prd.ann <- predict(mdl_nnet, newdata=test) #Get prediction.#Clean up.
compare.nn.blood <- cbind(train_factor[1],train.prob.nnet)
head(compare.nn.blood)
## Training-set metrics: AUC, RMSLE, and an ROC curve with CI.
## NOTE(review): train_factor[1] is a one-column data.frame, not a vector --
## verify auc()/rmsle()/roc() accept it in the form intended.
auc_Counts3 <-auc(train_factor[1],train.prob.nnet)
rmsle_Counts3<-rmsle(train_factor[1],train.prob.nnet)
rmsle_Counts3
myroc3 <- roc(train_factor[1],train.prob.nnet)
myci3 <- ci(train_factor[1],train.prob.nnet)
plot(myroc3)
head(test.prd.ann)
#### RESULT ##############
## Append this model's train/test scores as a new `neuralnet` column to the
## running comparison tables.
result.train <- cbind(result.train,neuralnet=train.prob.nnet)
result.test <- cbind(result.test,neuralnet=test.prd.ann)
# #Count Predictions
## Submission table: test-set id plus predicted probability.
nnPredictions<-cbind(test[,"id"],test.prd.ann)
colnames(nnPredictions)<-c("id","Made Donation in March 2007")
head(nnPredictions)
########################################################
## Output
########################################################
## Write timestamped submission files and append a one-line metrics log.
## NOTE(review): paste(..., sep="_") also joins ".csv" with "_", producing
## names like "Submission_nn_<timestamp>_.csv" -- confirm that is intended.
curtime <- Sys.time()
timestamp <- strftime(curtime,"%Y-%m-%d-%H-%M-%S")
timestamp1 <-paste("Submission_combo_nn",timestamp,".csv", sep="_")
timestamp2 <-paste("Submission_nn",timestamp,".csv", sep="_")
write.table(Predictions_comb,file=timestamp1,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
write.table(Predictions,file=timestamp2,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
x<-paste(timestamp2,"AUC",auc_Counts,"Logloss",rmsle_Counts,collapse=" ")
print(x)
write(x, file = "Results_compare.txt",append = TRUE, sep = " ")
##
## Output
##
# curtime <- Sys.time()
# timestampnn <- strftime(curtime,"%Y-%m-%d-%H-%M-%S")
# timestampnn1 <-paste("Submission_nn",timestamp,".csv", sep="_")
#
# write.table(nnPredictions,file=timestampnn1,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
#
# x<-paste(timestampnn1,"AUC",auc_nn,"Logloss",rmsle_nn,collapse=" ")
# print(x)
# write(x, file = "Results_compare.txt",append = TRUE, sep = " ")
##################################
# Method : CTREE
##################################
## Conditional-inference-tree model lives in a separate script; it
## presumably updates the shared result tables as a side effect -- the
## heads below are printed to inspect that. TODO confirm against ctree.R.
source("ctree.R")
#head(Predictionsct)
head(result.train,10)
head(result.test)
########################################################
## Output
########################################################
## Same output pattern as above, for the ctree submissions.
curtime <- Sys.time()
timestamp <- strftime(curtime,"%Y-%m-%d-%H-%M-%S")
timestamp1 <-paste("Submission_combo_ctree",timestamp,".csv", sep="_")
timestamp2 <-paste("Submission_ctree",timestamp,".csv", sep="_")
write.table(Predictions_comb,file=timestamp1,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
write.table(Predictions,file=timestamp2,row.names=FALSE,quote=FALSE,sep=",",col.names=TRUE)
x<-paste(timestamp2,"AUC",auc_Counts,"Logloss",rmsle_Counts,collapse=" ")
print(x)
write(x, file = "Results_compare.txt",append = TRUE, sep = " ")
|
4be7909873bcb019ec8cffb5f5321bf89d1be0da
|
138a742c1c5602e77cc32d979c2d393edbc6df6f
|
/R/paste.R
|
bcc4d0d5c7240888336956f686bea89a02312305
|
[
"MIT"
] |
permissive
|
jonmcalder/dm
|
d7ba794cfd6962fa71e2b76023e0b6c5ebe7318d
|
1ec42058ab4e196e5265e499dfd2dd4b594ef9f7
|
refs/heads/master
| 2021-03-31T06:12:07.470091
| 2020-03-14T12:59:25
| 2020-03-14T12:59:25
| 248,084,785
| 0
| 0
|
NOASSERTION
| 2020-03-17T22:23:06
| 2020-03-17T22:23:05
| null |
UTF-8
|
R
| false
| false
| 3,248
|
r
|
paste.R
|
#' Create R code for a dm object
#'
#' `dm_paste()` takes an existing `dm` and emits the code necessary for its creation.
#'
#' @inheritParams dm_add_pk
#' @param select Boolean, default `FALSE`. If `TRUE` will produce code for reducing to necessary columns.
#' visualizing the tables.
#' @param tab_width Indentation width for code from the second line onwards
#'
#' @details At the very least (if no keys exist in the given [`dm`]) a `dm()` statement is produced that -- when executed --
#' produces the same `dm`. In addition, the code for setting the existing primary keys as well as the relations between the
#' tables is produced. If `select = TRUE`, statements are included to select the respective columns of each table of the `dm` (useful if
#' only a subset of the columns of the original tables is used for the `dm`).
#'
#' Mind, that it is assumed, that the tables of the existing `dm` are available in the global environment under their names
#' within the `dm`.
#'
#' @return Code for producing the given `dm`.
#'
#' @export
#' @examples
#' dm_nycflights13() %>%
#'   dm_paste()
#'
#' dm_nycflights13() %>%
#'   dm_paste(select = TRUE)
dm_paste <- function(dm, select = FALSE, tab_width = 2) {
  # All code generation happens in the internal helper; this wrapper only
  # prints the result and returns the dm invisibly so it can be piped.
  # FIXME: Expose color as argument?
  generated <- dm_paste_impl(dm = dm, select = select, tab_width = tab_width, color = TRUE)
  cli::cli_code(generated)
  invisible(dm)
}
# Internal workhorse for dm_paste(): builds (as a glue string) the pipeline
# of dm verbs that would recreate `dm`. Sections are assembled in order:
# dm(...) constructor, optional dm_select() calls, color assignments,
# primary keys, foreign keys -- all joined with " %>%\n".
#   dm        - the dm object to serialize (must be unzoomed, unfiltered)
#   select    - if TRUE, emit dm_select() lines for each table's columns
#   tab_width - number of spaces used to indent continuation lines
#   color     - if TRUE, emit dm_set_colors() lines for non-default colors
# Returns a glue/character scalar of R code.
dm_paste_impl <- function(dm, select, tab_width, color) {
  check_not_zoomed(dm)
  check_no_filter(dm)
  # we assume the tables exist and have the necessary columns
  # code for including the tables
  code <- glue("dm({paste(tick_if_needed({src_tbls(dm)}), collapse = ', ')})")
  # Indentation prefix prepended to every continuation line.
  tab <- paste0(rep(" ", tab_width), collapse = "")
  if (select) {
    # adding code for selection of columns
    # One dm_select() line per table, listing its current columns.
    tbl_select <- tibble(tbl_name = src_tbls(dm), tbls = dm_get_tables_impl(dm)) %>%
      mutate(cols = map(tbls, colnames)) %>%
      mutate(code = map2_chr(
        tbl_name,
        cols,
        ~ glue("{tab}dm_select({..1}, {paste0(tick_if_needed(..2), collapse = ', ')})")
      ))
    code_select <- if (nrow(tbl_select)) summarize(tbl_select, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
    code <- glue_collapse(c(code, code_select), sep = " %>%\n")
  }
  # adding code for establishing PKs
  # FIXME: this will fail with compound keys
  tbl_pks <- dm_get_all_pks_impl(dm) %>%
    mutate(code = glue("{tab}dm_add_pk({table}, {pk_col})"))
  code_pks <- if (nrow(tbl_pks)) summarize(tbl_pks, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
  # adding code for establishing FKs
  # FIXME: this will fail with compound keys
  tbl_fks <- dm_get_all_fks_impl(dm) %>%
    mutate(code = glue("{tab}dm_add_fk({child_table}, {child_fk_cols}, {parent_table})"))
  code_fks <- if (nrow(tbl_fks)) summarize(tbl_fks, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
  if (color) {
    # Emit one dm_set_colors() per non-default table color.
    colors <- dm_get_colors(dm)
    colors <- colors[names(colors) != "default"]
    code_color <- imap_chr(colors, ~ glue("{tab}dm_set_colors({tick_if_needed(..2)} = {tick_if_needed(..1)})"))
  } else {
    code_color <- character()
  }
  # Final assembly: constructor [+ selects] + colors + PKs + FKs.
  glue_collapse(c(code, code_color, code_pks, code_fks), sep = " %>%\n")
}
|
f2e4aaee07703234d18f5eff67920816f5def552
|
f1a7b6a71400395eec7ab3445ad2142fa055634b
|
/man/bigtext.Rd
|
beee4a4e46a7f3629204009e29cc4a05627a0ed4
|
[] |
no_license
|
ketanmd/xacc
|
ada2ac185ddaae474d13b415e9e9448a32f39464
|
66e446afe96bb93c959ce003024134166dba0b97
|
refs/heads/master
| 2020-03-06T18:34:39.350188
| 2018-08-27T22:11:24
| 2018-08-27T22:11:24
| 127,009,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 282
|
rd
|
bigtext.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myacc-internal.R
\name{bigtext}
\alias{bigtext}
\title{set font size for plots}
\usage{
bigtext(size = 24)
}
\arguments{
\item{size}{font scale for output plots}
}
\description{
set font size for plots
}
|
9e1135a505642da3d1378c63de458d8199e8b987
|
78e656557b5cc6b77f8a30a3792e41b6f79f2f69
|
/aslib/man/getDefaultFeatureStepNames.Rd
|
8a54a5b68763d6f3df8392bba95fb4f4606fccb2
|
[] |
no_license
|
coseal/aslib-r
|
f7833aa6d9750f00c6955bade2b8dba6b452c9e1
|
2363baf4607971cd2ed1d784d323ecef898b2ea3
|
refs/heads/master
| 2022-09-12T15:19:20.609668
| 2022-09-02T17:48:51
| 2022-09-02T17:48:51
| 27,724,280
| 6
| 7
| null | 2021-10-17T17:34:54
| 2014-12-08T16:38:21
|
R
|
UTF-8
|
R
| false
| true
| 476
|
rd
|
getDefaultFeatureStepNames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getDefaultFeatureStepNames.R
\name{getDefaultFeatureStepNames}
\alias{getDefaultFeatureStepNames}
\title{Returns the default feature step names of scenario.}
\usage{
getDefaultFeatureStepNames(asscenario)
}
\arguments{
\item{asscenario}{[\code{\link{ASScenario}}]\cr
Algorithm selection scenario.}
}
\value{
[\code{character}].
}
\description{
Returns the default feature step names of scenario.
}
|
edd259ddc093a44a636ff596de11aa6eff6d8154
|
3632465c101324fc2ee5ad8dec22f45b30130c0c
|
/man/obsCols_ltp.Rd
|
196206aff8a93edd608d98eef015cc6cb65c2074
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
ddalthorp/GenEst
|
3921d5b4b48936dbe41667d5221b36a7d620735b
|
7dfc443913da2fb7d66d7a3553ac69714468422c
|
refs/heads/master
| 2023-08-16T23:17:34.073169
| 2023-05-29T01:23:46
| 2023-05-29T01:23:46
| 97,149,192
| 8
| 9
|
NOASSERTION
| 2023-05-25T17:45:11
| 2017-07-13T17:33:07
|
R
|
UTF-8
|
R
| false
| true
| 501
|
rd
|
obsCols_ltp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app_utilities.R
\name{obsCols_ltp}
\alias{obsCols_ltp}
\title{Select the columns from a data table that could be CP Last Time
Present observations}
\usage{
obsCols_ltp(data)
}
\arguments{
\item{data}{data table}
}
\value{
column names of columns that can be observations
}
\description{
Simple function to facilitate selection of columns that could
be Last Time Present observations for a CP model
}
|
ba7fdc0cf7b6aa3e1f4c2b3d770337a45c933669
|
3ab7e4dd53b3e88e62bfb193da41c761bb607530
|
/cachematrix.R
|
6cd8265a2ae41ea2a7cca7e6bcc1caecb2f36f24
|
[] |
no_license
|
jennicat/ProgrammingAssignment2
|
ff47f94077307e01627e9c9563b820c1bef439b0
|
2aff1dd43958968f31b4608651f846029b611548
|
refs/heads/master
| 2021-01-23T12:58:21.079215
| 2017-06-03T14:21:08
| 2017-06-03T14:21:08
| 93,215,335
| 0
| 0
| null | 2017-06-03T01:23:45
| 2017-06-03T01:23:45
| null |
UTF-8
|
R
| false
| false
| 2,198
|
r
|
cachematrix.R
|
## Programming Assignment #2 (Course 2, R programming, DataScience coursera.org)
## The purpose of these functions is to cache the results of a slow
## calculation. In this case it is for Matrix Inversion.
## The first function creates a faux matrix object that caches its own inverse.
## The second function calculates the inverse of the faux matrix object from
## from the first function - if its already cached it uses the cached value
## instead of recalculating (to save time!)
##
## Code adapted from the https://github.com/rdpeng/ProgrammingAssignment2
## sample code for caching Vectors by Roger D. Peng, last updated 2014
##
## Assumptions: the given (square) matrix is always invertible
##
## Function: makeCacheMatrix
## Builds a "cache matrix": a list of four closures sharing one environment
## that holds the matrix `x` and (lazily) its cached inverse.
##   set(y)        -- store a new matrix and invalidate the cached inverse
##   get()         -- return the stored matrix
##   setInv(solve) -- store a computed inverse
##   getInv()      -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the matrix makes any previously cached inverse stale.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setInv <- function(solve) {
    cached_inv <<- solve
  }
  getInv <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setInv = setInv,
    getInv = getInv
  )
}
## Function: cacheSolve
## Returns the inverse of the cache-matrix object `x` (built by
## makeCacheMatrix). A previously cached inverse is reused (with a
## "getting cached data" message); otherwise the inverse is computed via
## solve(), stored back into `x`, and returned. Extra arguments in `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInv()
  if (is.null(inv)) {
    # Cache miss: compute the inverse, then remember it for next time.
    inv <- solve(x$get(), ...)
    x$setInv(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
0178e56703582afd63f7d89363614b376eed0d6e
|
29586c3686eb5871bf4ea3182d917f18d750813e
|
/HMS-Summary/functions.R
|
004def890f4c2f4a48caecdb8b8e87d170ac6bc1
|
[] |
no_license
|
linxiliang/Keurig
|
6dcc8cf1ed80224726854d6d73d94e240a844e34
|
ebfea47380bd20bee175231563e55f05388febb1
|
refs/heads/master
| 2021-01-13T16:58:33.901332
| 2020-11-29T22:52:22
| 2020-11-29T22:52:22
| 77,353,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,045
|
r
|
functions.R
|
#Functions
#Time Trend Plot when first recorded purchase is on site.
#Then plot 4 things, number of units purchased, no.upcs/unit, no.brands/unit, and average pack size.
# Arguments:
#   dt         - data.table of purchases; must contain household_code,
#                purchase_date, ptype, upc, upc_ver_uc, brand_descr,
#                quantity, size1_amount
#   min.period - number of periods to plot (and, if cond=TRUE, the minimum
#                tenure a household needs to be included)
#   days       - length of one period in days
#   cond       - if TRUE, keep only households observed >= min.period periods
#   platform   - "ALL" or a specific ptype to filter on
# Side effect: writes a 2x2-panel PDF under the global `graph_dir`
# (NOTE(review): graph_dir is not an argument -- it must exist in scope).
TimeTrendPlot<-function(dt, min.period=12, days=90, cond=TRUE, platform="ALL"){
  # Work on a copy so the caller's data.table is not mutated by := below.
  dt = copy(dt)
  if (toupper(platform)!="ALL") dt = dt[ptype==platform]
  # Periods are counted per household from its own first purchase date.
  dt[, first_date := min(purchase_date), by = "household_code"]
  dt[, period:=ceiling(as.numeric(purchase_date-first_date+1)/days)]
  dt[, n_period:=max(period), by = "household_code"]
  if (cond){
    dt = dt[n_period>=min.period, ]
    fname = paste("/figs/hh_purch_trend_",platform,"_cond_",min.period,".pdf", sep="")
  } else{
    fname = paste("/figs/hh_purch_trend_",platform,"_",min.period,".pdf", sep="")
  }
  setkey(dt, NULL)
  # For each household/period: number of UPCs seen for the first time.
  # `first` marks the first period a given (household, upc, version) appears.
  upcs_tried = unique(dt[, .(household_code, period, upc, upc_ver_uc)])
  setkey(upcs_tried, household_code, upc, upc_ver_uc, period)
  upcs_tried[, first:=ifelse((1:length(period))==1, 1, 0),
             by=c("household_code", "upc", "upc_ver_uc")]
  upcs_tried = upcs_tried[, .(n_upcs=sum(first)),
                          by = c("household_code", "period")]
  # Same construction for brands seen for the first time.
  brands_tried = unique(dt[, .(household_code, period, brand_descr)])
  setkey(brands_tried, household_code, brand_descr, period)
  brands_tried[, first:=ifelse((1:length(period))==1, 1, 0),
               by=c("household_code", "brand_descr")]
  brands_tried = brands_tried[,.(n_brands=sum(first)),
                              by = c("household_code", "period")]
  # Purchase volume per household/period: units and total pack count.
  purch_level = dt[, .(n_units = sum(quantity),
                       n_packs = sum(quantity*size1_amount)),
                   by = c("household_code", "period")]
  setkey(upcs_tried, household_code, period)
  setkey(brands_tried, household_code, period)
  setkey(purch_level, household_code, period)
  # Full household x period grid, then keyed joins attach the three
  # summaries (missing combinations become NA).
  time_pattern = as.data.table(expand.grid(household_code=unique(purch_level[,household_code]),
                                           period = unique(purch_level[, period])))
  setkey(time_pattern, household_code, period)
  time_pattern = brands_tried[time_pattern]
  time_pattern = upcs_tried[time_pattern]
  time_pattern = purch_level[time_pattern]
  # Small helpers: NA -> 0, and Inf -> NA (for the packs/units ratio).
  NAto0<-function(x) ifelse(is.na(x),0,x)
  NOInf<-function(x) ifelse(is.infinite(x),as.numeric(NA),x)
  #time_pattern = time_pattern[, lapply(.SD, NAto0)]
  # Periods with no purchase count as zero units/packs.
  time_pattern[, `:=`(n_units=NAto0(n_units), n_packs=NAto0(n_packs))]
  # Per-period means, SDs and standard errors across households.
  # NOTE(review): inside .() the later sd(n_units) may pick up the column
  # or the just-computed mean depending on data.table's j evaluation --
  # confirm the intended semantics. Also note the stray empty argument
  # before `by` below (", by =") which data.table tolerates as missing.
  time_pattern = time_pattern[, .(n_units=mean(n_units),
                                  n_units.sd=sd(n_units),
                                  n_units.se=sd(n_units)/sqrt(.N),
                                  n_upcs=mean(n_upcs, na.rm=TRUE),
                                  n_upcs.sd=sd(n_upcs, na.rm=TRUE),
                                  n_upcs.se=sd(n_upcs, na.rm=TRUE)/sqrt(length(na.omit(n_upcs))),
                                  n_brands=mean(n_brands, na.rm=TRUE),
                                  n_brands.sd=sd(n_brands, na.rm=TRUE),
                                  n_brands.se=sd(n_brands, na.rm=TRUE)/sqrt(length(na.omit(n_brands))),
                                  n_packs=mean(NOInf(n_packs/n_units), na.rm=TRUE),
                                  n_packs.sd=sd(NOInf(n_packs/n_units), na.rm=TRUE),
                                  n_packs.se=sd(NOInf(n_packs/n_units), na.rm=TRUE)/
                                    sqrt(length(na.omit(NOInf(n_packs/n_units))))),
                              , by = "period"]
  setkey(time_pattern, period)
  time_pattern = time_pattern[period<=min.period]
  #Plotting and export as pdf file
  # 2x2 panel: units, new UPCs, new brands, mean pack size; dashed lines
  # are +/- one standard error around each mean.
  pdf(file=paste(graph_dir, fname, sep=""), width=14, height=10)
  par(mfrow=c(2,2))
  plot(time_pattern[, period], time_pattern[, n_units],
       ylim=c(min(time_pattern[, (n_units-n_units.se)]),max(time_pattern[, (n_units+n_units.se)])),
       type="o", xlab = paste("Period (Per ", days, " Days)",sep=""), ylab="Units")
  lines(time_pattern[, period], time_pattern[, (n_units+n_units.se)], lty=2)
  lines(time_pattern[, period], time_pattern[, (n_units-n_units.se)], lty=2)
  plot(time_pattern[, period], time_pattern[, n_upcs],
       ylim=c(min(time_pattern[, (n_upcs-n_upcs.se)]),max(time_pattern[, (n_upcs+n_upcs.se)])),
       type="o", xlab = paste("Period (Per ", days, " Days)",sep=""), ylab="No of New UPCs")
  lines(time_pattern[, period], time_pattern[, (n_upcs+n_upcs.se)], lty=2)
  lines(time_pattern[, period], time_pattern[, (n_upcs-n_upcs.se)], lty=2)
  plot(time_pattern[, period], time_pattern[, n_brands],
       ylim=c(min(time_pattern[, (n_brands-n_brands.se)]),max(time_pattern[, (n_brands+n_brands.se)])),
       type="o", xlab = paste("Period (Per ", days, " Days)",sep=""), ylab="No of New Brands")
  lines(time_pattern[, period], time_pattern[, (n_brands+n_brands.se)], lty=2)
  lines(time_pattern[, period], time_pattern[, (n_brands-n_brands.se)], lty=2)
  plot(time_pattern[, period], time_pattern[, n_packs],
       ylim=c(min(time_pattern[, (n_packs-n_packs.se)]),max(time_pattern[, (n_packs+n_packs.se)])),
       type="o", xlab = paste("Period (Per ", days, " Days)",sep=""), ylab="Mean Pack Size")
  lines(time_pattern[, period], time_pattern[, (n_packs+n_packs.se)], lty=2)
  lines(time_pattern[, period], time_pattern[, (n_packs-n_packs.se)], lty=2)
  dev.off()
  # Restore single-panel layout for subsequent plots.
  par(mfrow=c(1,1))
}
# Platform-level trend plots over calendar periods.
# Arguments:
#   dt    - purchase data.table with ptype, purchase_date, quantity,
#           size1_amount, total_price_paid, coupon_value,
#           projection_factor, upc, upc_ver_uc, brand_descr, household_code
#   days  - calendar length of one period
#   start - date anchoring period boundaries
# Side effect: writes one PDF per metric under the global `graph_dir`
# (NOTE(review): graph_dir is not an argument -- must exist in scope).
PlatformTrend <- function(dt, days=90, start=as.Date("2004-01-01")){
  # Copy so := below does not mutate the caller's data.table.
  dt = copy(dt)
  # Period = the start date of the `days`-long calendar bin.
  dt[, period:=start + days*(ceiling(as.numeric(purchase_date-start+1)/days)-1)]
  # Projection-weighted purchase aggregates per platform/period.
  period_purch=dt[,.(units=.N*sum(quantity*projection_factor)/sum(projection_factor),
                     packs=.N*sum(quantity*size1_amount*projection_factor)/
                       sum(projection_factor),
                     spent=.N*sum(projection_factor*(total_price_paid-coupon_value))/
                       sum(projection_factor),
                     N_prods=length(unique(upc)),
                     N_brands=length(unique(brand_descr)),
                     N_hh=length(unique(household_code))),
                  by = c("ptype", "period")]
  period_purch[, `:=`(price = spent/packs)]
  #Compute concentration ratio and HHI of UPCs and Brands by platform
  # UPC-level spending shares; cprod5 = top-5 concentration ratio,
  # prodHHI = HHI truncated to the top 10 UPCs.
  upc_share = dt[,.(spent=.N*sum(projection_factor*(total_price_paid-coupon_value))/
                      sum(projection_factor)),
                 by = c("ptype", "period", "upc", "upc_ver_uc")]
  upc_share[, share:=spent/sum(spent), by = c("ptype", "period")]
  upc_share = upc_share[order(-share), ]
  setkey(upc_share, ptype, period)
  upc_share = upc_share[, .(cprod5=sum(share[1:min(length(share), 5)]),
                            prodHHI=sum(share[1:min(length(share), 10)]^2)),
                        by = c("ptype", "period")]
  # Brand-level analogue: top-3 concentration and top-3-truncated HHI.
  brand_share = dt[,.(spent=.N*sum(projection_factor*(total_price_paid-coupon_value))/
                        sum(projection_factor)),
                   by = c("ptype", "period", "brand_descr")]
  brand_share[, share:=spent/sum(spent), by = c("ptype", "period")]
  brand_share = brand_share[order(-share), ]
  setkey(brand_share, ptype, period)
  brand_share = brand_share[, .(cbrand3=sum(share[1:min(length(share), 3)]),
                                brandHHI=sum(share[1:min(length(share), 3)]^2)),
                            by = c("ptype", "period")]
  # Keyed joins: merge brand metrics onto UPC metrics, then onto the
  # purchase aggregates.
  market_share = brand_share[upc_share]
  setkeyv(period_purch, c("ptype", "period"))
  period_purch = period_purch[market_share]
  # Platforms to plot (one panel each; currently only KEURIG).
  types = c("KEURIG")
  #Plotting and export as pdf file
  vars = c("packs", "price", "N_prods", "N_brands",
           "N_hh", "cprod5", "prodHHI", "cbrand3", "brandHHI")
  for (v in vars){
    # Map metric name to axis label; "OTHER" flags count-like metrics that
    # get log10-transformed before plotting.
    yname = ifelse(v=="price", "Price",
                   ifelse(v=="N_brands", "No. of brands",
                          ifelse(v=="cprod5", "Concentration ratio - Top 5 UPCs",
                                 ifelse(v=="prodHHI", "HHI (Limit to top 10 UPCs",
                                        ifelse(v=="cbrand3", "Concentration ratio - Top 3 Brands",
                                               ifelse(v=="brandHHI", "HHI (Limit to 3 Brands)",
                                                      "OTHER"))))))
    if(yname=="OTHER"){
      # NOTE(review): eval(parse(text=...)) is fragile; a computed-column
      # reference (e.g. get(v) or .SDcols) would be safer if revisited.
      period_purch[, c(v):=log10(eval(parse(text=v)))]
    }
    yname = ifelse(v=="packs", "Packs (log 10)",
                   ifelse(v=="N_prods", "No. of UPCs (log 10)",
                          ifelse(v=="N_hh", "No. of HH making a purchase (log 10)", yname)))
    pdf(file=paste(graph_dir, "/figs/",v,"_consume_by_platform.pdf", sep=""), width=14, height=10)
    par(mfrow=c(2,2))
    # Shared axis limits across all platform panels for comparability.
    ymin = min(period_purch[, v, with=FALSE])
    ymax = max(period_purch[, v, with=FALSE])
    xmin = min(period_purch[, period])
    xmax = max(period_purch[, period])
    for(type in types){
      print(period_purch[ptype==type,
                         plot(period, eval(parse(text=v)), main=type, type="o", ylab=yname,
                              xlab = paste("Period (Per ", days, " Days)",sep=""),
                              xlim = c(xmin, xmax), ylim=c(ymin, ymax))])
    }
    dev.off()
  }
  # Restore single-panel layout.
  par(mfrow=c(1,1))
}
|
db760b5ed8fd8fbfca1d4e4848523caa7c84ad49
|
5e70dd71ba15eff2de5fbb134f356d6a062551a0
|
/Programs/makeBigPlotTrendsObj2.R
|
3d76927082c7acee846f60c4082b86556742d718
|
[] |
no_license
|
jasmyace/PrairieNPS
|
96989bbf29a12eff98594c70a0624b0b2b736297
|
e9afb6a3db56fb79fad32b95774b9b45c725b2df
|
refs/heads/master
| 2020-04-06T06:59:18.863854
| 2016-08-30T14:18:13
| 2016-08-30T14:18:13
| 63,260,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,053
|
r
|
makeBigPlotTrendsObj2.R
|
# Renders the full "Objective 2" trend figure as one very large PNG.
# The figure is a 90x28 layout() grid: header rows, spatial-trend maps,
# fixed-effect and histogram panels, then one row per transect.
#   tc - passed through to the plotting helpers (plotSpatialTrends,
#        plotAllObj1, plotOneObj1); its meaning is defined by those
#        helpers, which are not visible here.
# NOTE(review): this function depends on many globals (dat, STransects,
# fitHu/fitHl/fitHa, fitSu/..., fitTu/..., fitAu/..., plotText, plotEmpty,
# plotSpatialTrends, plotAllObj1, plotHistObj1, plotOneObj1,
# plotTransectMap, brewer.pal) and writes to a hard-coded UNC path --
# consider parameterizing the output location.
makeBigPlotTrendsObj2 <- function(tc){
  png(paste0("//lar-file-srv/Data/NPS/Prairie/Analysis/Trending/Objective2-Trends.png"),width=36,height=90,units="in",res=300)
  # ---- Medium, with 4.
  # `string` enumerates panel ids cell-by-cell for the 90x28 layout matrix;
  # `widths`/`heights` interleave panel sizes with thin spacer rows/columns.
  string <- c(rep(1,28),rep(2,28),seq(3,6),rep(7,5),8,rep(9,5),10,rep(11,5),12,rep(13,5),14,rep(15,28),seq(16,19),rep(20,5),21,rep(22,5),23,rep(24,5),25,rep(26,5),27,rep(28,28),seq(29,56,1),rep(57,28),seq(58,85,1),rep(86,28),seq(87,114,1),rep(115,28),seq(116,143,1),rep(144,28),seq(145,172,1),rep(173,28),seq(174,201,1),rep(202,28),seq(203,230,1),rep(231,28),seq(232,259,1),rep(260,28),seq(261,288,1),rep(289,28),seq(290,317,1),rep(318,28),seq(319,346,1),rep(347,28),seq(348,375,1),rep(376,28),seq(377,404,1),rep(405,28),seq(406,433,1),rep(434,28),seq(435,462,1),rep(463,28),seq(464,491,1),rep(492,28),seq(493,520,1),rep(521,28),seq(522,549,1),rep(550,28),seq(551,578,1),rep(579,28),seq(580,607,1),rep(608,28),seq(609,636,1),rep(637,28),seq(638,665,1),rep(666,28),seq(667,694,1),rep(695,28),seq(696,723,1),rep(724,28),seq(725,752,1),rep(753,28),seq(754,781,1),rep(782,28),seq(783,810,1),rep(811,28),seq(812,839,1),rep(840,28),seq(841,868,1),rep(869,28),seq(870,897,1),rep(898,28),seq(899,926,1),rep(927,28),seq(928,955,1),rep(956,28),seq(957,984,1),rep(985,28),seq(986,1013,1),rep(1014,28),seq(1015,1042,1),rep(1043,28),seq(1044,1071,1),rep(1072,28),seq(1073,1100,1),rep(1101,28),seq(1102,1129,1),rep(1130,28),seq(1131,1158,1),rep(1159,28),seq(1160,1187,1),rep(1188,28),seq(1189,1216,1),rep(1217,28),seq(1218,1245,1),rep(1246,28))
  widths <- c(0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005,0.06643,0.005)
  heights <- c(0.02,0.005,0.01,0.005,0.04,0.005,0.01,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.005,0.01683,0.01)
  a <- layout(matrix(string,90,28,byrow=TRUE),widths=widths,heights=heights)
  layout.show(a)
  # ---- Row 1
  # Figure title banner.
  plotText(paste0("Objective 2 Analytics"),5)
  # ---- Row 2
  plotEmpty()
  # ---- Row 3
  # Column-group headers for the four vegetation groups.
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotText("Herbaceous",3.5)
  plotEmpty()
  plotText("Shrubs",3.5)
  plotEmpty()
  plotText("Trees",3.5)
  plotEmpty()
  plotText("Composite",3.5)
  plotEmpty()
  # ---- Row 4
  plotEmpty()
  # ---- Row 5
  # Spatial trend maps, one per vegetation group (untransformed fits).
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotSpatialTrends(fitHu,STransects,2012,tc,"pCoveru","Blues")
  plotEmpty()
  plotSpatialTrends(fitSu,STransects,2012,tc,"pCoveru","Oranges")
  plotEmpty()
  plotSpatialTrends(fitTu,STransects,2012,tc,"pCoveru","Greens")
  plotEmpty()
  plotSpatialTrends(fitAu,STransects,2012,tc,"pCoveru","Reds")
  plotEmpty()
  # ---- Row 6
  plotEmpty()
  # ---- Row 7
  # Sub-headers: the three response transformations per vegetation group.
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotText("Untransformed",2.5)
  plotEmpty()
  plotText("Logit",2.5)
  plotEmpty()
  plotText("Arc-Sine",2.5)
  plotEmpty()
  plotText("Untransformed",2.5)
  plotEmpty()
  plotText("Logit",2.5)
  plotEmpty()
  plotText("Arc-Sine",2.5)
  plotEmpty()
  plotText("Untransformed",2.5)
  plotEmpty()
  plotText("Logit",2.5)
  plotEmpty()
  plotText("Arc-Sine",2.5)
  plotEmpty()
  plotText("Untransformed",2.5)
  plotEmpty()
  plotText("Logit",2.5)
  plotEmpty()
  plotText("Arc-Sine",2.5)
  plotEmpty()
  # ---- Row 8
  plotEmpty()
  # ---- Row 9
  # Fixed-effects trend panels, 4 groups x 3 transformations.
  plotText("Piepho & Ogutu\nPlot of Fixed Effects",1.5)
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotAllObj1(dat,fitHu,"VegType","H",tc,brewer.pal(9,"Paired")[1:2],0.10,2008) # blue
  plotEmpty()
  plotAllObj1(dat,fitHl,"VegType","H",tc,brewer.pal(9,"Paired")[1:2],0.10,2008) # blue
  plotEmpty()
  plotAllObj1(dat,fitHa,"VegType","H",tc,brewer.pal(9,"Paired")[1:2],0.10,2008) # blue
  plotEmpty()
  plotAllObj1(dat,fitSu,"VegType","S",tc,brewer.pal(9,"Paired")[7:8],0.10,2008) # orange
  plotEmpty()
  plotAllObj1(dat,fitSl,"VegType","S",tc,brewer.pal(9,"Paired")[7:8],0.10,2008) # orange
  plotEmpty()
  plotAllObj1(dat,fitSa,"VegType","S",tc,brewer.pal(9,"Paired")[7:8],0.10,2008) # orange
  plotEmpty()
  plotAllObj1(dat,fitTu,"VegType","T",tc,brewer.pal(9,"Paired")[3:4],0.10,2008) # green
  plotEmpty()
  plotAllObj1(dat,fitTl,"VegType","T",tc,brewer.pal(9,"Paired")[3:4],0.10,2008) # green
  plotEmpty()
  plotAllObj1(dat,fitTa,"VegType","T",tc,brewer.pal(9,"Paired")[3:4],0.10,2008) # green
  plotEmpty()
  plotAllObj1(dat,fitAu,"VegType","A",tc,brewer.pal(9,"Paired")[5:6],0.10,2008) # orange
  plotEmpty()
  plotAllObj1(dat,fitAl,"VegType","A",tc,brewer.pal(9,"Paired")[5:6],0.10,2008) # orange
  plotEmpty()
  plotAllObj1(dat,fitAa,"VegType","A",tc,brewer.pal(9,"Paired")[5:6],0.10,2008) # orange
  plotEmpty()
  # ---- Row 10
  plotEmpty()
  # ---- Row 11
  # Outcome histograms, 4 groups x 3 transformations.
  plotText("Histogram of\nOutcomes",1.5)
  plotEmpty()
  plotEmpty()
  plotEmpty()
  plotHistObj1(dat,"VegType","H",5,"pCoveru")
  plotEmpty()
  plotHistObj1(dat,"VegType","H",5,"pCoverl")
  plotEmpty()
  plotHistObj1(dat,"VegType","H",5,"pCovera")
  plotEmpty()
  plotHistObj1(dat,"VegType","S",5,"pCoveru")
  plotEmpty()
  plotHistObj1(dat,"VegType","S",5,"pCoverl")
  plotEmpty()
  plotHistObj1(dat,"VegType","S",5,"pCovera")
  plotEmpty()
  plotHistObj1(dat,"VegType","T",95,"pCoveru")
  plotEmpty()
  plotHistObj1(dat,"VegType","T",95,"pCoverl")
  plotEmpty()
  plotHistObj1(dat,"VegType","T",95,"pCovera")
  plotEmpty()
  plotHistObj1(dat,"VegType","A",5,"pCoveru")
  plotEmpty()
  plotHistObj1(dat,"VegType","A",5,"pCoverl")
  plotEmpty()
  plotHistObj1(dat,"VegType","A",5,"pCovera")
  plotEmpty()
  # ---- Row 12
  plotEmpty()
  # ---- Now, plot each individual transect one-by-one.
  # One row per transect: label, map, then the 12 per-transect trend panels.
  LocCodes <- unique(dat$LocCode)
  nLocCodes <- length(LocCodes)
  for(k in 1:nLocCodes){
    locCode <- LocCodes[k]
    # ---- Row j + 12
    plotText(paste0("Transect\n",locCode),1.5)
    plotEmpty()
    plotTransectMap(locCode)
    plotEmpty()
    plotOneObj1(dat,fitHu,"VegType","H",tc,brewer.pal(9,"Paired")[1:2],locCode,0.10,2008) # blue
    plotEmpty()
    plotOneObj1(dat,fitHl,"VegType","H",tc,brewer.pal(9,"Paired")[1:2],locCode,0.10,2008) # blue
    plotEmpty()
    plotOneObj1(dat,fitHa,"VegType","H",tc,brewer.pal(9,"Paired")[1:2],locCode,0.10,2008) # blue
    plotEmpty()
    plotOneObj1(dat,fitSu,"VegType","S",tc,brewer.pal(9,"Paired")[7:8],locCode,0.10,2008) # orange
    plotEmpty()
    plotOneObj1(dat,fitSl,"VegType","S",tc,brewer.pal(9,"Paired")[7:8],locCode,0.10,2008) # orange
    plotEmpty()
    plotOneObj1(dat,fitSa,"VegType","S",tc,brewer.pal(9,"Paired")[7:8],locCode,0.10,2008) # orange
    plotEmpty()
    plotOneObj1(dat,fitTu,"VegType","T",tc,brewer.pal(9,"Paired")[3:4],locCode,0.10,2008) # green
    plotEmpty()
    plotOneObj1(dat,fitTl,"VegType","T",tc,brewer.pal(9,"Paired")[3:4],locCode,0.10,2008) # green
    plotEmpty()
    plotOneObj1(dat,fitTa,"VegType","T",tc,brewer.pal(9,"Paired")[3:4],locCode,0.10,2008) # green
    plotEmpty()
    plotOneObj1(dat,fitAu,"VegType","A",tc,brewer.pal(9,"Paired")[5:6],locCode,0.10,2008) # orange
    plotEmpty()
    plotOneObj1(dat,fitAl,"VegType","A",tc,brewer.pal(9,"Paired")[5:6],locCode,0.10,2008) # orange
    plotEmpty()
    plotOneObj1(dat,fitAa,"VegType","A",tc,brewer.pal(9,"Paired")[5:6],locCode,0.10,2008) # orange
    plotEmpty()
    # ---- Row (j + 1) + 9
    plotEmpty()
  }
  # ---- Row End
  plotText("This is going to be a boring set of footnotes.",0.5)
  dev.off()
}
|
2f267d6373cac67c9c8e9f27cbeb2f9c17c37681
|
f680ff6e25a828bbac8b244f9061a3fa71125836
|
/R/MultiDataSet-add_table.R
|
cc8fa0ff3afb3d5ff746724ab85497a2808e4930
|
[
"MIT"
] |
permissive
|
isglobal-brge/MultiDataSet
|
db7348454ffc202469b7354d0b7252eedc7659a4
|
c4bea804bd9b8b53b8d96928d9148f09787f475e
|
refs/heads/master
| 2021-10-07T17:58:31.164306
| 2021-10-07T13:34:50
| 2021-10-07T13:34:50
| 80,110,013
| 2
| 0
|
MIT
| 2021-01-29T10:27:17
| 2017-01-26T11:45:31
|
R
|
UTF-8
|
R
| false
| false
| 1,958
|
r
|
MultiDataSet-add_table.R
|
#' @describeIn MultiDataSet Method to add a \code{matrix} to \code{MultiDataSet}.
#' @aliases MultiDataSet-methods
setMethod(
  f = "add_table",
  signature = c("MultiDataSet", "matrix"),
  definition = function(object, set, dataset.type, dataset.name = NULL, warnings = TRUE,
                        overwrite = FALSE) {
    # Full slot name is "<type>" or "<type>+<name>".
    dataset.name <- paste(c(dataset.type, dataset.name), collapse = "+")
    slot_taken <- dataset.name %in% names(object)
    if (slot_taken && !overwrite) {
      stop("There is already an object in this slot. Set overwrite = TRUE to overwrite the previous set.")
    }
    if (slot_taken && warnings) {
      warning("Slot '", dataset.name, "' is already set in 'MultiDataSet'. Previous content will be overwritten.")
    }
    # Row/column names become feature/sample ids, so they must exist and
    # be unique.
    if (is.null(colnames(set))) {
      stop("Set must contain colnames.")
    }
    if (sum(duplicated(colnames(set))) > 0) {
      stop("Colnames of set must be unique.")
    }
    if (is.null(rownames(set))) {
      stop("Set must contain rownames.")
    }
    if (sum(duplicated(rownames(set))) > 0) {
      stop("Rownames of set must be unique.")
    }
    # The matrix is stored inside an environment under the name "mat".
    storage <- new("environment")
    assign("mat", set, storage)
    object@assayData[[dataset.name]] <- storage
    # Minimal pheno/feature annotation: an `id` column mirroring the
    # column and row names respectively.
    pheno <- as(data.frame(id = colnames(set)), "AnnotatedDataFrame")
    rownames(pheno) <- pheno$id
    feats <- as(data.frame(id = rownames(set)), "AnnotatedDataFrame")
    rownames(feats) <- feats$id
    object@phenoData[[dataset.name]] <- list(main = pheno)
    object@featureData[[dataset.name]] <- list(main = feats)
    object@rowRanges[[dataset.name]] <- NA
    # Reconstruction hook: pull the raw matrix back out of its environment.
    object@return_method[[dataset.name]] <- function(env, phe, fet, extra) {
      env$mat
    }
    object
  }
)
|
c3e7a38b23ac56d10a9aa11e131568f9e5260006
|
770ce91dddf1804af88d4ce763d8371a94079498
|
/R/DNAcopyMethods.R
|
8bad2ef82a6ad100fc8da2844744fc12534e2699
|
[] |
no_license
|
rptashkin/DNAcopy
|
564d58a8cf173a91a07e3577f2200b537dbb7fd7
|
3d917491857fe08010fd1747029f8bb60f00179a
|
refs/heads/master
| 2020-05-30T10:09:40.082310
| 2017-04-24T19:50:57
| 2017-04-24T19:50:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,308
|
r
|
DNAcopyMethods.R
|
# Constructor for a CNA (Copy Number Array) object: a data.frame with
# columns chrom, maploc, then one column per sample, carrying a
# "data.type" attribute and class c("CNA", "data.frame").
#   genomdat  - numeric vector/matrix/data.frame of array values
#               (columns = samples)
#   chrom     - chromosome for each marker
#   maploc    - numeric map position for each marker
#   data.type - "logratio" or "binary"
#   sampleid  - optional sample names; defaults to "Sample k"
#   presorted - TRUE if rows are already ordered by (chrom, maploc)
CNA <- function(genomdat, chrom, maploc, data.type=c("logratio","binary"),
                sampleid=NULL, presorted=FALSE)
{
  if (is.data.frame(genomdat)) genomdat <- as.matrix(genomdat)
  if (!is.numeric(genomdat)) stop("genomdat must be numeric")
  if (!is.numeric(maploc)) stop("maploc must be numeric")
  data.type <- match.arg(data.type)
  # Keep only markers with a known chromosome and a finite map position.
  ina <- (!is.na(chrom) & is.finite(maploc))
  if (sum(!ina)>0)
    warning("markers with missing chrom and/or maploc removed\n")
  if (!presorted) {
    sortindex <- which(ina)[order(chrom[ina], maploc[ina])]
  } else {
    sortindex <- which(ina)
  }
  if (is.factor(chrom)) chrom <- as.character(chrom)
  # added to allow arrays of single dimension - results from data.frame ops
  if (is.array(genomdat)) {
    if (length(dim(genomdat)) == 1) {
      genomdat <- as.matrix(genomdat)
    }
  }
  if (is.vector(genomdat)) genomdat <- as.matrix(genomdat)
  # Sample names: user-supplied if the length matches, otherwise the
  # default "Sample k" labels.
  if (!missing(sampleid)) {
    if (length(sampleid) != ncol(genomdat)) {
      warning("length(sampleid) and ncol(genomdat) differ, names ignored\n")
      sampleid <- paste("Sample", 1:ncol(genomdat))
    }
  } else {
    sampleid <- paste("Sample", 1:ncol(genomdat))
  }
  colnames(genomdat) <- sampleid
  # I() keeps chrom as-is (no factor conversion) inside the data.frame.
  zzz <- data.frame(chrom=I(chrom), maploc=maploc, genomdat)
  zzz <- zzz[sortindex,]
  # check for duplicate probes (i.e. repeated maploc within a chromosome)
  # NOTE(review): this check runs on the INPUT order of maploc/chrom, not
  # on the sorted rows -- for unsorted input it may miss duplicates that
  # only become adjacent after sorting. Confirm whether that is intended.
  if (length(ii <- which(diff(maploc)==0)) > 0) {
    if (any(chrom[ii]==chrom[ii+1])) warning("array has repeated maploc positions\n")
  }
  attr(zzz, "data.type") <- data.type
  class(zzz) <- c("CNA","data.frame")
  zzz
}
# Subset a CNA object by chromosome and/or sample.
#   chromlist  - chromosomes to keep (default: all present in x)
#   samplelist - samples to keep, as numeric indices or sample names
#                (default: all samples)
# Returns a CNA object containing chrom, maploc and the selected sample
# columns, preserving the "data.type" attribute.
subset.CNA <- function(x, chromlist=NULL, samplelist=NULL, ...)
{
  if (!inherits(x, 'CNA')) stop("First arg must be of class CNA")
  all_chrom <- unique(x$chrom)
  if (missing(chromlist)) chromlist <- all_chrom
  if (length(setdiff(chromlist, all_chrom)) > 0) {
    stop("chromlist contains chromosomes not in the data")
  }
  if (anyDuplicated(chromlist) > 0) {
    warning("duplicate chromosomes in chromlist removed")
  }
  # Sample columns start at position 3 (after chrom and maploc).
  sampleid <- colnames(x)[-(1:2)]
  if (missing(samplelist)) samplelist <- sampleid
  nsample <- length(sampleid)
  # Accept either valid numeric indices or valid sample names.
  bad_as_numbers <- length(setdiff(samplelist, 1:nsample)) > 0
  bad_as_names <- length(setdiff(samplelist, sampleid)) > 0
  if (bad_as_numbers && bad_as_names) {
    stop("samplelist should be a list of valid sample numbers or names")
  }
  # Convert names to sample indices (column position minus the two
  # leading chrom/maploc columns).
  if (!is.numeric(samplelist)) samplelist <- match(samplelist, names(x)) - 2
  if (anyDuplicated(samplelist) > 0) {
    warning("duplicate samples in samplelist removed")
  }
  samplelist <- unique(samplelist)
  out <- x[x$chrom %in% chromlist, c(1:2, samplelist + 2)]
  attr(out, "data.type") <- attr(x, "data.type")
  out
}
# Outlier smoothing for a CNA object (logratio data only). For each sample
# the Fortran routine "smoothLR" replaces outliers relative to a trimmed-SD
# scale within a window of `smooth.region` neighbors.
#   smooth.region    - window half-width passed to the Fortran routine
#   outlier.SD.scale - multiples of trimmed SD that define an outlier
#   smooth.SD.scale  - multiples of trimmed SD the outlier is shrunk to
#   trim             - trimming fraction used for the variance estimate
# Returns the CNA object with smoothed values written back in place;
# non-finite values are left untouched.
# NOTE(review): relies on trimmed.variance() defined elsewhere in this
# package and on the compiled DNAcopy Fortran code.
smooth.CNA <- function(x, smooth.region=10, outlier.SD.scale=4,
                       smooth.SD.scale=2, trim=0.025)
{
  if (!inherits(x, 'CNA')) stop("First arg must be of class CNA")
  nsample <- ncol(x)-2
  chrom <- x$chrom
  uchrom <- unique(chrom)
  if(attr(x, "data.type")=="binary") stop("Not smoothing binary data ")
  # NOTE(review): 1:nsample misbehaves if nsample is 0 (no sample columns).
  for (isamp in 1:nsample) {
    genomdat <- x[,isamp+2]
    # Only finite values are smoothed; NAs/Inf keep their positions.
    ina <- which(is.finite(genomdat))
    trimmed.SD <- sqrt(trimmed.variance(genomdat[ina], trim))
    outlier.SD <- outlier.SD.scale*trimmed.SD
    smooth.SD <- smooth.SD.scale*trimmed.SD
    k <- smooth.region
    n <- length(genomdat[ina])
    # Per-chromosome marker counts among the finite values, so the
    # Fortran routine does not smooth across chromosome boundaries.
    cfrq <- diff(c(which(!duplicated(chrom[ina])), n+1))
    nchr <- length(cfrq) # to allow for some chrom with all missing
    smoothed.data <- .Fortran("smoothLR",
                              as.integer(n),
                              as.double(genomdat[ina]),
                              as.integer(nchr),
                              as.integer(cfrq),
                              sgdat=double(n),
                              as.integer(k),
                              as.double(outlier.SD),
                              as.double(smooth.SD),
                              PACKAGE = "DNAcopy")$sgdat
    x[,isamp+2][ina] <- smoothed.data
  }
  x
}
# Print method for CNA objects: a three-line summary of sample count,
# probe count and data type (columns 1-2 are chrom/maploc, hence ncol-2).
print.CNA <- function(x, ...)
{
  if (!inherits(x, 'CNA')) stop("First arg must be of class CNA")
  n_samples <- ncol(x) - 2
  n_probes <- nrow(x)
  cat("Number of Samples", n_samples,
      "\nNumber of Probes ", n_probes,
      "\nData Type ", attr(x, "data.type"), "\n")
}
# Plot method for DNAcopy segmentation results.
#
# plot.type:
#   "whole"         - all probes for one sample per page.
#   "plateau"       - probes reordered by increasing segment mean.
#   "samplebychrom" - one page per sample, one panel per chromosome.
#   "chrombysample" - one page per chromosome, one panel per sample.
# xmaploc       - put genomic map location on the x-axis; if chromosomes
#                 restart their coordinates, later chromosomes are offset.
# altcol        - alternate the point color between adjacent chromosomes.
# include.means - overlay segment means as horizontal red segments.
# zeroline      - draw a horizontal reference line at 0.
# The remaining arguments control layouts, plotting characters, colors,
# y-limits and line widths; defaults are filled in below when missing.
# Returns nothing useful; called for its side effect of drawing plots.
plot.DNAcopy <- function (x, plot.type=c("whole", "plateau", "samplebychrom",
                          "chrombysample"), xmaploc=FALSE, altcol=TRUE,
                          sbyc.layout=NULL, cbys.nchrom=1, cbys.layout=NULL,
                          include.means=TRUE, zeroline=TRUE, pt.pch=NULL,
                          pt.cex=NULL, pt.cols=NULL, segcol=NULL, zlcol=NULL,
                          ylim=NULL, lwd=NULL, ...)
{
  if (!inherits(x, "DNAcopy"))
    stop("First arg must be the result of segment")
  xdat <- x$data
  nsample <- ncol(xdat)-2
  # default y-limits: symmetric about zero, wide enough for all samples
  if(missing(ylim)) {
    uylim <- max(abs(xdat[,-(1:2)]), na.rm=TRUE)
    ylim <- c(-uylim, uylim)
  }
  xres <- x$output
  if(dev.cur() <= 1) dev.new()
  int.dev <- dev.interactive()
  plot.type <- match.arg(plot.type)
  op <- par(no.readonly = TRUE)
  parask <- par("ask")
  # on interactive devices, pause between pages when there are many samples
  if (int.dev & !parask & nsample>1) par(ask = TRUE)
  sampleid <- colnames(xdat)[-(1:2)]
  chrom0 <- xdat$chrom
  uchrom <- unique(chrom0)
  nchrom <- length(uchrom)
  if (xmaploc) {
    maploc0 <- as.numeric(xdat$maploc)
    # if map locations restart at each chromosome (chr2 starts below chr1's
    # max), offset each chromosome by the cumulative preceding length
    if(length(uchrom)>1 & max(maploc0[chrom0==uchrom[1]]) > min(maploc0[chrom0==uchrom[2]])) {
      plen <- max(maploc0[chrom0==uchrom[1]])
      for(i in 2:nchrom) {
        maploc0[chrom0==uchrom[i]] <- plen + maploc0[chrom0==uchrom[i]]
        plen <- max(maploc0[chrom0==uchrom[i]])
      }
    }
  }
  if (missing(pt.pch)) pt.pch <- "."
  if (missing(pt.cex)) {
    if (pt.pch==".") { pt.cex <- 3}
    else {pt.cex <- 1}
  }
  # color index per probe: alternate 1/2 between consecutive chromosomes
  wcol0 <- rep(1, length(chrom0))
  if (altcol) {
    j <- 0
    for (i in uchrom) {
      j <- (j+1) %% 2
      wcol0[chrom0==i] <- 1+j
    }
  }
  if (missing(pt.cols)) pt.cols <- c("black","green")
  if (missing(segcol)) segcol <- "red"
  if (missing(zlcol)) zlcol <- "grey"
  if (missing(lwd)) lwd <- 3
  # set up a panel layout: samples down the rows, cbys.nchrom chromosomes
  # side by side; negative/zero entries in lmat leave panels empty
  if (plot.type == "chrombysample") {
    cat("Setting multi-figure configuration\n")
    par(mar = c(0, 4, 0, 2), oma = c(4, 0, 4, 0), mgp = c(2, 0.7, 0))
    if (missing(cbys.layout)) {
      nrow <- ncol <- ceiling(sqrt(nsample))
      if (nrow*ncol - nsample > 0) {
        nrow <- nrow - 1
        ncol <- ncol + 1
      }
      if (nrow*ncol - nsample >= nrow) ncol <- ncol - 1
      cbys.layout <- c(nrow, ncol)
    }
    lmat0 <- lmat1 <- c(1:nsample, rep(-cbys.nchrom*nsample, prod(cbys.layout) - nsample))
    for(i in 1:(cbys.nchrom-1)) {
      lmat1 <- c(lmat1,lmat0+nsample*i)
    }
    lmat1[lmat1<0] <- 0
    lmat <- matrix(lmat1, nrow = cbys.layout[1], ncol = cbys.nchrom*cbys.layout[2], byrow = FALSE)
    layout(lmat)
  }
  # near-square grid of panels, one per chromosome, for each sample's page
  if (plot.type == "samplebychrom") {
    cat("Setting multi-figure configuration\n")
    par(mar = c(4, 4, 4, 2), oma = c(0, 0, 2, 0), mgp = c(2, 0.7, 0))
    if (missing(sbyc.layout)) {
      nrow <- ncol <- ceiling(sqrt(nchrom))
      if (nrow*ncol - nchrom > 0) {
        nrow <- nrow - 1
        ncol <- ncol + 1
      }
      if (nrow*ncol - nchrom > ncol) ncol <- ncol - 1
      sbyc.layout <- c(nrow, ncol)
    }
    lmat <- matrix(c(1:nchrom, rep(0,prod(sbyc.layout)-nchrom)),
                   nrow = sbyc.layout[1], ncol = sbyc.layout[2], byrow=TRUE)
    layout(lmat)
  }
  if (plot.type == "chrombysample") {
    atchrom <- 0.5/cbys.nchrom
    for (ichrom in uchrom) {
      if (xmaploc) maploc1 <- maploc0[chrom0==ichrom]
      for (isamp in 1:nsample) {
        genomdat <- xdat[chrom0==ichrom, isamp+2]
        ina <- which(is.finite(genomdat))
        genomdat <- genomdat[ina]
        if (xmaploc) maploc <- maploc1[ina]
        # segment boundaries (zz: start/end index pairs) and means (mm)
        # for this sample on this chromosome
        ii <- cumsum(c(0, xres$num.mark[xres$ID == sampleid[isamp] & xres$chrom==ichrom]))
        mm <- xres$seg.mean[xres$ID == sampleid[isamp] & xres$chrom==ichrom]
        kk <- length(ii)
        zz <- cbind(ii[-kk] + 1, ii[-1])
        if (xmaploc) {
          plot(maploc, genomdat, pch = pt.pch, cex=pt.cex, xaxt="n", ylim = ylim, ylab = sampleid[isamp])
        } else {
          plot(genomdat, pch = pt.pch, cex=pt.cex, xaxt="n", ylim = ylim, ylab = sampleid[isamp])
        }
        if(zeroline) abline(h=0, col=zlcol, lwd=lwd)
        # draw the x-axis only on the bottom panel of each column
        if (isamp%%cbys.layout[1] == 0) {
          axis(1, outer=TRUE)
          title(xlab="Index")
        }
        if (include.means) {
          if (xmaploc) {
            segments(maploc[zz[,1]], mm, x1=maploc[zz[,2]], y1=mm, col = segcol, lwd=lwd)
          } else {
            segments(zz[,1], mm, x1=zz[,2], y1=mm, col = segcol, lwd=lwd)
          }
          # for (i in 1:(kk - 1)) {
          #   if (xmaploc) {
          #     lines(maploc[zz[i, ]], rep(mm[i], 2), col = segcol, lwd=lwd)
          #   } else {
          #     lines(zz[i, ], rep(mm[i], 2), col = segcol, lwd=lwd)
          #   }
          # }
        }
      }
      mtext(paste("Chromosome",ichrom), side = 3, line = 1, at = atchrom, outer=TRUE, font=2)
      atchrom <- atchrom + 1/cbys.nchrom
      atchrom <- atchrom - floor(atchrom)
    }
  } else {
    # "whole", "samplebychrom" and "plateau" all loop over samples
    for (isamp in 1:nsample)
    {
      genomdat <- xdat[, isamp+2]
      ina <- which(is.finite(genomdat))
      genomdat <- genomdat[ina]
      wcol <- wcol0[ina]
      chrom <- chrom0[ina]
      if (xmaploc) maploc <- maploc0[ina]
      ii <- cumsum(c(0, xres$num.mark[xres$ID == sampleid[isamp]]))
      mm <- xres$seg.mean[xres$ID == sampleid[isamp]]
      kk <- length(ii)
      zz <- cbind(ii[-kk] + 1, ii[-1])
      if(missing(ylim)) ylim <- range(c(genomdat, -genomdat))
      if (plot.type=="whole")
      {
        if (xmaploc) {
          plot(maploc, genomdat, pch = pt.pch, cex=pt.cex, col=pt.cols[wcol], main = sampleid[isamp], ylab = "", ylim = ylim)
          if(zeroline) abline(h=0, col=zlcol, lwd=lwd)
        } else {
          plot(genomdat, pch = pt.pch, cex=pt.cex, col=pt.cols[wcol], main = sampleid[isamp], ylab = "", ylim = ylim)
          if(zeroline) abline(h=0, col=zlcol, lwd=lwd)
        }
        if (include.means) {
          if (xmaploc) {
            segments(maploc[zz[,1]], mm, x1=maploc[zz[,2]], y1=mm, col = segcol, lwd=lwd)
          } else {
            segments(zz[,1], mm, x1=zz[,2], y1=mm, col = segcol, lwd=lwd)
          }
          # for (i in 1:(kk - 1))
          # {
          #   if (xmaploc) {
          #     lines(maploc[zz[i, ]], rep(mm[i], 2), col = segcol, lwd=lwd)
          #   } else {
          #     lines(zz[i, ], rep(mm[i], 2), col = segcol, lwd=lwd)
          #   }
          # }
        }
      }
      if (plot.type=="samplebychrom")
      {
        cc <- xres$chrom[xres$ID == sampleid[isamp]]
        for (ichrom in uchrom)
        {
          if (xmaploc) {
            plot(maploc[chrom == ichrom], genomdat[chrom == ichrom], pch = pt.pch, cex=pt.cex, xlab="maploc", ylab = "", main = paste("Chromosome", ichrom), ylim = ylim)
          } else {
            plot(genomdat[chrom == ichrom], pch = pt.pch, cex=pt.cex, ylab = "", main = paste("Chromosome", ichrom), ylim = ylim)
          }
          if(zeroline) abline(h=0, col=zlcol, lwd=lwd)
          if (include.means) {
            jj <- which(cc==ichrom)
            jj0 <- min(jj)
            # without map locations, indices are shifted so each chromosome
            # panel starts at 1
            if (xmaploc) {
              segments(maploc[zz[jj,1]], mm[jj], x1=maploc[zz[jj,2]], y1=mm[jj], col = segcol, lwd=lwd)
            } else {
              segments(1+zz[jj,1]-zz[jj0,1], mm[jj], x1=1+zz[jj,2]-zz[jj0,1], y1=mm[jj], col = segcol, lwd=lwd)
            }
            # for (i in jj)
            # {
            #   if (xmaploc) {
            #     lines(maploc[zz[i, ]], rep(mm[i], 2), col = segcol, lwd=lwd)
            #   } else {
            #     lines(1+zz[i, ]-zz[jj0,1], rep(mm[i], 2), col = segcol, lwd=lwd)
            #   }
            # }
          }
        }
        mtext(sampleid[isamp], side = 3, line = 0, outer = TRUE, font=2)
      }
      if (plot.type=="plateau")
      {
        # reorder segments (and their probes) by increasing segment mean
        omm <- order(mm)
        ozz <- zz[omm,]
        ina <- unlist(apply(ozz, 1, function(ii) ii[1]:ii[2]))
        plot(genomdat[ina], pch = pt.pch, cex=pt.cex, main = sampleid[isamp], ylab = "", ylim = ylim)
        if(zeroline) abline(h=0, col=zlcol, lwd=lwd)
        if (include.means) {
          ii <- cumsum(c(0, xres$num.mark[xres$ID == sampleid[isamp]][omm]))
          smm <- mm[omm]
          zz <- cbind(ii[-kk] + 1, ii[-1])
          segments(zz[,1], smm, x1=zz[,2], y1=smm, col = segcol, lwd=lwd)
          # for (i in 1:(kk-1)) lines(zz[i, ], rep(smm[i], 2), col = segcol, lwd=lwd)
        }
      }
    }
  }
  # restore the graphics state that the multi-panel types changed
  on.exit( if (plot.type=="chrombysample" | plot.type=="samplebychrom") {
    par(op)
  } else { if(int.dev & !parask & nsample>1) par(ask=parask) })
}
# Print method for DNAcopy objects: echo the segmentation call when one is
# stored, then the segment summary table, optionally with the segment row
# indices (segRows) appended column-wise.
print.DNAcopy <- function(x, showSegRows=FALSE, ...)
{
  if (!inherits(x, "DNAcopy")) stop("Object is not the result of segment")
  the.call <- x$call
  if (!is.null(the.call)) {
    cat("Call:\n")
    dput(the.call)
    cat("\n")
  }
  if (!showSegRows) {
    print(x$output)
  } else if (is.null(x$segRows)) {
    # older objects (or subsets) lack segRows; fall back to the plain table
    print(x$output)
    warning("segRows missing. Object may be a subset or from DNAcopy < 1.23.2.\n")
  } else {
    print(cbind(x$output, x$segRows))
  }
}
# Subset a DNAcopy object by chromosome and/or sample.
#
# Args:
#   x          - object of class DNAcopy ($data: chrom, maploc, sample
#                columns; $output: segment table with an ID column).
#   chromlist  - chromosomes to keep; NULL (default) keeps all.
#   samplelist - samples to keep, given either as indices into the sample
#                columns or as sample names; NULL (default) keeps all.
# Returns a DNAcopy object restricted to the requested chromosomes/samples,
# with the "data.type" attribute carried over.
subset.DNAcopy <- function(x, chromlist=NULL, samplelist=NULL, ...)
{
  if (!inherits(x, 'DNAcopy')) stop("First arg must be of class DNAcopy")
  zdat <- x$data
  zres <- x$output
  chrom <- zdat$chrom
  uchrom <- unique(chrom)
  # NULL (or missing) means "keep all chromosomes"
  if (missing(chromlist) || is.null(chromlist)) chromlist <- uchrom
  if (length(setdiff(chromlist, uchrom)) > 0)
    stop("chromlist contains chromosomes not in the data")
  if (length(chromlist) > length(unique(chromlist)))
    warning("duplicate chromosomes in chromlist removed")
  sampleid <- colnames(zdat)[-(1:2)]
  # treat an explicit NULL like a missing samplelist (keep all samples);
  # previously an explicit NULL silently dropped every sample column
  if (missing(samplelist) || is.null(samplelist)) samplelist <- sampleid
  nsample <- length(sampleid)
  # valid when given as indices 1..nsample OR as sample names
  if (length(setdiff(samplelist, seq_len(nsample))) > 0 &&
      length(setdiff(samplelist, sampleid)) > 0)
    stop("samplelist should be a list of valid sample numbers or names")
  # map names to column offsets (data columns start after chrom and maploc)
  if (!is.numeric(samplelist)) samplelist <- match(samplelist, names(zdat)) - 2
  if (length(samplelist) > length(unique(samplelist)))
    warning("duplicate samples in samplelist removed")
  samplelist <- unique(samplelist)
  # keep output rows for the selected samples, preserving requested order
  jj <- unlist(sapply(sampleid[samplelist], function(i, id) {which(id==i)}, zres$ID ))
  zres <- zres[jj,]
  y <- list()
  y$data <- zdat[chrom %in% chromlist, c(1:2, samplelist+2)]
  attr(y$data, "data.type") <- attr(zdat, "data.type")
  y$output <- zres[zres$chrom %in% chromlist,]
  class(y) <- "DNAcopy"
  y
}
# Chromosome.Lengths <- c(263, 255, 214, 203, 194, 183, 171, 155, 145, 144, 144, 143, 114, 109, 106, 98, 92, 85, 67, 72, 50, 56, 164, 59)
# names(Chromosome.Lengths) <- c(as.character(1:22),"X","Y")
|
f4513e65e264fdccb1446e659b28456f2b3f6972
|
7a0ec22b3e72d18a14d34a6ce05869f0fdd8492b
|
/data-raw/make_kenya.R
|
fb668b75b3a76f28440096fb2b2b9c1aa3414180
|
[
"MIT"
] |
permissive
|
tcweiss/primer.data
|
cb202567e34fc7198f4c5fb45e5f9793b3e0c167
|
7a7b4c5c0d71ab2d2b5c4dee3211f3c8a9c4320b
|
refs/heads/master
| 2023-08-09T18:46:38.351436
| 2021-08-19T17:22:29
| 2021-08-19T17:22:29
| 320,058,988
| 0
| 0
|
NOASSERTION
| 2020-12-09T19:29:55
| 2020-12-09T19:29:54
| null |
UTF-8
|
R
| false
| false
| 1,929
|
r
|
make_kenya.R
|
# Work in progress: prepare the `kenya` data set from Harris, Kamindo, and
# van der Windt (2020): "Electoral Administration in Fledgling Democracies:
# Experimental Evidence from Kenya".
# The replication data can be located at:
# https://dataverse.harvard.edu/dataset.xhtml;jsessionid=786d92e2aec3933f01bd0af48ec0?persistentId=doi%3A10.7910%2FDVN%2FUT25HQ&version=&q=&fileTypeGroupFacet=%22Code%22&fileAccess=&fileTag=&fileSortField=&fileSortOrder=
# DISCARDED VARIABLES: weight (needed?), INTERVENTION (same as the first one?), DATE_DAY1 (include this or date or both?)
# TO-DO: Go from individuals --> communities; documentation not showing up
library(tidyverse)
library(usethis)
# read the replication data (pre-converted to .Rds)
x <- readRDS('data-raw/kenyadata.Rds')
x <- x %>%
  # Recode the treatment arms into tidy lower-case factor levels
  mutate(treatment = as.factor(case_when(treat == "Control" ~ "control",
                                         treat == "SMS" ~ "SMS",
                                         treat == "Canvass" ~ "canvass",
                                         treat == "Local" ~ "local",
                                         treat == "Local+Canvass" ~ "local + canvass",
                                         treat == "Local+SMS" ~ "local + SMS"))) %>%
  # Rename: block id, polling station id, poverty, population density,
  # mean age, and registration outcome
  mutate(block = as.character(BLOCK_ID),
         poll_station = PS_ID,
         poverty = pov,
         pop_density = pd,
         mean_age = mean.age,
         reg_byrv13 = reg_int_byrv13) %>%
  # Select the final variables
  select(block, poll_station, treatment, poverty, distance, pop_density, mean_age, reg_byrv13, rv13) %>%
  as_tibble()
# sanity checks on the shape and key column types before publishing
stopifnot(nrow(x) > 1600)
stopifnot(ncol(x) == 9)
stopifnot(is.factor(x$treatment))
stopifnot(is.character(x$block))
kenya <- x
usethis::use_data(kenya, overwrite = TRUE)
# Todo: Check whether the reg/rv13 and date descriptions are right.
|
d78dca25c7bd869b114dbd9533ef6a2be0ad785c
|
e725e28b66bf0a13793f172b51c0111a068bcfbe
|
/tests/testthat/test-methods.R
|
6188cfdb5e493a8ebded150e129cefdd6300c561
|
[
"Apache-2.0"
] |
permissive
|
mikemahoney218/typogenerator
|
86aa47c3450654df6ef2de4ac2c44a227671c82b
|
23cf8a5ed05f10adf2029c4a6cb482a6f6e376c2
|
refs/heads/main
| 2023-03-18T09:23:43.327024
| 2021-03-03T00:12:48
| 2021-03-03T00:12:48
| 343,555,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 432
|
r
|
test-methods.R
|
# Regression tests pinning relationships between the typo-generation helpers.
test_that("methods are stable", {
  # every vowel-swap variant should also appear among the bitsquat variants
  # generated over the vowel set
  expect_true(
    all(
      typo_vowelswap("Michael")[[1]] %in% typo_bitsquat(
        "Michael",
        c("a", "e", "i", "o", "u")
      )[[1]]
    )
  )
  # "Michael" produces a fixed number (60) of double-hit variants
  expect_equal(
    length(typo_doublehit("Michael")[[1]]),
    60
  )
  # hyphenation and subdomain insertion agree when given the same separator
  expect_equal(
    typo_hyphenation("Michael", ";"),
    typo_subdomain("Michael", ";")
  )
  # character replacement preserves the input's length (7 characters)
  expect_true(
    all(nchar(typo_replace("Michael")[[1]]) == 7)
  )
})
|
e7dd5488c7d2a2f1a166cb469ef13033e3f1d8da
|
224c39c8c38dc3ac7d86dfd596f4d434704c5c8a
|
/run_analysis.R
|
d64ebb4f198ff77d601ec40f956114dbdbc7f0f5
|
[] |
no_license
|
glebart/getting_cleaning_data_course_proj
|
4b9d56024a4edd943829663b76918d9f4fb94a2f
|
ac814072d30b073d48e140a95bf4868ae6707d26
|
refs/heads/master
| 2020-03-24T20:48:47.592015
| 2018-07-31T11:12:29
| 2018-07-31T11:12:29
| 142,997,784
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,753
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: build a tidy data set from the
# UCI HAR accelerometer data (train + test), keep only mean()/std()
# measurements, label activities, and write out the tidy table plus
# per-group mean summaries.
library(data.table)
library(magrittr)
options(stringsAsFactors = FALSE)   # was F; use the full literal

path <- paste0(getwd(), "/UCI HAR Dataset/")
features <- paste0(path, "features.txt") %>% read.table
activity_labels <- paste0(path, "activity_labels.txt") %>% read.table
# columns whose feature name contains mean() or std()
cols <- features$V2 %>% grep("mean\\(\\)|std\\(\\)", .)

datalist <- list()
for (i in c("train", "test")) {
  # reading x and selecting columns
  # NOTE(review): file names here are lower-case ("x_train.txt"); the raw
  # UCI archive ships "X_train.txt" -- confirm on a case-sensitive system.
  x <- path %>% paste0("%s/x_%s.txt") %>% sprintf(i, i) %>% read.table
  x <- x[, cols]
  names(x) <- features$V2[cols]
  # reading y (activity codes)
  y <- path %>% paste0("%s/y_%s.txt") %>% sprintf(i, i) %>% read.table
  names(y) <- "activity"
  # reading subjects
  subject <- path %>% paste0("%s/subject_%s.txt") %>% sprintf(i, i) %>% read.table
  names(subject) <- "subject"
  # combining to one data.frame, tagged with its sample of origin
  tmp <- cbind(x, y, subject)
  tmp$sample <- i
  datalist[[i]] <- tmp %>% setDT
}
# getting rid of temporary data
rm(x, y, subject, tmp)
dt <- rbindlist(datalist, fill = TRUE)
# appropriate activity labeling
dt$activity %<>% factor(labels = activity_labels$V2)
# dt is the desired tidy data.table
write.csv(dt, "clean_data.csv", row.names = FALSE)
# the submission also requires a txt file
# (fixed: was the partially matched `row.name =` and the F shorthand)
write.table(dt, "clean_data.txt", row.names = FALSE)
# mean summary from dt
dtmeans <- dt[, lapply(.SD, mean), by = .(activity, subject, sample)]
write.csv(dtmeans, "clean_data_means.csv", row.names = FALSE)
write.table(dtmeans, "clean_data_means.txt", row.names = FALSE)
dtmeans <- dt[, lapply(.SD, mean), by = .(subject, sample), .SDcols = seq_along(cols)]
write.csv(dtmeans, "clean_data_subject_means.csv", row.names = FALSE)
dtmeans <- dt[, lapply(.SD, mean), by = .(activity, sample), .SDcols = seq_along(cols)]
write.csv(dtmeans, "clean_data_activity_means.csv", row.names = FALSE)
|
6eb04c55e1e595c0a7f239cd4891588f56e3f5db
|
b9ef047b65e05a6a9dd665a496c37689d628e5c1
|
/man/fill_array3d.Rd
|
ccff13c1af48dc86f8f9c50544bb5659c667bb0a
|
[] |
no_license
|
kongdd/RcppArray
|
e65c1049dc5ffbe7c9807cf5d6a1fd83d1232af0
|
b720d8139a5611c1e6c8194bba8f0b7b8b2fe4ea
|
refs/heads/master
| 2020-04-07T04:58:46.570131
| 2018-11-20T01:43:00
| 2018-11-20T01:43:00
| 158,079,307
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,071
|
rd
|
fill_array3d.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{fill_array3d}
\alias{fill_array3d}
\title{fill_array3d}
\usage{
fill_array3d(mat, newmat, i_start = 1L, j_start = 1L, k_start = 1L)
}
\arguments{
\item{mat}{A 3d Array.}
\item{newmat}{A 3d Array to fill \code{mat}.}
\item{i_start}{Integer, row of \code{mat} starting to fill.}
\item{j_start}{Integer, col of \code{mat} starting to fill.}
\item{k_start}{Integer, slice of \code{mat} starting to fill.}
}
\description{
This function is intended for the Rcmip5 package, for use when combining 3d arrays.
}
\details{
Note that if the data types of \code{mat} and \code{newmat} are different,
values in \code{mat} will \strong{not be replaced}.
}
\examples{
mat <- array(2, dim = c(3, 3, 3))
newmat <- array(1, dim = c(3, 3, 3))
fill_array3d(mat, newmat)
mat
}
\references{
[1] https://stackoverflow.com/questions/30969799/column-means-3d-matrix-cube-rcpp \cr
[2] https://stackoverflow.com/questions/34161593/how-to-replace-elements-of-a-matrix-in-c-with-values-from-another-matrix-usin \cr
}
|
651bde1757f457463c88ec8cd8a1bdf07c7dcc71
|
29a2daaa4aadaa7e04987cf1aaed0501362028ae
|
/additional_functions/_init.time.sim.R
|
010580b6b89d8d1001aff2bb6a4aba6e2d6b7195
|
[] |
no_license
|
lfcampos/Space-and-time-with-fixed-K
|
e7a58d5efcb4b280fe355431a096679149b3c836
|
50b15dfdb8dcc63251362a825e6303d3e037dcf5
|
refs/heads/master
| 2022-10-28T08:24:54.517237
| 2020-06-08T07:47:28
| 2020-06-08T07:47:28
| 101,098,105
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,255
|
r
|
_init.time.sim.R
|
# ---------------------------------------------------------------------------
# Initialization of the time-domain quantities for the fixed-K space/time
# sampler: per-source breakpoints (bk), per-photon time-bin labels
# (time_bin), per-bin spectral parameters (eparas_all) and per-bin relative
# intensities (lambda).  The strategy is selected by the global
# `time_spacing` ('test', 'test_merging', 'test_merging_one', 'simulation',
# 'equal', or 'bayesian.blocks').
# Globals read: home, theta, params, q, arrival_time, energy, time_spacing,
# num_time_breaks (for 'equal').  The last component (index k_curr+1) is
# always the background, which gets a single time bin in most branches.
# ---------------------------------------------------------------------------
source(paste(home,"/additional_functions/", "initialization.time.simulation.R",sep=""))
k_curr <- theta # Number of sources
initialize_list <- initialization.time.simulation(params)
mix_num <- k_curr+1 # Number of mixture components (sources + background)
mu_curr <- t(initialize_list[[1]]) # Locations of sources (2 x k_curr matrix) - locations initially on edge of image
w <- initialize_list[[2]] # Relative intensities (vector of length mix_num)
eparas_all <- initialize_list[[3]] # Shape and mean spectral parameters - disregard if spectral_model=="none"
ewt_all <- initialize_list[[4]] # Spectral model weights (extended full model) - disregard unless spectral_model=="extended_full"
allocate_curr <- initialize_list[[5]] # Allocation of photons to sources (and background) - initially obs_num x mix_num matrix of zeros
lambda <- initialize_list[[8]] # Relative Intensities of time arrival distribution
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Initialize breakpoints and time_bins
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
bk = list()
time_bin = list()
# 'test': take the true breakpoints from params, shift the interior
# breakpoints of each source by q, and keep one background bin.
if(time_spacing == 'test'){
  q = as.numeric(q)
  num_time_breaks = c(sapply(params$breakpoints, length)[1:2]-1, 1)
  bk = params$breakpoints
  bk[[3]] = c(min(params$breakpoints[[3]]), max(params$breakpoints[[3]]))
  # add jitter to the middle breakpoints
  for(i in 1:(k_curr)){
    # bk[[i]][-c(1, num_time_breaks[i] + 1)] = bk[[i]][-c(1, num_time_breaks[i] + 1)] + rnorm(num_time_breaks[i]-1, 0, q)
    bk[[i]][-c(1, num_time_breaks[i] + 1)] = bk[[i]][-c(1, num_time_breaks[i] + 1)] + q
  }
  # label each photon with its time bin for every component
  for(i in 1:(k_curr + 1)){
    time_bin[[i]] = cut(arrival_time, bk[[i]], include.lowest = TRUE)
    levels(time_bin[[i]]) = 1:num_time_breaks[i]
    time_bin[[i]] = as.numeric(time_bin[[i]])
  }
  # reshape the initial (alpha, gamma) spectral parameters to a
  # per-source list of per-bin 2x1 matrices
  eparas_all_tmp = list()
  lambda_tmp = list()
  for(i in 1:(k_curr)){
    eparas_all_tmp[[i]] = list()
    for(k in 1:num_time_breaks[i]){
      eparas_all_tmp[[i]][[k]] = matrix(eparas_all[[k]][i,], nrow = 2, ncol = 1)
    }
  }
  eparas_all = eparas_all_tmp
  # initialize lambda from the empirical bin occupancy of allocated photons
  for(i in 1:(k_curr+1)){
    lambda_tmp[[i]] = prop.table(table(time_bin[[i]][allocate_curr[,i]==1]))
  }
  # NOTE(review): eparas_all is assigned twice (before and after the lambda
  # loop) in every branch of this file; the second assignment is redundant.
  eparas_all = eparas_all_tmp
  lambda = lambda_tmp
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Merging Breakpoints: we test merging a few breakpoints
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
if(time_spacing == 'test_merging'){
  # test case have 5 bins, let's replicate what BB is doing
  # [1] 0.0006081108 0.2022975916 0.6218380892 0.9987439354
  # [1] 0.0006081108 0.3629292103 0.9987439354
  # [1] 0.0006081108 0.9987439354
  # bright: merge
  num_time_breaks = c(3, 2, 1)
  bk = params$breakpoints
  bk[[1]] = bk[[1]][c(1, 2, 4, 6)]
  bk[[2]] = bk[[2]][c(1, 3, 6)]
  bk[[3]] = c(min(params$breakpoints[[3]]), max(params$breakpoints[[3]]))
  for(i in 1:(k_curr + 1)){
    time_bin[[i]] = cut(arrival_time, bk[[i]], include.lowest = TRUE)
    levels(time_bin[[i]]) = 1:num_time_breaks[i]
    time_bin[[i]] = as.numeric(time_bin[[i]])
  }
  eparas_all_tmp = list()
  lambda_tmp = list()
  for(i in 1:(k_curr)){
    eparas_all_tmp[[i]] = list()
    for(k in 1:num_time_breaks[i]){
      eparas_all_tmp[[i]][[k]] = matrix(eparas_all[[k]][i,], nrow = 2, ncol = 1)
    }
  }
  eparas_all = eparas_all_tmp
  # initialize lambda
  for(i in 1:(k_curr+1)){
    lambda_tmp[[i]] = prop.table(table(time_bin[[i]][allocate_curr[,i]==1]))
  }
  eparas_all = eparas_all_tmp
  lambda = lambda_tmp
}
# 'test_merging_one': test case has 5 bins; merge just one block per source
# to see what the effect is.
if(time_spacing == 'test_merging_one'){
  # test case have 5 bins, let's merge just one block see what effect is
  num_time_breaks = c(4, 4, 1)
  bk = params$breakpoints
  bk[[1]] = bk[[1]][-2]
  bk[[2]] = bk[[2]][-5]
  bk[[3]] = c(min(params$breakpoints[[3]]), max(params$breakpoints[[3]]))
  for(i in 1:(k_curr + 1)){
    time_bin[[i]] = cut(arrival_time, bk[[i]], include.lowest = TRUE)
    levels(time_bin[[i]]) = 1:num_time_breaks[i]
    time_bin[[i]] = as.numeric(time_bin[[i]])
  }
  eparas_all_tmp = list()
  lambda_tmp = list()
  for(i in 1:(k_curr)){
    eparas_all_tmp[[i]] = list()
    for(k in 1:num_time_breaks[i]){
      eparas_all_tmp[[i]][[k]] = matrix(eparas_all[[k]][i,], nrow = 2, ncol = 1)
    }
  }
  eparas_all = eparas_all_tmp
  # initialize lambda
  for(i in 1:(k_curr+1)){
    lambda_tmp[[i]] = prop.table(table(time_bin[[i]][allocate_curr[,i]==1]))
  }
  eparas_all = eparas_all_tmp
  lambda = lambda_tmp
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# 'simulation': use the true breakpoints from params unchanged (background
# still collapsed to a single bin).
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
if(time_spacing == 'simulation'){
  num_time_breaks = c(sapply(params$breakpoints, length)[1:2]-1, 1)
  bk = params$breakpoints
  bk[[3]] = c(min(params$breakpoints[[3]]), max(params$breakpoints[[3]]))
  for(i in 1:(k_curr + 1)){
    time_bin[[i]] = cut(arrival_time, bk[[i]], include.lowest = TRUE)
    levels(time_bin[[i]]) = 1:num_time_breaks[i]
    time_bin[[i]] = as.numeric(time_bin[[i]])
  }
  eparas_all_tmp = list()
  lambda_tmp = list()
  for(i in 1:(k_curr)){
    eparas_all_tmp[[i]] = list()
    for(k in 1:num_time_breaks[i]){
      eparas_all_tmp[[i]][[k]] = matrix(eparas_all[[k]][i,], nrow = 2, ncol = 1)
    }
  }
  eparas_all = eparas_all_tmp
  # initialize lambda
  for(i in 1:(k_curr+1)){
    lambda_tmp[[i]] = prop.table(table(time_bin[[i]][allocate_curr[,i]==1]))
  }
  eparas_all = eparas_all_tmp
  lambda = lambda_tmp
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Equal time spacing: the same number of equally spaced bins for every
# component (reads the scalar global num_time_breaks, then expands it).
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
if(time_spacing == 'equal'){
  for(i in 1:(k_curr + 1)){
    bk[[i]] = seq(min(arrival_time), max(arrival_time), length.out = num_time_breaks + 1)
    time_bin[[i]] = cut(arrival_time, bk[[i]], include.lowest = TRUE)
    levels(time_bin[[i]]) = 1:num_time_breaks
    time_bin[[i]] = as.numeric(time_bin[[i]])
  }
  num_time_breaks = rep(num_time_breaks, k_curr + 1)
  # rearrange initial (alpha, gamma)
  eparas_all_tmp = list()
  lambda_tmp = list()
  for(i in 1:(k_curr)){
    eparas_all_tmp[[i]] = list()
    for(k in 1:num_time_breaks[i]){
      eparas_all_tmp[[i]][[k]] = matrix(eparas_all[[k]][i,], nrow = 2, ncol = 1)
    }
  }
  eparas_all = eparas_all_tmp
  # initialize lambda
  for(i in 1:(k_curr+1)){
    lambda_tmp[[i]] = prop.table(table(time_bin[[i]][allocate_curr[,i]==1]))
  }
  eparas_all = eparas_all_tmp
  lambda = lambda_tmp
}
# 'bayesian.blocks': derive per-source breakpoints from the arrival times of
# the photons currently allocated to that source, via astropy's
# bayesian_blocks (through reticulate); background keeps one bin.
if(time_spacing == 'bayesian.blocks'){
  library(reticulate)
  astropy <- import("astropy")
  num_time_breaks = rep(NA, k_curr + 1)
  for(i in 1:(k_curr)){
    bk[[i]] = astropy$stats$bayesian_blocks(c(min(arrival_time), arrival_time[allocate_curr[,i]==1], max(arrival_time)))
    num_time_breaks[i] = length(bk[[i]]) - 1
    time_bin[[i]] = cut(arrival_time, bk[[i]], include.lowest = TRUE)
    levels(time_bin[[i]]) = 1:length(levels(time_bin[[i]]))
    time_bin[[i]] = as.numeric(time_bin[[i]])
  }
  # background photons
  bk[[k_curr + 1]] = c(min(arrival_time), max(arrival_time))
  num_time_breaks[k_curr + 1] = 1
  time_bin[[k_curr + 1]] = rep(1, length(arrival_time))
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
  # Reformat other initialization parameters based on these breakpoints
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
  eparas_all_tmp = list()
  lambda_tmp = list()
  # negative gamma log-likelihood, parameterized by mean alpha and shape
  # gamma; minimized below to get per-bin MLEs.  Reads `eik` from the
  # enclosing environment.
  f = function(theta){
    alpha = theta[1]
    gamma = theta[2]
    -sum(dgamma(eik, gamma, gamma/alpha, log = TRUE))
  }
  # initialize (alpha, gamma) at MLE for each source/timepoint; bins with
  # fewer than 20 photons fall back to all of the source's photons
  for(i in 1:(k_curr)){
    eparas_all_tmp[[i]] = list()
    for(k in 1:num_time_breaks[i]){
      eik = energy[allocate_curr[,i]==1 & time_bin[[i]] == k]
      if(length(eik) < 20) eik = energy[allocate_curr[,i]==1]
      MLE = optim(eparas_all[[i]][1,], f)$par
      eparas_all_tmp[[i]][[k]] = matrix(MLE, nrow = 2, ncol = 1)
    }
  }
  # initialize lambda
  for(i in 1:(k_curr+1)){
    lambda_tmp[[i]] = prop.table(table(time_bin[[i]][allocate_curr[,i]==1]))
  }
  eparas_all = eparas_all_tmp
  lambda = lambda_tmp
}
|
3ece5a97a2e018c67c4eee72d711a7d9f2566d91
|
c071023e743755d79216b05d9b35ebf1889eb47b
|
/src/13_generate_variables_3.R
|
3eb23f4f914bf90dc192de68ac8d9ffa5c18da83
|
[] |
no_license
|
nishinoh/article_2021_jws
|
2ddd85daec6036235d28a97dd1124e3a9adc5554
|
9f95e0411ea66fbb6bf140847a2757f693f955e6
|
refs/heads/main
| 2023-03-01T11:08:55.478069
| 2021-02-17T06:15:53
| 2021-02-17T06:15:53
| 328,964,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,183
|
r
|
13_generate_variables_3.R
|
##### 0. Preliminaries ========================================================
# library(tidyverse)
library(dplyr)
library(tidyr)
library(stringr)
library(forcats)
library(tibble)
library(readr)
# File path to the data; adjust to the directory layout of your own machine.
DATA_PATH <- "~/Data/JAHEAD/Process_Files/"
# Load the data produced by the previous scripts.
load(str_c(DATA_PATH, "data_after_12.rda"))
##### 1. Prepare the public-care variables =================================
# Recode DK-type answers to missing and convert the variable to numeric.
# Survey values: "利用していない" = "not using" -> 0; "非該当" = "not
# applicable" -> 0; "DK" / "DK/NA" -> NA.  The recoded numeric version is
# stored in a new column named <variable>_n.
recodePublicService <- function(data, variable){
  variable_new <- str_c(variable, "_n")
  data <- data %>%
    # Work on the character scale for now; convert to numeric at the end.
    mutate(!!(variable) := as.character(.[[variable]])) %>%
    mutate(!!(variable_new) := case_when(.[[variable]]=="利用していない" ~ "0",
                                         .[[variable]]=="DK" ~ NA_character_,
                                         .[[variable]]=="DK/NA" ~ NA_character_,
                                         .[[variable]]=="非該当" ~ "0",
                                         TRUE ~ .[[variable]])) %>%
    mutate(!!(variable_new) := as.numeric(.[[variable_new]]))
}
data_long <- recodePublicService(data_long, "use_dayservice")
data_long <- recodePublicService(data_long, "use_shortstay")
data_long <- recodePublicService(data_long, "use_homehelp")
# Create dummy variables for whether each public service is used, plus an
# any-service indicator.
data_long <- data_long %>%
  mutate(use_dayservice_d = if_else(use_dayservice_n > 0, 1, 0),
         use_shortstay_d = if_else(use_shortstay_n > 0, 1, 0),
         use_homehelp_d = if_else(use_homehelp_n > 0, 1, 0),
         use_publicservices_d = if_else(use_dayservice_d + use_shortstay_d + use_homehelp_d >= 1, 1, 0))
##### Fin. Save the resulting file ================================================
# Save the created file, then remove every object built so far
# (deliberate: this script is one stage of a sourced pipeline).
save(data_long, file=str_c(DATA_PATH, "data_after_13.rda"))
rm(list = ls())
|
24c07d9bba5847380cbf9d2379b85f3e09acd6a9
|
27773c13edf3b66ae7b9957db384137f8eb85f50
|
/man/highdimdiffindiff_crossfit.Rd
|
bd164c0a8ddde1f3a086460817a1b412c467d359
|
[
"MIT"
] |
permissive
|
abhradeepmaiti/HDdiffindiff
|
e333854bc94af5ee4f1982715b400b3c637ca1c2
|
f66b6d6b8dda7d58d0bc8416aed4da943e2fd459
|
refs/heads/master
| 2023-03-17T22:55:31.919712
| 2020-07-23T15:48:59
| 2020-07-23T15:48:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,365
|
rd
|
highdimdiffindiff_crossfit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/highdimdiffindiff_crossfit.R
\name{highdimdiffindiff_crossfit}
\alias{highdimdiffindiff_crossfit}
\title{Perform doubly robust diff-in-diff estimation under high dimensional covariates}
\usage{
highdimdiffindiff_crossfit(y0, y1, treat, x, z, k = 3, method = "Pol", q)
}
\arguments{
\item{y0}{outcome at time 0}
\item{y1}{outcome at time 1}
\item{treat}{treatment status at time 1}
\item{x}{pre-treatment high dimensional covariates}
\item{z}{pre-treatment low dimensional covariates allowing a nonparametric specification}
\item{k}{number of sample splits: default is 3}
\item{method}{basis function in nonparametric specification:
\itemize{
\item (default) Pol: Polynomial basis with parameter q/2 as the degree of polynomial;
\item Tri: Trigonometric polynomials with parameter q/2 as the degree of polynomial;
}}
\item{q}{parameter for the basis function in the nonparametric specification}
}
\value{
\item{xdebias}{debiased estimator for the high dimensional coefficients}
\item{gdebias}{debiased estimator for the nonparametric coefficients}
\item{stdx}{standard error for the high dimensional coefficients}
\item{stdg}{standard error for the nonparametric coefficients}
}
\description{
Perform doubly robust diff-in-diff estimation under high dimensional covariates
}
\author{
Sida Peng
}
|
677046278f15c16ad949b086d1f573fbea4ac977
|
a2ba705d17e9ac508303132f0c582039fc484731
|
/inst/snippets/pizza-domain-command.R
|
75ed8cb02d83dfaa9a112460850ae097015f10ad
|
[
"MIT"
] |
permissive
|
tidylab/ddd
|
5b6e5e6e59ebc39b10fcb3e576f9b3f5ce05f23d
|
cec12d4fc114be99581910e50a0c67cd417fa514
|
refs/heads/master
| 2023-08-26T08:34:13.103279
| 2021-01-04T23:37:46
| 2021-01-04T23:37:46
| 300,146,337
| 6
| 0
|
NOASSERTION
| 2020-12-07T22:00:59
| 2020-10-01T04:45:56
|
R
|
UTF-8
|
R
| false
| false
| 383
|
r
|
pizza-domain-command.R
|
# Register the command steps of the "pizza_ordering" domain, in workflow
# order, via the package's add_step() helper.
add_step("start_order", domain = "pizza_ordering")
add_step("add_pizza_to_order", domain = "pizza_ordering")
add_step("select_pizza_size", domain = "pizza_ordering")
add_step("add_topping", domain = "pizza_ordering")
add_step("remove_topping", domain = "pizza_ordering")
add_step("review_pizza", domain = "pizza_ordering")
add_step("confirm_order", domain = "pizza_ordering")
|
4536979cacebf8a078de18d46729055fe8b36014
|
279d1f7d6a3b8ba82b5c89c987057f40c0868fbb
|
/codebook.md.R
|
b524e6c10371e2ff2ea57a328fc47a16a783bbd1
|
[] |
no_license
|
Andrew316/Final-Assignment-week-4
|
c067c69f7c079296db8559aa868a7651ff93103b
|
7980c0e668eb3d7af925106b3574704f4985d242
|
refs/heads/main
| 2023-01-06T02:20:51.568332
| 2020-10-31T01:53:57
| 2020-10-31T01:53:57
| 307,886,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
codebook.md.R
|
The variables that I used were the featureNames and the activityLabels because
they were the two metadata files of the project within the UCI HAR dataset.
In the UCI Har Dataset there are two folders that contain the information of test
and train.
Part 1 is to merge the datasets of both train and test to create one dataset
Part 4: This variable, extracteddata, is showing the information from the data
set.
Part 5: extractedData is used and within addition of subject.
main
git add
V1: This includes the subject variable, collected from the accelerometers from the Samsung Galaxy S smartphone.
V2: The different data collected from the accelerometer and gyroscope: body, magnitude, time, frequency, time body, mean,
standard deviation, angle and gravity measurements from subjects who used the Samsung Galaxy S smartphone.
V3: The activities made for the subjects, walking, walking_upstairs, walking_downstairs, sitting, standing and laying.
|
a42aec1f9b34a3a77e844051384e72771a0ec5f2
|
f10d81106efc6330c43c82f027fdb238fed4745e
|
/TrigramGeneration.r
|
e38ce483d365fb4602f0ffb1400bd6eab92c639a
|
[] |
no_license
|
kevingscott/CouseraCapstone
|
be79303751fe592fcd26bdd17d02d2a7e6306b75
|
1c16aef1924949ce924adcf04c92097ded1ff055
|
refs/heads/master
| 2021-01-15T16:16:18.567125
| 2015-03-27T01:42:55
| 2015-03-27T01:42:55
| 32,428,184
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,596
|
r
|
TrigramGeneration.r
|
# Global state for chunked trigram counting over the corpus files.
done <- FALSE          # loop sentinel: set once a short (final) chunk is read
buffer_size <- 100000  # lines read per chunk
rows_proc <- 0         # running count of processed lines (progress printing)
global_counts <- data.frame(word = character(),prev_word = character(), freq = integer())
# RWeka tokenizer producing 3-grams ("trigrams")
TrigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
# Lower-case the input lines, strip every character except letters, spaces
# and apostrophes, then tokenize the cleaned text into trigrams.
trigram_token <- function(these_lines) {
  #endline_regex <- "[\.\?\!]\s"
  lowered <- tolower(these_lines)
  cleaned <- gsub("[^[:alpha:] ']", '', lowered)
  TrigramTokenizer(cleaned)
}
# fraction of each chunk that is sampled for counting
sample_rate <- .15

# Read `filename` in chunks of `buffer_size` lines, sample a `sample_rate`
# fraction of each chunk, tokenize it into trigrams, and append the chunk's
# trigram frequency table to `count_df`, which is returned.
# Reads the globals buffer_size and sample_rate; relies on trigram_token()
# and plyr::count() being available.
addFileToWordCount <- function(filename, count_df)
{
  con <- file(filename, 'r')
  on.exit(close(con), add = TRUE)  # fix: the connection was never closed
  done <- FALSE                    # local sentinel instead of the global
  rows_proc <- 0
  while (!done)
  {
    lines <- readLines(con, buffer_size)
    if (length(lines) != buffer_size)
      done <- TRUE
    # fix: sample from the lines actually read; the old code sampled
    # indices 1:buffer_size, which yields NA lines on the final short chunk
    keep <- sample(seq_along(lines), floor(sample_rate * length(lines)))
    lines <- lines[keep]
    tokens <- trigram_token(lines)
    counts <- count(tokens)
    counts$x <- as.character(counts$x)
    count_df <- rbind(count_df, counts)
    rows_proc <- rows_proc + length(lines)
    print(rows_proc)                 # progress indicator
  }
  return(count_df)
}
# Unimplemented stub (note the misspelled name "convertToSeperateCOlumns");
# left byte-identical since the name may be referenced elsewhere.
convertToSeperateCOlumns <- function(df){
}
# Count trigrams per corpus, aggregate duplicate trigrams with plyr::ddply,
# and checkpoint each corpus's totals to disk before freeing memory.
global_counts <-addFileToWordCount(twitter_file_name,global_counts)
sum_twitter_counts <- ddply(global_counts,'x',summarize,freq=sum(freq))
write.csv(sum_twitter_counts,file="sum_twitter_counts_tri")
sum_twitter_counts <- NULL
global_counts <- data.frame(word = character(),prev_word = character(), freq = integer())
global_counts <-addFileToWordCount(blogs_file_name,global_counts)
sum_blogs_counts <- ddply(global_counts,'x',summarize,freq=sum(freq))
write.csv(sum_blogs_counts,file="sum_blogs_counts_tri")
# NOTE(review): misspelled -- this nulls `sum_blogs_count`, leaving the real
# sum_blogs_counts object in memory (harmless but defeats the cleanup).
sum_blogs_count <- NULL
global_counts <- data.frame(word = character(),prev_word = character(), freq = integer())
global_counts <-addFileToWordCount(news_file_name,global_counts)
sum_news_counts <- ddply(global_counts,'x',summarize,freq=sum(freq))
write.csv(sum_news_counts,file="sum_newsr_counts_tri")
sum_news_counts <- NULL
# Re-load the checkpoints and combine the three corpora into one table.
sum_newsr_counts <- read.csv("~/sum_newsr_counts_tri", stringsAsFactors=FALSE)
sum_twitter_counts <- read.csv("~/sum_twitter_counts_tri", stringsAsFactors=FALSE)
sum_blogs_counts <- read.csv("~/sum_blogs_counts_tri", stringsAsFactors=FALSE)
twitter_news_counts <- rbind(sum_twitter_counts,sum_newsr_counts)
all_counts <- rbind(twitter_news_counts,sum_blogs_counts)
all_counts = data.table(all_counts[,2:3])
all_counts_sum <- all_counts[,sum(freq),by=x]
# NOTE(review): this writes the un-aggregated `all_counts`; the aggregate
# `all_counts_sum` computed on the previous line is never saved -- confirm
# which was intended.
write.csv(all_counts ,file="allr_counts_tri")
all_counts_tri <- read.csv("~/allr_counts_tri", stringsAsFactors=FALSE)
# Split each trigram string into its three words, chunk by chunk, streaming
# the result to the 'tri_final' file.
done <- FALSE
buffer_size <- 100000
rows <- 1
write_con <- file('tri_final','w')
while (!done)
{
  # fix: the old bounds rows:(rows+buffer_size) both duplicated the boundary
  # row in consecutive chunks and over-ran the final chunk (NA rows)
  chunk_end <- min(rows + buffer_size - 1, nrow(all_counts_tri))
  dt <- data.table(all_counts_tri[rows:chunk_end,])
  dt <- dt[,c("first_word","second_word","third_word"):=data.table(str_split_fixed(x," ",3))]
  write.csv(dt[,3:6,with=FALSE],write_con)
  rows <- chunk_end + 1
  print(rows)   # progress indicator
  if (rows > nrow(all_counts_tri))
    done <- TRUE
}
close(write_con)  # fix: the connection was never closed
# Build the leading-bigram key for each trigram and total the trigram
# frequencies per bigram (keyed data.table for fast lookup below).
all_counts_final <- read.csv("~/tri_final", stringsAsFactors=FALSE)
all_counts_final$bigram <- paste(all_counts_final$first_word, all_counts_final$second_word)
all_counts_final <- data.table(all_counts_final)
setkey(all_counts_final,bigram)
all_counts_final$freq <- as.numeric(all_counts_final$freq)
# unnamed aggregate: data.table names the summed column V1
total_bigram_counts <- all_counts_final[,sum(freq),by=bigram]
setkey(total_bigram_counts,bigram)
# Conditional probability of a trigram given its leading bigram: the
# trigram's count divided by the bigram's total count.
#   row[1] = bigram key, row[2] = trigram frequency
# Reads the keyed global data.table total_bigram_counts.
setPercent <- function(row){
  total <- total_bigram_counts[row[1],]
  # fix: DT[, sum(freq), by=bigram] names its column V1, not "count";
  # total$count was always NULL, so is.na(NULL) gave a zero-length
  # condition and the if() errored out.  Also guard unmatched keys (the
  # keyed lookup returns a single row with NA in V1) and zero totals.
  if (nrow(total) == 0 || is.na(total$V1) || total$V1 == 0) {
    return(0)
  }
  return (as.numeric(row[2])/total$V1)
}
# NOTE(review): this assigns a vector of length nrow(all_counts_final) to a
# column of total_bigram_counts, whose row count differs -- confirm whether
# the probabilities were meant to live on all_counts_final instead.
total_bigram_counts$probability <- apply(all_counts_final[,c(6,2),with=FALSE],1,setPercent)
|
abeda875206c4139ed7610278106d2cb754abd37
|
86542a2f8c34345dd1f86047b02672755dcf8f48
|
/8.R
|
52f46825a8606f983b17bd7ab9a7928084c43e54
|
[] |
no_license
|
jenchen08/RRRR-STUFF
|
3bc667f09f25ab5f0d05428eaa6fa3aad1589033
|
2232e6740476fa8272ee6bac16bd1ed4c4d88dee
|
refs/heads/master
| 2020-04-16T22:33:52.295568
| 2019-01-16T04:49:32
| 2019-01-16T04:49:32
| 165,973,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 243
|
r
|
8.R
|
# Monte Carlo estimate of the probability that the accumulated payout
# (years-to-53 times a Normal(1200, 300) draw) stays below 20000.
set.seed(4901)
n <- 10000
sum <- 0   # running payout for the current trial (shadows base::sum here)
sum1 <- 0  # count of trials whose payout falls short of the threshold
for (i in seq_len(n)) {
  # Entry age is uniform on [18, 25]; coverage runs until age 53.
  u <- runif(1, min = 18, max = 25)
  y <- 53 - u
  sum <- y * rnorm(1, mean = 1200, sd = 300)
  loss <- sum - 20000
  if (loss < 0) {
    sum1 <- sum1 + 1
  }
}
# Empirical probability of a shortfall.
print(sum1 / n)
|
a1610d7744f008a418d23a89253ac3b7f359f24a
|
12280abdaa1c352417797406290364bbe8109250
|
/Fig1C_AML_cytscore_flow_RNAseq_comparison.R
|
aab3b5da5b3ad741beb903ff0b3a4d6fd6c44980
|
[] |
no_license
|
ppolonen/ImmunogenomicLandscape-BloodCancer
|
aeca4e50c2bd60ec8fe4e69291e2a617b2b5f21a
|
234fd9438c68bfc11c897282508236175bb42bc3
|
refs/heads/master
| 2023-03-19T21:38:57.259707
| 2021-03-23T19:04:03
| 2021-03-23T19:04:03
| 306,509,974
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,350
|
r
|
Fig1C_AML_cytscore_flow_RNAseq_comparison.R
|
# Figure 1C: compare AML bone-marrow T/NK percentages measured by flow
# cytometry against the RNA-seq cytolytic score; Figure S1E shows T and NK
# separately.
library(dplyr)
library(ggplot2)
library(cowplot)
library(ggpubr)
library(gridExtra)

# Lymphocyte fractions out of total BM cells (gated in FlowJo), one row per
# sample, with the matched cytolytic score.
data <- read.table("aml_bm_tnk_cytscore_comparison.txt", header = TRUE)

# Combined T + NK fraction, then express all three fractions as percentages.
data$T_NK <- data$T + data$NK
data[, c("T_NK_per", "T_per", "NK_per")] <- data[, c("T_NK", "T", "NK")] * 100

# Figure 1C: scatter of cytolytic score vs combined T/NK percentage with a
# regression line, confidence band, and Spearman correlation annotation.
pdf("Figure1C_AML_cytolytic_score_flow_scatter.pdf", height = 2.5, width = 2.5)
ggscatter(data, x = "Cytolytic_score", y = "T_NK_per",
          size = 1.5,
          add = "reg.line",
          add.params = list(color = "blue", fill = "lightgray", size = 1),
          conf.int = TRUE) +
  stat_cor(method = "spearman", label.x = 2.5, label.y = 25, label.sep = "\n") +
  xlab("Cytolytic score (RNA-seq)") +
  ylab("% T/NK cells (flow cytometry)")
dev.off()

# Figure S1E: the same comparison, split into T cells and NK cells.
t <- ggscatter(data, x = "Cytolytic_score", y = "T_per",
               size = 1.5,
               add = "reg.line",
               add.params = list(color = "blue", fill = "lightgray", size = 1),
               conf.int = TRUE) +
  stat_cor(method = "spearman", label.x = 1.5, label.y = 25, label.sep = "\n") +
  xlab("Cytolytic score\n(RNA-seq)") +
  ylab("% T cells in AML BM\n(flow cytometry)") +
  ggtitle("T cells") +
  theme(plot.title = element_text(hjust = 0.5))
nk <- ggscatter(data, x = "Cytolytic_score", y = "NK_per",
                size = 1.5,
                add = "reg.line",
                add.params = list(color = "blue", fill = "lightgray", size = 1),
                conf.int = TRUE) +
  stat_cor(method = "spearman", label.x = 1.5, label.y = 7, label.sep = "\n") +
  xlab("Cytolytic score\n(RNA-seq)") +
  ylab("% NK cells in AML BM\n(flow cytometry)") +
  ggtitle("NK cells") +
  theme(plot.title = element_text(hjust = 0.5))

pdf("FigureS1E_AML_cytolytic_score_flow_TNKseparately_scatter.pdf", height = 2.5, width = 4)
grid.arrange(t, nk, ncol = 2)
dev.off()
|
f0570903c6815fc3fbf985ca1cf9e9c70d914c51
|
21d254c5de5d382c54e9bc518ab315c8526e1fc7
|
/man/DR_JonkerSA_Hill2.Rd
|
ebed5dfa3b0a4a89bd32627ac630a5e6bbfb24ba
|
[] |
no_license
|
gilles-guillot/MDR
|
7d5f2ff1b686d4444a6c72a3d9aa0964f0b6884f
|
7eb1e050f861f190312060583b61bff07601f0ad
|
refs/heads/master
| 2020-09-14T17:00:02.201598
| 2020-01-20T10:42:27
| 2020-01-20T10:42:27
| 223,192,822
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,088
|
rd
|
DR_JonkerSA_Hill2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DR_Jonker_Hill2.R
\name{DR_JonkerSA_Hill2}
\alias{DR_JonkerSA_Hill2}
\title{Bivariate dose-response function under Jonker S/A model}
\usage{
DR_JonkerSA_Hill2(d1, d2, a1, b1, a2, b2, alpha)
}
\arguments{
\item{d1}{dose of first compound: a numeric vector}
\item{d2}{dose of second compound: a numeric vector}
\item{a1}{parameter a in Hill-2 DR function 1/(1+(a/x)**b) for first compound}
\item{b1}{parameter b in Hill-2 DR function 1/(1+(a/x)**b) for first compound}
\item{a2}{parameter a in Hill-2 DR function 1/(1+(a/x)**b) for second compound}
\item{b2}{parameter b in Hill-2 DR function 1/(1+(a/x)**b) for second compound}
\item{alpha}{parameter quantifying departure from Loewe additivity in Jonker S/A model}
}
\value{
vector of numerical responses
}
\description{
Compute bivariate dose-response function when each mixture component follows a 2-parameter Hill dose-response function and the Jonker S/A model holds.
}
\references{
Jonker model S/A (Eq. 7 p. 2703 Jonker et al. Env Tox & Chem 24(10) 2005)
}
|
4d7671b48a0f7edf675ff1337c3d25f68d724ce4
|
2e580e51cf9ddb8d71851d9dc33bc160c7cdbc36
|
/tidy.R
|
550ba847fd63cda387573ad0ebb94904377bcc86
|
[] |
no_license
|
hammoire/dengue_cohort_symptom
|
8b09a69d7e7537975dc9660b7bd82cd8937dfaa0
|
3b1830ea160b359f603c7c8e0f137436e765777a
|
refs/heads/master
| 2020-07-05T16:29:25.716330
| 2019-08-16T09:34:41
| 2019-08-16T09:34:41
| 202,699,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,471
|
r
|
tidy.R
|
# Check duplicates --------------------------------------------------------
# Build a single data frame of duplicated rows (one source column per
# identifier) for manual review.
# ids lists the identifier variables that are checked for duplicates.
ids <- c("fs_id", "consent_id", "sample_collection_id_a", "sample_collection_id_c")
# Return the rows of `df` whose value in column `var` occurs more than once
# (NA values excluded), with a leading `dup_problem` column naming `var`.
select_dups <- function(df, var){
  vals <- df[[var]]
  dup_vals <- vals[duplicated(vals)]
  keep <- !is.na(vals) & vals %in% dup_vals
  out <- df[keep, ]
  out$dup_problem <- var
  # Move the flag column to the front, keeping the original columns after it.
  out[c(ncol(out), seq_len(ncol(out) - 1))]
}
# duplicate_df <- map_df(ids, ~ select_dups(fs_pcr_elisa, .x))
# Join PCR ----------------------------------------------------------------
# Collapse PCR results to one row per acute sample (the same sample may
# carry several codes/results, joined with "--") before merging, so the
# join does not fan out.
# NOTE(review): pcr_definitive and fs_samples_raw are assumed to be loaded
# earlier in the project -- confirm.
pcr_final <- pcr_definitive %>%
  group_by(sample_collection_id_acute) %>%
  summarise(sample_codes = str_c(sample_code_acute, collapse = "--"),
            result_final = str_c(result_final, collapse = "--"))
fs_pcr <- fs_samples_raw %>%
  select(-consent_id, -case_date, -first_symptom_date, -prior_dengue) %>%
  left_join(pcr_final, by = c("sample_collection_id_a" = "sample_collection_id_acute"))
# Join ELISA --------------------------------------------------------------
# Split ELISA results into a nested list: immunoglobulin (IgM/IgG) at the
# top level, target pathogen below.
elisa_list <- elisa_raw %>%
  select(sample_collection_id, sample_code, target_pathogen, result, titer, immunoglobulin) %>%
  split(.$immunoglobulin) %>%
  map(~ .x %>% split(.$target_pathogen))
# Rejoin all IgM pathogens side by side, keyed on sample_code.
elisa_all_igm <- Reduce(function(...) full_join(..., by='sample_code', all.x=TRUE), elisa_list$IgM)
# Collapse IgM DENV results to one row per sample_collection_id (the same
# sample may carry several codes), concatenating with "--".
elisa_igm_den <- elisa_list$IgM$DENV %>%
  group_by(sample_collection_id) %>%
  summarise(result = str_c(result, collapse = "--"),
            titer = str_c(titer, collapse = "--"))
# Strip the "1:" / "1/" prefix from titre strings, leaving the dilution.
titre_tidy <- function(x){str_replace(x, "1:|1/", "")}
# Attach acute and convalescent IgM results to the PCR table, normalize
# repeated "negative--negative"/"positive--positive" strings, and derive a
# serology call (serol) and a combined PCR+serology call (combo).
# NOTE(review): the case_when branches are order-dependent -- earlier
# conditions win; do not reorder without re-checking the logic.
fs_pcr_elisa <- fs_pcr %>%
  left_join(elisa_igm_den %>% select(sample_collection_id, result_igm_acute = result, titer_igm_acute = titer),
            by = c("sample_collection_id_a" = "sample_collection_id")) %>%
  left_join(elisa_igm_den %>% select(sample_collection_id, result_igm_con = result, titer_igm_con = titer),
            by = c("sample_collection_id_c" = "sample_collection_id")) %>%
  mutate_at(vars(matches("titer")), titre_tidy) %>%
  mutate(conv_doi = second_specimen_date - first_specimen_date,
         result_igm_acute = str_replace(result_igm_acute, "negative--negative", "negative"),
         result_igm_con = str_replace(result_igm_con, "negative--negative", "negative"),
         titer_igm_acute = str_replace(titer_igm_acute, "negative--negative", "negative"),
         titer_igm_con = str_replace(titer_igm_con, "negative--negative", "negative"),
         titer_igm_acute = str_replace(titer_igm_acute, "positive--positive", "positive"),
         titer_igm_con = str_replace(titer_igm_con, "positive--positive", "positive"),
         result_igm_acute = str_replace(result_igm_acute, "positive--positive", "positive"),
         result_igm_con = str_replace(result_igm_con, "positive--positive", "positive"),
         serol = case_when(is.na(result_igm_con) & is.na(result_igm_acute) ~ "NA",
                           (result_igm_acute == "negative" & result_igm_con == "negative") |
                             (titer_igm_acute == "negative" & titer_igm_con == "negative") ~ "NEG",
                           titer_igm_acute == "100" & titer_igm_con == "100" ~ "100",
                           (titer_igm_acute == "100" & is.na(titer_igm_con)) | (titer_igm_acute == "100" & titer_igm_con == "negative") ~ "100A",
                           (titer_igm_con == "100" & is.na(titer_igm_acute)) | (titer_igm_acute == "negative" & titer_igm_con == "100") ~ "100C",
                           as.numeric(titer_igm_con)/as.numeric(titer_igm_acute) >= 4 |
                             titer_igm_acute == "negative" & as.numeric(titer_igm_con) > 100 |
                             result_igm_acute == "negative" & as.numeric(titer_igm_con) > 100 ~ "SEROC",
                           as.numeric(titer_igm_acute) > 100 & is.na(titer_igm_con) ~ "ACUTE",
                           as.numeric(titer_igm_acute) > as.numeric(titer_igm_con) |
                             (as.numeric(titer_igm_acute) > 100 & titer_igm_con == "negative") ~ "DECLINE",
                           as.numeric(titer_igm_acute) < as.numeric(titer_igm_con) ~ "INCREASE",
                           titer_igm_acute == titer_igm_con ~ "SAME",
                           result_igm_acute == "negative" & titer_igm_con == "negative" ~ "NEG",
                           titer_igm_acute == "negative" & result_igm_con == "negative" ~ "NEG",
                           (result_igm_acute == "negative" | titer_igm_acute == "negative") & is.na(result_igm_con) ~ "NEG_NO_CON",
                           result_igm_con == "negative" & is.na(result_igm_acute) ~ "NEG_NO_ACU",
                           str_detect(titer_igm_con, "\\d") ~ "CON",
                           str_detect(titer_igm_acute, "100") ~ "100A",
                           str_detect(titer_igm_acute, "\\d") ~ "CON")) %>%
  mutate(combo = case_when(str_detect(result_final, "DEN|ZIK") ~ result_final,
                           serol %in% c("ACUTE", "CON", "SEROC") ~ "SEROC",
                           result_final == "NEG" | serol == "NEG" ~ "NEG"),
         combo = case_when(combo == "DEN2--DEN2" ~ "DEN2",
                           combo == "DEN3--DEN3" ~ "DEN3",
                           combo == "DEN4--DEN4" ~ "DEN4",
                           TRUE ~ combo)) %>%
  # filter(!is.na(combo)) %>%
  # One row per participant: sample_n(1) picks a random row per fs_id.
  group_by(fs_id) %>%
  sample_n(1) %>%
  select(sex, birthdate, fs_id, result = combo, participant_codes)
# NOT CLEAN ---------------------------------------------------------------
# Attach daily follow-up surveys and compute day of illness (doi) relative
# to first symptom.
test <- fs_pcr_elisa %>%
  inner_join(fs_raw, by = "fs_id") %>%
  mutate(doi = date - first_symptom_date) %>%
  arrange(fs_id, doi)
# Dummify symptom_daily ---------------------------------------------------
# Build the vector of unique symptom labels seen in the data (symptoms are
# stored as a "; "-separated string per day).
fs_symptoms_vec <- str_split(test$symptoms_daily, "; ")
fs_symptoms_vec <- sort(unique(unlist(fs_symptoms_vec)))
fs_symptoms_vec <- fs_symptoms_vec[fs_symptoms_vec != "Asymptomatic" & fs_symptoms_vec != "None"]
# Parenthesised suffixes would be treated as regex groups by str_detect,
# so strip them (and anything after) from the labels.
fs_symptoms_vec <- str_trim(str_replace_all(fs_symptoms_vec, "\\(.*\\).*", ""))
# One logical column per symptom: did that day's survey mention it?
symp_list <- map(fs_symptoms_vec, function(x){
  str_detect(test$symptoms_daily, x)
})
# Short snake_case names, in the same (sorted) order as fs_symptoms_vec.
symp_names <- {c("abdominal_pain", "back_pain", "gum_bleed",
                 "blood_stools", "body_pain", "chest_pain",
                 "chills", "cough", "diarrhea", "dizziness",
                 "bad_taste", "ear_pain", "nose_bleed", "sputum",
                 "retro_orbital_pain", "fever", "headache",
                 "hematemesis", "hepatomegaly", "itch",
                 "jaundice", "joint_pain", "anorexia",
                 "maculopapular_rash", "muscle_pain", "rhinorrhea",
                 "nausea", "petechiae", "photophobia", "purpura",
                 "erysipelas", "sore_throat", "splenomegaly",
                 "vaginal_bleeding", "vomiting", "weakness_malaise")}
names(symp_list) <- symp_names
# Bind the dummy columns back onto the survey rows.
symp_df <- do.call(bind_cols, symp_list)
pd_symptoms_dummy <- bind_cols(test, symp_df) %>%
  mutate(day_illness = as.integer(date - first_symptom_date))
# Create matrix template --------------------------------------------------
# Build a complete (participant x illness-day) grid for days 1-20 so that
# days with no completed survey still appear as rows.
consent_id <- unique(pd_symptoms_dummy$consent_id) #list of all participants
consent_id_rep <- rep(consent_id, each = 20) #vector of repeat study (joining var 1)
day_illness <- rep(1:20, length(consent_id)) #vector of illness days (joining var 2)
# tibble() replaces the deprecated data_frame() alias (identical behaviour).
fs_matrix_template <- tibble(consent_id = consent_id_rep,
                             day_illness = day_illness) %>%
  left_join(test %>% select(consent_id, result) %>% distinct(), by = "consent_id")
# Rejoin the original data to the template. Days may be missing because the
# forms were not always completed up to the final date.
fs_matrix <- fs_matrix_template %>%
  left_join(pd_symptoms_dummy, by = c("consent_id", "day_illness"))
# Fill absent days with FALSE: surveys stop once symptoms are no longer
# reported, so a missing day means "no symptom recorded".
fs_matrix[symp_names] <- map(fs_matrix[symp_names], function(x){
  case_when(is.na(x) ~ FALSE, TRUE ~ x)
})
# any_symp: whether any symptom was experienced on that day.
fs_matrix$any_symp <- apply(fs_matrix[symp_names], MARGIN = 1, any, na.rm= TRUE)
# total_symp: number of symptoms experienced on that day.
fs_matrix$total_symp <- apply(fs_matrix[symp_names], MARGIN = 1, sum, na.rm= TRUE)
# Drop participants reporting no symptoms on day 1 of illness.
no_symptoms_day1 <- fs_matrix$consent_id[fs_matrix$day_illness == 1 & fs_matrix$total_symp == 0]
fs_matrix <- fs_matrix %>%
  filter(!consent_id %in% no_symptoms_day1)
|
590f024e6541dabcac98085e8281f7348a783fb8
|
4ea08d36b916aa8388e28c44c04d2d4c0e4577ff
|
/knntest2.R
|
3bb0f24c2d216b809cb43e84bb762a3c3921bfc1
|
[] |
no_license
|
anhnguyendepocen/econoknn
|
73b02c3b5dde7ad44fce6686fbf99002856f67aa
|
e08f2b8615e948b2347ea957d4e594b826107f48
|
refs/heads/master
| 2021-09-06T02:28:28.605024
| 2018-02-01T18:05:36
| 2018-02-01T18:05:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,791
|
r
|
knntest2.R
|
# Load the mortality panel, restrict to one age group, attach the demeaned
# outcome/regressor from a cached RData file, and precompute the rank
# variables used by the KNN routines below.
setwd("~/research/gcp/econoknn")
library(readstata13)
library(Hmisc)
source("knnalg2.R")
# Set to TRUE to regenerate the demeaned data cache. (Style note: prefer
# FALSE over F.)
redo.demean <- F
df <- read.dta13("/home/solomon/Dropbox/GCP_Reanalysis/interpolation/data/consolidated/mortality_AEA_time-variant-inc_2.dta")
df <- subset(df, agegrp == 33)
df <- subset(df, !is.na(precip1_GMFD) & !is.na(precip2_GMFD))
if (redo.demean) {
  # Fixed effects and precipitation interactions to partial out.
  factorouts <- c('precip1_GMFD : factor(adm1)', 'precip2_GMFD : factor(adm1)', 'factor(adm2)', 'factor(adm1) : factor(year)')
  save.demean("time-variant.RData", df, 'deathrate', 'GMFD_poly1', factorouts)
}
# Loads dmyy (demeaned outcome) and dmxxs (demeaned regressors) -- names
# come from save.demean() in knnalg2.R.
load("time-variant.RData")
df$dmyy <- dmyy
df$dmxx <- dmxxs[,1]
# Rank transforms used as the KNN distance coordinates.
df$income.rank <- rank(df$gdppcstate)
df$climtas.rank <- rank(df$Tmeanstate_GMFD)
df$temp.rank <- rank(df$GMFD_poly1)
# Observation count per adm1 region, indexed by adm1 id.
adm1sizes <- sapply(1:max(df$adm1), function(adm1) sum(df$adm1 == adm1))
# Neighborhood size for the KNN regressions.
KK <- 750
# Fit a no-intercept regression of the demeaned death rate on the demeaned
# temperature regressor within the adm1 region nearest (in rank space) to
# the query point. Returns c(slope, variance of slope).
# Relies on globals df, adm1sizes and KK defined above.
# NOTE(review): only the single adm1 whose cumulative size first exceeds KK
# is used -- confirm this (rather than all nearer adm1s) is intended.
get.knn.beta <- function(income.rank, climtas.rank, temp.rank) {
  sq.dists <- (income.rank - df$income.rank)^2 +
    (climtas.rank - df$climtas.rank)^2 +
    (temp.rank - df$temp.rank)^2
  # adm1 regions of the KK nearest observations, nearest first, deduplicated.
  nearest.adm1 <- unique(df$adm1[order(sq.dists)[1:KK]])
  cum.sizes <- cumsum(adm1sizes[nearest.adm1])
  chosen <- nearest.adm1[which(cum.sizes > KK)[1]]
  fit <- lm(dmyy ~ 0 + dmxx, df[df$adm1 %in% chosen, ])
  c(coef(fit), vcov(fit))
}
# Trace a dose-response curve in daily temperature from tas0 to tas1,
# anchored at zero response at tas0. For each grid point the local KNN
# slope is scaled by the distance from the anchor. Uses global df.
get.knn.curve <- function(income.rank, climtas.rank, tas0, tas1, length.out) {
  # Anchor row: zero response and zero variance at the reference temperature.
  curve <- data.frame(tas=tas0, deathrate=0, var=0)
  grid <- seq(tas0, tas1, length.out=length.out)[-1]
  for (tas in grid) {
    # Empirical temperature rank closest to this annual (x365) total.
    gaps <- abs(tas * 365 - df$GMFD_poly1)
    temp.rank <- mean(df$temp.rank[which(gaps == min(gaps))])
    betavar <- get.knn.beta(income.rank, climtas.rank, temp.rank)
    delta <- tas - tas0
    deathrate <- betavar[1] * delta
    var <- betavar[2] * delta^2
    curve <- rbind(curve, data.frame(tas, deathrate, var))
  }
  curve
}
# Evaluate the curve at the 25th/50th/75th percentiles of income and of
# state climate, giving a 3x3 ("nonant") grid of response curves.
income.ranks <- quantile(df$income.rank, c(.25, .5, .75))
climtas.ranks <- quantile(df$climtas.rank, c(.25, .5, .75))
income.values <- quantile(df$gdppcstate, c(.25, .5, .75), na.rm=T)
climtas.values <- quantile(df$Tmeanstate_GMFD, c(.25, .5, .75))
results <- data.frame(tas=c(), deathrate=c(), var=c(), income=c(), climtas=c())
for (zz1 in 1:3) {
  for (zz2 in 1:3) {
    # Curve over -6..46 C in 27 steps for this income/climate cell.
    curve <- get.knn.curve(income.ranks[zz1], climtas.ranks[zz2], -6, 46, 27)
    curve$income <- round(income.values[zz1], -1)
    curve$climtas <- round(climtas.values[zz2], 1)
    results <- rbind(results, curve)
  }
}
# Order income facets from richest to poorest; +/- 1 SE ribbon bounds.
results$income <- factor(results$income, rev(sort(unique(results$income))))
results$ymin <- results$deathrate - sqrt(results$var)
results$ymax <- results$deathrate + sqrt(results$var)
# all.smooth() comes from knnalg2.R (sourced above).
ggplot(all.smooth(results, 'tas', c('deathrate', 'ymin', 'ymax'), c('climtas', 'income'), span=.2), aes(x=tas)) +
  facet_grid(income ~ climtas, scales="free_y") +
  geom_line(aes(y=deathrate)) +
  geom_ribbon(aes(ymin=ymin, ymax=ymax), alpha=.4) +
  xlab("Temperature") + ylab("Death Rate") +
  scale_x_continuous(expand=c(0, 0)) + theme_minimal()
ggsave("knn-nonant.pdf", width=7, height=5)
## Split out by country
# Two-letter ISO codes (European adm regions) are pooled as "EUR".
df$myiso <- df$iso
df$myiso[nchar(df$iso) == 2] <- "EUR"
for (myiso in unique(df$myiso)) {
  if (myiso == "JPN")
    next
  subdf <- df[df$myiso == myiso,]
  # Country-specific versions of the KNN helpers: identical logic to the
  # global ones above but restricted to subdf.
  # NOTE(review): adm1sizes is still the global (all-country) count vector,
  # indexed by adm1 id -- confirm that is intended here.
  get.knn.beta <- function(income.rank, climtas.rank, temp.rank) {
    dists <- (income.rank - subdf$income.rank)^2 + (climtas.rank - subdf$climtas.rank)^2 + (temp.rank - subdf$temp.rank)^2
    adm1order <- subdf$adm1[order(dists)[1:KK]]
    adm1order <- adm1order[!duplicated(adm1order)]
    adm1total <- cumsum(adm1sizes[adm1order])
    adm1s <- adm1order[which(adm1total > KK)[1]]
    mod <- lm(dmyy ~ 0 + dmxx, subdf[subdf$adm1 %in% adm1s,])
    c(coef(mod), vcov(mod))
  }
  get.knn.curve <- function(income.rank, climtas.rank, tas0, tas1, length.out) {
    result <- data.frame(tas=tas0, deathrate=0, var=0)
    for (tas in seq(tas0, tas1, length.out=length.out)[-1]) {
      dists <- abs(tas * 365 - subdf$GMFD_poly1)
      temp.rank <- mean(subdf$temp.rank[which(dists == min(dists))])
      betavar <- get.knn.beta(income.rank, climtas.rank, temp.rank)
      deathrate <- betavar[1] * (tas - tas0)
      var <- betavar[2] * (tas - tas0)^2
      result <- rbind(result, data.frame(tas, deathrate, var))
    }
    result
  }
  # Same 3x3 income-by-climate grid as the pooled analysis, per country.
  results <- data.frame(tas=c(), deathrate=c(), var=c(), income=c(), climtas=c())
  for (zz1 in 1:3) {
    for (zz2 in 1:3) {
      curve <- get.knn.curve(income.ranks[zz1], climtas.ranks[zz2], -6, 46, 27)
      curve$income <- round(income.values[zz1], -1)
      curve$climtas <- round(climtas.values[zz2], 1)
      results <- rbind(results, curve)
    }
  }
  results$income <- factor(results$income, rev(sort(unique(results$income))))
  results$ymin <- results$deathrate - sqrt(results$var)
  results$ymax <- results$deathrate + sqrt(results$var)
  ggplot(all.smooth(results, 'tas', c('deathrate', 'ymin', 'ymax'), c('climtas', 'income'), span=.2), aes(x=tas)) +
    facet_grid(income ~ climtas) +
    geom_line(aes(y=deathrate)) +
    geom_ribbon(aes(ymin=ymin, ymax=ymax), alpha=.4) +
    xlab("Temperature") + ylab("Death Rate") +
    scale_x_continuous(expand=c(0, 0)) + theme_minimal()
  ggsave(paste0("knn-nonant-", myiso, ".pdf"), width=7, height=5)
}
## Uninteracted
# Variant with distance on temperature rank only (no income/climate
# interaction); these definitions overwrite the interacted helpers above.
get.knn.beta <- function(temp.rank) {
  dists <- (temp.rank - df$temp.rank)^2
  adm1order <- df$adm1[order(dists)[1:KK]]
  adm1order <- adm1order[!duplicated(adm1order)]
  adm1total <- cumsum(adm1sizes[adm1order])
  adm1s <- adm1order[which(adm1total > KK)[1]]
  mod <- lm(dmyy ~ 0 + dmxx, df[df$adm1 %in% adm1s,])
  c(coef(mod), vcov(mod))
}
get.knn.curve <- function(tas0, tas1, length.out) {
  result <- data.frame(tas=tas0, deathrate=0, var=0)
  for (tas in seq(tas0, tas1, length.out=length.out)[-1]) {
    dists <- abs(tas * 365 - df$GMFD_poly1)
    temp.rank <- mean(df$temp.rank[which(dists == min(dists))])
    betavar <- get.knn.beta(temp.rank)
    deathrate <- betavar[1] * (tas - tas0)
    var <- betavar[2] * (tas - tas0)^2
    result <- rbind(result, data.frame(tas, deathrate, var))
  }
  result
}
results <- get.knn.curve(-6, 46, 27)
results$ymin <- results$deathrate - sqrt(results$var)
results$ymax <- results$deathrate + sqrt(results$var)
# Single dummy group so all.smooth() has a grouping column.
results$group <- T
ggplot(all.smooth(results, 'tas', c('deathrate', 'ymin', 'ymax'), 'group'), aes(x=tas)) +
  geom_line(aes(y=deathrate)) +
  geom_ribbon(aes(ymin=ymin, ymax=ymax), alpha=.4) +
  xlab("Temperature") + ylab("Death Rate") +
  scale_x_continuous(expand=c(0, 0)) + theme_minimal()
ggsave(paste0("knn-uninteracted.pdf"), width=7, height=5)
|
d2fef747d93e410f13a36a28ae07faa9966689c8
|
b0917fe842734573b056276767f233ab1696dd62
|
/inst/testpackages/testpkg0/R/bad_coding.R
|
0585c0d9614cce2fc7b863f1c4ca81b41b8ba7c8
|
[] |
no_license
|
aljabadi/BiocCheck
|
53c788308cc6e0f52bb1b66d73d3c589e97d56e8
|
a4dec415d595c11ee0eb2c2e1bff5cc36e89b2c1
|
refs/heads/master
| 2023-04-19T10:15:10.423997
| 2021-04-14T01:30:43
| 2021-04-14T01:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 823
|
r
|
bad_coding.R
|
# Deliberately *bad* code: this file lives in BiocCheck's bundled test
# package (inst/testpackages/testpkg0), so the defects below are the
# fixture the checker is exercised against. Do NOT "fix" them.

# Triggers checks for update.packages(), 1:n-style loops, and sapply().
bad_fun <- function(){
  update.packages("base")
  for (i in 1:10) {
    print("test")
  }
  sapply(letters, function(x) x)
}
# Triggers checks for paste()/paste0() inside message/warning/stop.
pasteTest <- function() {
  message(paste0("A", "B"))
  message ( paste("A", "B"))
  warning(paste("A", "B"), "C")
  warning(paste0("A", "B"), "C")
  stop("A", paste0("B", "C"))
  stop("A", paste("B", "C"))
}
# Triggers checks for ':' used where '::' may have been intended.
invalid_ref <- function() {
  bcheck <- BiocCheck:BiocCheck
  red <- 1
  xbluex <- 3
  bcheck <- xbluex:red
}
# Triggers checks for hard-coded hosting-service download URLs.
bad_dl <- function() {
  dataurl <- "https://raw.githubusercontent.com/file"
  githurl <- "https://github.com/tree/master/"
  githurl <- "https://dropbox.com/data?dl=1"
  laburl <- "https://gitlab.com/raw/master/data"
  bucketurl <- "https://bitbucket.org/test/raw/file"
  download.file(dataurl, destfile = tempfile())
}
|
c6e36c56ce1a8d500f4d61feae991c5b13a5161c
|
9e2296d74051d725efcc28cab16ca7703c8a6c1b
|
/man/set_app_parameters.Rd
|
e96196d36b98cb649d9aff962a976454f85a5a54
|
[] |
no_license
|
neuhausi/periscope
|
59f5d74cc7e399a9a9e03e19199409a6438a4a91
|
e0364b0db9b9bbcbc4b6c295bbbb6fa1d1d65fd4
|
refs/heads/master
| 2023-07-06T05:44:50.295396
| 2023-07-03T21:39:01
| 2023-07-03T21:39:01
| 171,934,957
| 27
| 1
| null | 2023-07-03T21:39:02
| 2019-02-21T19:49:03
|
R
|
UTF-8
|
R
| false
| true
| 1,588
|
rd
|
set_app_parameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ui_helpers.R
\name{set_app_parameters}
\alias{set_app_parameters}
\title{Set Application Parameters}
\usage{
set_app_parameters(
title,
titleinfo = NULL,
loglevel = "DEBUG",
showlog = TRUE,
app_version = "1.0.0"
)
}
\arguments{
\item{title}{application title text}
\item{titleinfo}{character string, HTML value or NULL
\itemize{
\item{A \strong{character} string will be used to set a link target. This means the user
will be able to click on the application title and be redirected in a new
window to whatever value is given in the string. Any valid URL, File, or
other script functionality that would normally be accepted in an <a href=...>
tag is allowed.}
\item{An \strong{HTML} value will be used to as the HTML content for a modal pop-up
window that will appear on-top of the application when the user clicks on the
application title.}
\item{Supplying \strong{NULL} will disable the title link functionality.}
}}
\item{loglevel}{character string designating the log level to use for
the userlog (default = 'DEBUG')}
\item{showlog}{enable or disable the visible userlog at the bottom of the
body of the application. Logging will still take place; this setting disables
the visible functionality only.}
\item{app_version}{character string designating the application version (default = '1.0.0').}
}
\description{
This function sets global parameters customizing the shiny application.
}
\section{Shiny Usage}{
Call this function from \code{program/global.R} to set the application
parameters.
}
|
5f0ecd2e15c8aff5556b25ad70a26a9e311fb045
|
8a1c37df68b008fd45c20306f82fab720e881454
|
/R/qqPlotDemo.R
|
a13d044fa37024bfdd0ef09e9c6a41c47862089c
|
[] |
no_license
|
cran/CarletonStats
|
80a126daaf4b85a6f37bcffb74727f4e6e23b8a5
|
ca18949778d921fceb85adbf2928fde75e5a2c08
|
refs/heads/master
| 2023-08-31T10:54:45.273600
| 2023-08-22T16:50:09
| 2023-08-22T18:30:35
| 21,466,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,805
|
r
|
qqPlotDemo.R
|
#' Demonstration of the normal qq-plot.
#'
#' Demonstrate the normal quantile-quantile plot for samples drawn from
#' different populations.
#'
#' Draw a random sample from the chosen sample and display the normal qq-plot
#' as well as the histogram of its distribution.
#'
#' @param n sample size
#' @param distribution population distribution. Options are \code{"normal"},
#' \code{"t"},\code{"exponential"}, \code{"chi.square"}, \code{"F"} or
#' \code{"beta"} (partial matches are accepted).
#' @param mu mean for the normal distribution.
#' @param sigma (positive) standard deviation for the normal distribution.
#' @param df (positive) degrees of freedom for the t-distribution.
#' @param lambda positive rate for the exponential distribution.
#' @param numdf (positive) numerator degrees of freedom for the chi-square
#' distribution.
#' @param dendf (positive) denominator degrees of freedom for the chi-square
#' distribution.
#' @param shape1 positive parameter for the beta distribution (shape1 = a).
#' @param shape2 positive parameter for the beta distribution (shape2 = b).
#' @return Returns invisibly the random sample.
#' @author Laura Chihara
#' @keywords normal quantile-quantile plot
#' @examples
#'
#' qqPlotDemo(n = 30, distr = "exponential", lambda = 1/3)
#'
#'
#' @importFrom stats rchisq rnorm rt rexp rf rbeta qqnorm qqline
#' @importFrom graphics hist par title
#' @export
qqPlotDemo <-
  function(n = 25, distribution = "normal", mu = 0, sigma = 1, df = 10,
           lambda = 10, numdf = 10, dendf = 16, shape1 = 40, shape2 = 5)
{
  # Draw a random sample from the requested distribution and show its
  # histogram together with the normal quantile-quantile plot.
  distr <- pmatch(distribution, c("normal", "t", "exponential", "chi.square", "F", "beta"), nomatch = NA)
  if (is.na(distr)) stop("Distribution must be one of \"normal\", \"t\", \"exponential\", \"chi.square\", \"F\", or \"beta\" ")
  if (sigma <= 0 || df <= 0 || lambda <= 0 || numdf <= 0 || dendf <= 0 || shape1 <= 0 || shape2 <= 0) stop("Parameter must be positive.")
  # switch() on the pmatch index selects the matching branch by position.
  x <- switch(distr,
              normal = rnorm(n, mu, sigma),
              t = rt(n, df),
              exponential = rexp(n, rate = lambda),
              chi.square = rchisq(n, df),
              F = rf(n, numdf, dendf),
              beta = rbeta(n, shape1, shape2))
  # Full distribution name for the plot title.
  distr.expand <- char.expand(distribution, c("normal", "t", "exponential", "chi.square", "F", "beta"), nomatch = warning("No match"))
  par.old <- par(mfrow = c(2, 1), mar = c(2.1, 4.1, 2, 2), cex.main = .8, cex.axis = .8, cex.lab = .8)
  # Register the restore immediately so par() settings are reverted even if
  # plotting below fails (previously this was done after the plots).
  on.exit(par(par.old), add = TRUE)
  hist(x, main = "", xlab = "")
  title(paste("Sample size ", n , "; ", distr.expand, "distribution", sep=" "))
  par(mar = c(4.1, 4.1, 2, 3))
  qqnorm(x)
  qqline(x)
  invisible(x)
}
|
14daaae684d714e2591d8221a83d8c9704ecdbf0
|
81057a4f9fc81d3b285081bb7a7066abe37f7965
|
/doi-bandit-planner/app.R
|
de1d4f0bbbf58f5de2cb107dfb541f106387a894
|
[] |
no_license
|
carlos-chre-work/Practicing
|
f22dfd1a02796f1c1e726815f3aae9edee92abcf
|
e61551826a2cdef8ddd8de80742b87eeab0170d1
|
refs/heads/master
| 2020-07-07T00:13:02.907008
| 2019-08-20T13:01:40
| 2019-08-20T13:01:40
| 203,182,273
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,770
|
r
|
app.R
|
# Package dependencies for the bandit-planner shiny app.
library(shiny)
library(ggplot2)
library(dplyr)
library(tidyr)
# purrr provides map_dbl(), used in server() below; it was referenced but
# never attached, which would fail at render time.
library(purrr)
# Simulation/plot helpers (run_simulation, tidy_cumulative_cost,
# plot_cumulative_cost). NOTE(review): the path has no .R extension --
# confirm the helper file's actual name.
source("functions")
# UI for the bandit test planner: inputs for the total traffic budget, the
# control conversion rate, up to six alternative variant rates (sliders
# shown/hidden via conditionalPanel), batching, and reward lag; output is
# a single cost plot.
ui <- fluidPage(

  # Application title
  titlePanel("Day Of Innovation: Shiny Test Planner"),

  # Sidebar with the planner's input controls
  sidebarLayout(
    sidebarPanel(
      numericInput(
        "sessions",
        "Total Sessions Available for Testing",
        value = 1000,
        min = 100
      ),
      sliderInput(
        "controlRate",
        "Control Rate",
        value = 0.05,
        min = 0.0,
        max = 1.0,
        step = 0.001
      ),
      numericInput(
        "numArms",
        "Number of Alternative Variants",
        value = 1,
        min = 1,
        max = 6
      ),
      h3("Alternative Rates"),
      # One rate slider per variant; sliders 2-6 only appear when numArms
      # is large enough (see the conditionalPanel conditions).
      sliderInput(
        "rate1",
        "Variant 1",
        value = 0.05,
        min = 0,
        max = 1,
        step = 0.001
      ),
      conditionalPanel(
        "input.numArms > 1",
        sliderInput(
          "rate2",
          "Variant 2",
          value = 0.05,
          min = 0,
          max = 1,
          step = 0.001
        )
      ),
      conditionalPanel(
        "input.numArms > 2",
        sliderInput(
          "rate3",
          "Variant 3",
          value = 0.05,
          min = 0,
          max = 1,
          step = 0.001
        )
      ),
      conditionalPanel(
        "input.numArms > 3",
        sliderInput(
          "rate4",
          "Variant 4",
          value = 0.05,
          min = 0,
          max = 1,
          step = 0.001
        )
      ),
      conditionalPanel(
        "input.numArms > 4",
        sliderInput(
          "rate5",
          "Variant 5",
          value = 0.05,
          min = 0,
          max = 1,
          step = 0.001
        )
      ),
      conditionalPanel(
        "input.numArms > 5",
        sliderInput(
          "rate6",
          "Variant 6",
          value = 0.05,
          min = 0,
          max = 1,
          step = 0.001
        )
      ),
      h3("todo: change to sessions/day later"),
      numericInput(
        "batchSize",
        "number of sessions in a batch",
        value = 10,
        min = 1
      ),
      numericInput(
        "lagNumber",
        "number of batches until we expect a reward",
        value = 4,
        min = 0
      )
    ),

    # Main panel: cumulative-cost plot produced by server().
    mainPanel(
      plotOutput("costPlot")
    )
  )
)
# Server: run the bandit simulation for the configured rates and render the
# cumulative-cost plot.
server <- function(input, output) {
  output$costPlot <- renderPlot({
    # Collect the variant-rate sliders; only the first numArms are used.
    # NOTE(review): map_dbl() is from purrr, which is not attached by the
    # library() calls above -- confirm it is loaded before running.
    testRates <- map_dbl(1:6, ~input[[paste0('rate', .x)]])[1:input$numArms]
    rates <- c(input$controlRate, testRates)
    numberOfBatches <- floor(input$sessions/input$batchSize)
    # run_simulation / tidy_cumulative_cost / plot_cumulative_cost come
    # from source("functions") at the top of this file.
    run_simulation(
      rates,
      input$controlRate,
      input$sessions,
      input$batchSize,
      numberOfBatches,
      input$lagNumber
    ) %>%
      tidy_cumulative_cost() %>%
      plot_cumulative_cost()
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
fd5bfd32d0cb197a5e6d9c77d9de84502f3f212a
|
e5036c7f2d13b1cea8b010acaee53ce34074b918
|
/man/GenUD_MS.Rd
|
046971cdfcf8510129fadc6c5faf7846e9a08e17
|
[] |
no_license
|
cran/UniDOE
|
28c90515ebf5139cef7e3559b75f0e183834971b
|
8e3ba53b85975865c17641e91b6d495018cad175
|
refs/heads/master
| 2021-09-12T16:19:30.056341
| 2018-04-18T12:22:01
| 2018-04-18T12:22:01
| 112,484,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,457
|
rd
|
GenUD_MS.Rd
|
\name{GenUD_MS}
\alias{GenUD_MS}
\title{Generating Uniform Design of Experiments using different initial designs}
\usage{
GenUD_MS(n, s, q, crit, maxiter, nshoot, vis)
}
\description{
This function takes n,s,q,crit and nshoot to return a design. nshoot number of random initial designs are used in each shoot. The design returned is the best design over all shoots.
}
\arguments{
\item{n}{an integer R object. Run of Experiment}
\item{s}{an integer R object. Factor of Experiment.}
\item{q}{an integer R object. Level of Experiment.}
\item{crit}{an character R object. Type of criterion to use.
"maximin" -- maximin Discrepancy ;
"CD2" --Centered L2 Discrepancy ;
"WD2" -- Wrap-around L2 Discrepancy;
"MD2" --Mixture L2 Discrepancy ;}
\item{maxiter}{a positive integer R object. Maximum iteration number in outer while loop of SATA algorithm in each shoot.}
\item{nshoot}{Total counts to try different initial designs.}
\item{vis}{an boolean R object. If true, plot the criterion value sequence for all shoots.}
}
\value{
Best design over all shoots.
}
\author{
Aijun Zhang, Haoyu Li, Shijie Quan
}
\references{
Zhang, A. and Li, H. (2017). UniDOE: An R package for constructing uniform design of experiments via stochastic and adaptive threshold accepting algorithm. Technical Report.
}
\examples{
D = GenUD_MS(36, 4, 6, crit="CD2",
maxiter=50, nshoot = 6,
vis=TRUE)
}
|
b217282a633538f73f925690f31266b0f6973ee8
|
6d4f31e1c8e619810260ea3d67a043731b456d1c
|
/Rscripts/drying.R
|
6ee67a2d284af68ce7ec88ac3e46e066902d58d8
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
BIOL548O/macdonald_andrew
|
bc35b7ae18cb65b8a2033eb3ba88fd9f46457376
|
acbde2d1975f74aba6f4be6f723917c2ea71cb62
|
refs/heads/master
| 2020-04-09T12:44:33.256793
| 2016-03-03T23:17:43
| 2016-03-03T23:17:43
| 51,113,120
| 0
| 1
| null | 2016-03-04T00:19:20
| 2016-02-04T23:23:53
|
R
|
UTF-8
|
R
| false
| false
| 397
|
r
|
drying.R
|
# drying rate survey
# Andrew MacDonald March 2012

# load packages ---------------------------------------
library(readr)

# read in raw data ------------------------------------
# The raw CSV contains '#' comment lines, which are skipped via
# comment.char in base read.table().
dry.survey <- read.table("data-raw/drying.survey.csv",
                         comment.char = "#",sep = ",",
                         header = TRUE)
# NOTE(review): export() is not provided by readr; this looks like
# rio::export() writing a CSVY file -- confirm rio is attached elsewhere.
export(dry.survey, "data/water_survey.csvy", comment = FALSE)
|
71139e0c160c3b7408711080fcc504db3cbd530d
|
cfaf35c07adaf9393c519e72e32b773952789035
|
/R_profile_helper.R
|
f6afdb5a3d293518acff954f21b3300ca1daf0f8
|
[] |
no_license
|
GrahamDB/testing_selac
|
25ba9c70fbb05c9cf75a0cd688f34cf15660b1a0
|
663ad0563cc19810899f8b841aef656e2538b254
|
refs/heads/master
| 2020-03-21T16:51:00.983839
| 2018-08-09T21:04:00
| 2018-09-06T16:07:41
| 138,797,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58,407
|
r
|
R_profile_helper.R
|
#Profile basic test

# Bootstrap the profiling environment: setup.R is expected to define
# user_path, selac_release and setup_selac_for_profiling() (TODO confirm --
# setup.R is not part of this file), then make sure profvis and the packages
# it needs to render/save its output are available in the user library.
source("setup.R")
local({
  # require() returns FALSE instead of erroring, which lets us attempt a
  # one-time install of profvis before giving up.
  if (!require(profvis, lib.loc = user_path)) {
    install.packages("profvis", lib = user_path)
    if (!require(profvis, lib.loc = user_path)) {
      stop("Failed to install profiler")
    }
  }
  # Needed to serialize/save the profvis htmlwidget output.
  library(htmlwidgets, lib.loc = user_path)
  library(jsonlite, lib.loc = user_path)
  library(yaml, lib.loc = user_path)
  invisible(TRUE)  # idiom fix: spell out TRUE instead of the reassignable T
})
# Echo which selac revision is active (set by setup.R).
print(selac_release)
# setup_selac_for_profiling()
# Compute the SELAC "evolving amino acid" (HMM) log-likelihood for a random
# 10-site subsample of the alignment in `fasta.file` on tree `phy`, using the
# fixed parameter templates hmm.params / std.* defined later in this file.
#
# phy        phylo tree whose tip labels match the FASTA sequence names.
# fasta.file path to a codon alignment in FASTA format.
# nuc.model  nucleotide mutation model; "HKY" is rejected because the
#            underlying selac likelihood does not implement it.
# gamma.type "none" disables the gamma rate mixture; the callee still needs a
#            valid gamma.type string, hence the fallback to "quadrature".
# nCores     forwarded as n.cores.by.gene.by.site.
# Returns whatever selac's likelihood function returns for the sampled sites.
test_selac_hmm <- function(phy,
fasta.file,
nuc.model=c("JC", "GTR", "HKY", "UNREST"),
gamma.type=c("none", "median","quadrature","lognormal" ),
nCores=1){
nuc.model=match.arg(nuc.model)
gamma.type=match.arg(gamma.type)
if(nuc.model == "HKY") stop("HKY model not implemented for GetLikelihoodSAC_CodonForManyCharGivenAllParams.")
include.gamma = (gamma.type != "none")
if(!include.gamma) gamma.type = "quadrature"
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
# Keep column 1 plus 10 randomly sampled site columns (column 1 appears to
# carry non-site data -- TODO confirm against DNAbinToCodonNumeric output).
codon.data <- chars[phy$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
# Assemble the parameter vector in the order the likelihood expects:
# hmm.params, [base freqs unless UNREST], nucleotide rates, [gamma shape],
# selective-regime switching rate.  The whole vector is passed on log scale.
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=phy,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> res
return(res)
}
# basic loader to build further tests
# NOTE(review): every object built here (phy, codon.data, aa.optim,
# codon.freq.by.*, ...) is a local variable; the function's value is just the
# final assignment (codon.index.matrix), returned invisibly, and everything
# else is discarded.  The test_selac.* helpers below reference codon.data,
# phy, aa.optim etc. as free variables, so this loader was presumably meant
# to populate the calling environment or return a list -- TODO confirm.
load_inputs <- function(){
tree <- read.tree("rokasYeast.tre")
phy <- drop.tip(tree, "Calb")
yeast.gene <- read.dna("gene1Yeast.fasta", format="fasta")
# Keep only the first seven sequences, as a list of DNAbin rows.
yeast.gene <- as.list(as.matrix(cbind(yeast.gene))[1:7,])
chars <- selac:::DNAbinToCodonNumeric(yeast.gene)
codon.data <- chars[phy$tip.label,]
aa.data <- selac:::ConvertCodonNumericDataToAAData(codon.data, numcode=1)
aa.optim <- apply(aa.data[, -1], 2, selac:::GetMaxName) #starting values for all, final values for majrule
aa.optim.full.list <- aa.optim
codon.freq.by.aa <- selac:::GetCodonFreqsByAA(codon.data[,-1], aa.optim, numcode=1)
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
# Append a row of per-site optimal amino acids labelled "optimal" so that
# SitePattern can split it back out via includes.optimal.aa=TRUE.
aa.optim.frame.to.add <- matrix(c("optimal", aa.optim), 1, dim(codon.data)[2])
colnames(aa.optim.frame.to.add) <- colnames(codon.data)
codon.data <- rbind(codon.data, aa.optim.frame.to.add)
codon.data <- selac:::SitePattern(codon.data, includes.optimal.aa=TRUE)
aa.optim = codon.data$optimal.aa
codon.index.matrix = selac:::CreateCodonMutationMatrixIndex()
}
# Load the Rokas yeast example data: the yeast tree (with the "Calb" tip
# removed) plus the gene-1 alignment converted to codon numbers, packaged as
# a list tagged with the input key "rokasYeast".
load_rokasYeast <- function() {
  # Prune the outgroup tip so the tree matches the 7-sequence subset below.
  full.tree <- read.tree("rokasYeast.tre")
  pruned.tree <- drop.tip(full.tree, "Calb")
  # Read the alignment and keep only its first seven sequences, as a list
  # of DNAbin rows, then convert to numeric codon codes.
  gene.seqs <- read.dna("gene1Yeast.fasta", format="fasta")
  gene.seqs <- as.list(as.matrix(cbind(gene.seqs))[1:7, ])
  numeric.codons <- selac:::DNAbinToCodonNumeric(gene.seqs)
  # Rows reordered to match the pruned tree's tip-label order.
  list(input.key = "rokasYeast",
       phy = pruned.tree,
       codon.data = numeric.codons[pruned.tree$tip.label, ])
}
# Evaluate the SELAC log-likelihood with a quadrature-discretized gamma rate
# mixture under the GTR nucleotide model.
# NOTE(review): codon.data, phy, aa.optim, codon.freq.by.aa,
# codon.freq.by.gene, codon.index.matrix and lSAC.c4mc.full are free
# variables -- they must already exist in the calling/global environment
# (test_selac_std builds the same objects locally).
test_selac.gamma.quadrature <- function(){
# Parameter vector (log scale): C.Phi.q.Ne, alpha, beta, base freqs A/C/G,
# five GTR rates, gamma shape.
lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5), 5)),
codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
numcode=1, diploid=TRUE, aa.properties=NULL,
volume.fixed.value=0.0003990333,
nuc.model="GTR",
codon.index.matrix=codon.index.matrix,
include.gamma=TRUE,
gamma.type="quadrature",
ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
n.cores.by.gene.by.site=1)
}
# Evaluate the SELAC log-likelihood with a median-discretized gamma rate
# mixture under the GTR nucleotide model.
# NOTE(review): codon.data, phy, aa.optim, codon.freq.by.aa,
# codon.freq.by.gene, codon.index.matrix and lSAC.c4mc.full are free
# variables -- they must already exist in the calling/global environment
# (test_selac_std builds the same objects locally).
test_selac.gamma.median <- function(){
  # BUG FIX: the data argument was previously spelled `codon_data=`, which
  # does not match the `codon.data` parameter name used by every sibling call
  # (see test_selac.gamma.quadrature and test_selac_std), so the data frame
  # would never bind to the intended parameter.
  # Parameter vector (log scale): C.Phi.q.Ne, alpha, beta, base freqs A/C/G,
  # five GTR rates, gamma shape.
  lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5), 5)),
                 codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
                 codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
                 numcode=1, diploid=TRUE, aa.properties=NULL,
                 volume.fixed.value=0.0003990333,
                 nuc.model="GTR",
                 codon.index.matrix=codon.index.matrix,
                 include.gamma=TRUE,
                 gamma.type="median",
                 ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE)
}
# Evaluate the SELAC log-likelihood under the UNREST nucleotide model with no
# gamma rate mixture.  UNREST takes 11 free rates and no base-frequency
# parameters (cf. the std.nuc.params notes above).
# NOTE(review): codon.data, phy, aa.optim, codon.freq.by.*,
# codon.index.matrix and lSAC.c4mc.full are free variables -- they must
# already exist in the calling/global environment.
test_selac.unrest <- function(){
# Parameter vector (log scale): C.Phi.q.Ne, alpha, beta, 11 UNREST rates.
# codon.data, phy and codon.index.matrix are passed positionally here,
# unlike the sibling helpers which name them -- TODO confirm positions
# match the callee's signature.
lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, rep(1,11))),
codon.data, phy, aa.optim_array=aa.optim,
codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
numcode=1, diploid=TRUE, aa.properties=NULL,
volume.fixed.value=0.0003990333,
nuc.model="UNREST",
codon.index.matrix,
include.gamma=FALSE,
ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
n.cores.by.gene.by.site=1)
}
# Evaluate the SELAC log-likelihood under the GTR nucleotide model with no
# gamma rate mixture.
# NOTE(review): codon.data, phy, aa.optim, codon.freq.by.*,
# codon.index.matrix and lSAC.c4mc.full are free variables -- they must
# already exist in the calling/global environment.
test_selac.gtr <- function(){
# Parameter vector (log scale): C.Phi.q.Ne, alpha, beta, base freqs A/C/G,
# five GTR rates.  codon.data, phy and codon.index.matrix are passed
# positionally here -- TODO confirm positions match the callee's signature.
lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5))),
codon.data, phy, aa.optim_array=aa.optim,
codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
numcode=1, diploid=TRUE, aa.properties=NULL,
volume.fixed.value=0.0003990333,
nuc.model="GTR",
codon.index.matrix,
include.gamma=FALSE,
ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
n.cores.by.gene.by.site=1)
}
# --- Shared parameter templates for the likelihood/profiling helpers ---

# Composite selection parameter C*Phi*q*Ne plus the alpha/beta weights used
# by the standard (non-HMM) SELAC likelihood tests.
std.params = c(C.Phi.q.Ne = 4*4e-7*.5*5e6,
               alpha=1.829272,
               beta=0.101799)
# Fixed "volume" weight, passed as volume.fixed.value in the likelihood calls.
std.gamma=0.0003990333
# Equilibrium base frequencies for A, C, G (T is implied as 1 - sum).
std.base.freq = c(A=0.25,C=0.25,G=0.25)
# Placeholder polymorphism parameters (only relevant when k.levels > 0).
std.poly.params = c(NA,NA)
# Shape parameter for the discretized gamma rate mixture.
std.gamma.shape = 5
# Variant of std.params used by the HMM (evolving amino acid) tests.
hmm.params = c(C.Phi.q.Ne = 2,
               alpha=1.829272,
               beta=0.101799)
# Rate of switching between selective regimes in the HMM model.
std.sel.reg = 0.01
## Notes on nuc.mutation.params:
# used as rates value in selac:::CreateNucleotideMutationMatrix(rates, model, base.freqs)->res
# either: length(base.freqs) == 4 && sum(base.freqs) == 1
# or: is.null(base.freqs) == TRUE
# dim(res) == c(4,4)
# rowSums(res) == rep(1,4)
## selac:::CreateNucleotideMutationMatrix with JC model
# rates = rates[1] (ie just uses first value)
## selac:::CreateNucleotideMutationMatrix with GTR model
# rates = rates[1:5] (ie just uses first 5 values)
## selac:::CreateNucleotideMutationMatrix with HKY model
# rates = rates[1:2] (ie just uses first 2 values)
## selac:::CreateNucleotideMutationMatrix with UNREST model
# rates = rates[1:11] (ie just uses first 11 values)
#
# std.nuc.mutation.paramsA = c(1,1,1,1,1)
# std.nuc.mutation.paramsB = rep(1,11)
# std.nuc.mutation.paramsC = c(1,1,1,1,1)
# One unit-rate vector per nucleotide model, sized to the number of free
# rates each model consumes (JC: 1, GTR: 5, HKY: 2, UNREST: 11).
# Idiom fix: TRUE/FALSE spelled out instead of the reassignable T/F shortcuts.
std.nuc.params = mapply(rep_len,length.out=c(JC=1,GTR=5,HKY=2,UNREST=11),x=rep(1,4),
                        USE.NAMES = TRUE,SIMPLIFY = FALSE)
# Compute the standard (fixed optimal amino acid) SELAC log-likelihood for
# `codon.data` on tree `phy`, using the std.* parameter templates above.
#
# phy        phylo tree whose tip labels match the rows of codon.data.
# codon.data numeric codon matrix as produced by DNAbinToCodonNumeric
#            (e.g. load_rokasYeast()$codon.data).
# nuc.model  nucleotide mutation model; "HKY" is rejected because the
#            underlying selac likelihood does not implement it.
# gamma.type "none" disables the gamma rate mixture entirely.
# nCores     forwarded as n.cores.by.gene.by.site.
# Returns whatever selac's likelihood function returns.
test_selac_std <- function(phy, codon.data,
nuc.model=c("JC", "GTR", "HKY", "UNREST"),
gamma.type=c("none", "median","quadrature","lognormal" ),
nCores=1){
nuc.model=match.arg(nuc.model)
gamma.type=match.arg(gamma.type)
if(nuc.model == "HKY") stop("HKY model not implemented for GetLikelihoodSAC_CodonForManyCharGivenAllParams.")
include.gamma = (gamma.type != "none")
# Derive per-site optimal amino acids (majority rule) and codon frequencies.
aa.data <- selac:::ConvertCodonNumericDataToAAData(codon.data, numcode=1)
aa.optim <- apply(aa.data[, -1], 2, selac:::GetMaxName) #starting values for all, final values for majrule
aa.optim.full.list <- aa.optim
codon.freq.by.aa <- selac:::GetCodonFreqsByAA(codon.data[,-1], aa.optim, numcode=1)
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
# Append a row of per-site optimal amino acids labelled "optimal" so that
# SitePattern can split it back out via includes.optimal.aa=TRUE.
aa.optim.frame.to.add <- matrix(c("optimal", aa.optim), 1, dim(codon.data)[2])
colnames(aa.optim.frame.to.add) <- colnames(codon.data)
codon.data <- rbind(codon.data, aa.optim.frame.to.add)
codon.data <- selac:::SitePattern(codon.data, includes.optimal.aa=TRUE)
aa.optim = codon.data$optimal.aa
codon.index.matrix = selac:::CreateCodonMutationMatrixIndex()
# Assemble the parameter vector in the order the likelihood expects:
# std.params, [base freqs unless UNREST], nucleotide rates, [gamma shape].
# The whole vector is passed on log scale.
model.params = std.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParams
if(include.gamma){
model.params=c(model.params,std.gamma.shape)
lSAC.c4mc.full(log(model.params),
codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
numcode=1, diploid=TRUE, aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=TRUE, gamma.type=gamma.type,
ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
n.cores.by.gene.by.site=nCores)->res
}else{
lSAC.c4mc.full(log(model.params),
codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
numcode=1, diploid=TRUE, aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=FALSE,
ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
n.cores.by.gene.by.site=nCores)->res
}
return(res)
}
#round(selac.gtr, 3)
#get_test_key <- function(phy.source, nuc.model, gamma.type, nCores, seed)

# Run one profiled SELAC likelihood evaluation (test_selac_std) under profvis
# and log the result.
#
# src_data    list as produced by load_rokasYeast(): $input.key, $phy,
#             $codon.data.
# nuc.model   nucleotide model forwarded to test_selac_std.
# gamma.model gamma.type forwarded to test_selac_std ("none" disables gamma).
# seed        RNG seed (baked into every output filename for reproducibility).
# nCores      forwarded as test_selac_std's nCores.
#
# Side effects: writes <prefix>.Rprof and <prefix>.Rprofvis.html, and appends
# one row to <input.key>_LL_log.csv (writing the header on first use).
# Returns the log-likelihood, or NA if the profiled run failed.
run_profile <- function(src_data, nuc.model, gamma.model, seed, nCores) {
  set.seed(seed)
  cat(sprintf("Start: %s_%s_%s_%s_%i_%i\n",
              src_data$input.key,
              nuc.model,
              gamma.model,
              selac_release,
              nCores,
              seed))
  # One prefix identifies data set, model, selac revision, cores and seed.
  profile_prefix = sprintf("%s_%s_%s_%s_%i_%i",
                           src_data$input.key,
                           nuc.model,
                           gamma.model,
                           selac_release,
                           nCores,
                           seed)
  model.LL = NA
  # try(): a failed likelihood run is still logged below, as NA.
  try({
    prof_obj <- profvis({
      model.LL = test_selac_std(src_data$phy,
                                src_data$codon.data,
                                nuc.model = nuc.model,
                                gamma.type = gamma.model,
                                nCores = nCores)
    }, prof_output = paste0(profile_prefix, ".Rprof"))
    htmlwidgets::saveWidget(prof_obj,
                            file = paste0(profile_prefix, ".Rprofvis.html"))
  })
  cat(sprintf("End: %s_%s_%s_%s_%i_%i\tLL: %0.3f\n",
              src_data$input.key,
              nuc.model,
              gamma.model,
              selac_release,
              nCores,
              seed,
              model.LL))
  # Create the CSV log with a header row on first use.
  # Idiom fix: append = TRUE instead of the reassignable shortcut T.
  if (!file.exists(paste0(src_data$input.key, "_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file = paste0(src_data$input.key, "_LL_log.csv"),
        append = TRUE)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src_data$input.key,
              nuc.model,
              gamma.model,
              selac_release,
              nCores,
              seed,
              model.LL),
      file = paste0(src_data$input.key, "_LL_log.csv"),
      append = TRUE)
  model.LL
}
# Run a small profiled SelacOptimize fit (3 partitions from tmp_data/) on the
# Salichos-Rokas yeast tree at the given selac git ref, log the resulting
# log-likelihood to selac19XX_LL_log.csv, and save the fit.
#
# seed RNG seed, baked into output filenames for reproducibility.
# ref  selac git ref passed to setup_selac_for_profiling (defined in setup.R).
#
# Side effects: writes <prefix>.Rprof, <prefix>.Rprofvis.html, appends to the
# CSV log (header written on first use), and saves the result under
# selac_paper_output/.  Returns the log-likelihood (NA if the fit failed).
run_simple_selac_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1"){
setup_selac_for_profiling(ref=ref)
profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
"selac19XX",
"GTR",
"noneXquadrature",
selac_release,
3,
seed)
src.key="selac19XX"
set.seed(seed)
cat(sprintf("Start: %s\n",
profile_prefix))
tree<-read.tree('selac_paper_data/SalichosRokas.tre')
# loglik stays NA if the try() block below fails, so the failure is logged.
result=list(loglik=NA)
nuc.model = 'GTR'
gamma.type="noneXquadrature"
nCores=3
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'tmp_data/', phy = tree, n.partitions=3,
edge.length = 'optimize', optimal.aa = 'none', data.type='codon',
codon.model = 'GY94', nuc.model = 'GTR',
include.gamma = FALSE, gamma.type='quadrature', ncats = 4, numcode = 1,
diploid = FALSE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 3, max.restarts = 1, max.evals=20)
}, prof_output = paste0(profile_prefix,".Rprof"))
htmlwidgets::saveWidget(prof_obj,
file=paste0(profile_prefix,".Rprofvis.html"))
})
cat(sprintf("End: %s\tLL: %0.3f\n",
profile_prefix,
result$loglik))
# Create the CSV log with a header row on first use, then append this run.
if(!file.exists(paste0(src.key,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
file=paste0(src.key,"_LL_log.csv"),
append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
src.key,
nuc.model,
gamma.type,
selac_release,
nCores,
seed,
result$loglik),
file=paste0(src.key,"_LL_log.csv"),
append = T)
save(result,file=sprintf('selac_paper_output/yeastSalRokSelacGTRG_quad_%s.Rdata',profile_prefix))
result$loglik
}
# Run the full profiled SelacOptimize fit (all partitions in
# selac_paper_data/) on the Salichos-Rokas yeast tree at the given selac git
# ref, log the resulting log-likelihood to selacFULLb_LL_log.csv, and save
# both the profile and the fit.
#
# seed   RNG seed, baked into output filenames for reproducibility.
# ref    selac git ref passed to setup_selac_for_profiling (from setup.R).
# nCores number of cores passed as n.cores.by.gene.
#
# Side effects: writes <prefix>.Rprof, <prefix>.Rprofvis.RData,
# <prefix>.Rprofvis.html, appends to the CSV log (header on first use), and
# saves the result under selac_paper_output/.  Returns the log-likelihood
# (NA if the fit failed).
run_full_selac_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=3){
setup_selac_for_profiling(ref=ref)
profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
"selacFULLb",
"GTR",
"noneXquadrature",
selac_release,
nCores,
seed)
src.key="selacFULLb"
set.seed(seed)
cat(sprintf("Start: %s\n",
profile_prefix))
tree<-read.tree('selac_paper_data/SalichosRokas.tre')
# loglik stays NA if the try() block below fails, so the failure is logged.
result=list(loglik=NA)
nuc.model = 'GTR'
gamma.type="noneXquadrature"
#nCores=3
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'selac_paper_data/', phy = tree,
edge.length = 'optimize', optimal.aa = 'none', data.type='codon',
codon.model = 'GY94', nuc.model = 'GTR',
include.gamma = FALSE, gamma.type='quadrature', ncats = 4, numcode = 1,
diploid = FALSE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = nCores, max.restarts = 1, max.evals=20)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
# Coarse 0.5 s sampling interval: this is a long run, keep the profile small.
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
htmlwidgets::saveWidget(prof_obj,
file=paste0(profile_prefix,".Rprofvis.html"))
})
cat(sprintf("End: %s\tLL: %0.3f\n",
profile_prefix,
result$loglik))
# Create the CSV log with a header row on first use, then append this run.
if(!file.exists(paste0(src.key,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
file=paste0(src.key,"_LL_log.csv"),
append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
src.key,
nuc.model,
gamma.type,
selac_release,
nCores,
seed,
result$loglik),
file=paste0(src.key,"_LL_log.csv"),
append = T)
save(result,file=sprintf('selac_paper_output/yeastSalRokSelacGTRG_quad_%s.Rdata',profile_prefix))
result$loglik
}
# Run a profiled SelacOptimize fit of the E. coli TEM data (kosi07_data/)
# with the selac codon model, UNREST mutation model and quadrature gamma,
# starting from random parameter values and branch lengths.
#
# seed   RNG seed, baked into output filenames for reproducibility.
# ref    selac git ref passed to setup_selac_for_profiling (from setup.R).
# nCores number of cores passed as n.cores.by.gene.
#
# Side effects: writes <prefix>.Rprof, <prefix>.Rprofvis.RData,
# <prefix>.Rprofvis.html, appends to ecoliTEST_LL_log.csv (header on first
# use), and saves the result (plus seed/starting values/starting tree) under
# ecoli_output/.  Returns the log-likelihood (NA if the fit failed).
run_test_ecoli_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=3){
setup_selac_for_profiling(ref=ref)
src.key="ecoliTEST"
nuc.model = 'UNREST'
gamma.type="quadrature"
profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
src.key,
nuc.model,
gamma.type,
selac_release,
nCores,
seed)
set.seed(seed)
cat(sprintf("Start: %s\n",
profile_prefix))
tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
# loglik stays NA if the try() block below fails, so the failure is logged.
result=list(loglik=NA)
opt.aa.type <- "optimize"
# random starting values
# NOTE(review): starting.vals is saved into the result for the record but is
# not passed to SelacOptimize (the start.from.mle path is commented out).
starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
#nCores=3
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = 'selac', nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = TRUE, gamma.type='quadrature', ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = nCores, n.cores.by.gene.by.site=1,
max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
htmlwidgets::saveWidget(prof_obj,
file=paste0(profile_prefix,".Rprofvis.html"))
})
cat(sprintf("End: %s\tLL: %0.3f\n",
profile_prefix,
result$loglik))
# Create the CSV log with a header row on first use, then append this run.
if(!file.exists(paste0(src.key,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
file=paste0(src.key,"_LL_log.csv"),
append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
src.key,
nuc.model,
gamma.type,
selac_release,
nCores,
seed,
result$loglik),
file=paste0(src.key,"_LL_log.csv"),
append = T)
cat("SELAC Done. saving results\n")
# Record the inputs alongside the fit so the run can be reproduced exactly.
result$seed <- seed
result$startingValues <- starting.vals
result$startingTree <- tree
save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
result$loglik
}
run_ecoli_profile_mode <- function(mode=c("SHORTTEST","TEST","SHORT",
"SHORTTESTHMM","SHORTHMM","LONGHMM",
"FASTHMMTEST","HMMEVAL50","HMMEVALFULL",
"FASTHMMDEBUG","FASTHMMSINGLEDEBUG"),
seed=sample.int(1e6,1),
codon.model=c("selac","none","GY94","YN98"),
nuc.model=c("GTR","UNREST","JC"),
ref="v1.6.1-rc1",
include.gamma=T,
gamma.type=c("quadrature","median","lognormal","none"),
nCores=1){
setup_selac_for_profiling(ref=ref)
mode=match.arg(mode)
src.key=paste0("ecoli",mode)
codon.model = match.arg(codon.model)
nuc.model = match.arg(nuc.model)
if(!include.gamma)
{ gamma.type="quadrature"; gamma.mode="none";}
else {
gamma.mode=gamma.type=match.arg(gamma.type)
}
if(gamma.type=="none"){
include.gamma=F
gamma.type="quadrature"
gamma.mode="none"
}
profile_prefix=sprintf("%s_%s_%s_%s_%s_%i_%i",
src.key,
codon.model,
nuc.model,
gamma.mode,
selac_release,
nCores,
seed)
if(file.exists(sprintf('ecoli_output/%s_result.Rdata',profile_prefix))){
try({
load(file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
if(!is.null(result$loglik) && is.finite(result$loglik)) {
cat(sprintf("Skip: %s\n",
profile_prefix))
return(result$loglik)
}
})
cat(sprintf("Rebuilding: %s\n",
profile_prefix))
}
set.seed(seed)
cat(sprintf("Start: %s\n",
profile_prefix))
tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
result=list(loglik=NA)
opt.aa.type <- "optimize"
# random starting values
starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
#nCores=3
if(mode=="TEST"){
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="SHORTTEST"){
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2, max.iterations = 1,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
}else if(mode=="SHORTTESTHMM"){
# HMM code requires starting edge length < 0.5 and > 1e-8
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = TRUE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name, max.iterations=1)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
}else if(mode=="SHORT"){
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2, max.iterations = 1,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="SHORTHMM"){
# HMM code requires starting edge length < 0.5 and > 1e-8
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize',data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name, max.iterations=1)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="LONGHMM"){
# HMM code requires starting edge length < 0.5 and > 1e-8
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize',data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=5, max.tol=1e-2,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name, max.iterations=5)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="FASTHMMDEBUG") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
# try({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
# })
}else if(mode=="FASTHMMSINGLEDEBUG") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
# lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA <- function(codon.data, phy, Q_codon_array, codon.freq.by.aa=NULL, codon.freq.by.gene=NULL, aa.optim_array, codon_mutation_matrix, Ne, rates, numcode, diploid, n.cores.by.gene.by.site=1, verbose=FALSE){
nsites.unique <- dim(codon.data$unique.site.patterns)[2]-1
final.likelihood.vector <- rep(NA, nsites.unique)
#We rescale the codon matrix only:
diag(codon_mutation_matrix) = 0
diag(codon_mutation_matrix) = -rowSums(codon_mutation_matrix)
scale.factor <- -sum(diag(codon_mutation_matrix) * codon.freq.by.gene, na.rm=TRUE)
codon_mutation_matrix_scaled = codon_mutation_matrix * (1/scale.factor)
#Finish the Q_array codon mutation matrix multiplication here:
if(diploid == TRUE){
Q_codon_array = (2 * Ne) * codon_mutation_matrix_scaled * Q_codon_array
}else{
Q_codon_array = Ne * codon_mutation_matrix_scaled * Q_codon_array
}
diag(Q_codon_array) = 0
diag(Q_codon_array) = -rowSums(Q_codon_array)
#Put the na.rm=TRUE bit here just in case -- when the amino acid is a stop codon, there is a bunch of NaNs. Should be fixed now.
#scale.factor <- -sum(Q_codon_array[DiagArray(dim(Q_codon_array))] * equilibrium.codon.freq, na.rm=TRUE)
## This is obviously not very elegant, but not sure how else to code it to store this stuff in this way -- WORK IN PROGRESS:
#expQt <- GetExpQt(phy=phy, Q=Q_codon_array, scale.factor=NULL, rates=rates)
#Generate matrix of root frequencies for each optimal AA:
root.p_array <- codon.freq.by.gene
#root.p_array <- t(root.p_array)
#root.p_array <- root.p_array / rowSums(root.p_array)
#rownames(root.p_array) <- .unique.aa
phy.sort <- reorder(phy, "pruningwise")
# Q_codon_array_vectored <- c(t(Q_codon_array)) # has to be transposed
# Q_codon_array_vectored <- Q_codon_array_vectored[.non_zero_pos]
anc.indices <- unique(phy.sort$edge[,1])
if(verbose){
MultiCoreLikelihoodBySite <- function(nsite.index){
tmp <- selac:::GetLikelihoodSAC_CodonForSingleCharGivenOptimumHMMScoring(charnum=nsite.index, codon.data=codon.data$unique.site.patterns,
phy=phy.sort, Q_codon_array=Q_codon_array,
root.p=root.p_array, scale.factor=scale.factor,
anc.indices=anc.indices, return.all=FALSE)
cat(".")
return(tmp)
}
} else {
MultiCoreLikelihoodBySite <- function(nsite.index){
tmp <- selac:::GetLikelihoodSAC_CodonForSingleCharGivenOptimumHMMScoring(charnum=nsite.index, codon.data=codon.data$unique.site.patterns, phy=phy.sort, Q_codon_array_vectored=Q_codon_array_vectored, root.p=root.p_array, scale.factor=scale.factor, anc.indices=anc.indices, return.all=FALSE)
return(tmp)
}
}
final.likelihood.vector <- unlist(lapply(1:nsites.unique, MultiCoreLikelihoodBySite))
if(verbose) cat("|\n")
return(final.likelihood.vector)
}
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
phy=tree; verbose=T;
diploid=T; numcode=1
x <- model.params
k.levels =0
importance.of.aa.dist.in.selective.environment.change = 1
rate.for.selective.environment.change = x[length(x)]
x = x[-length(x)]
aa.properties=NULL
if(include.gamma == TRUE){
shape = x[length(x)]
x = x[-length(x)]
}
C.Phi.q.Ne <- x[1]
C <- 4
q <- 4e-7
Ne <- 5e6
Phi.q.Ne <- C.Phi.q.Ne / C
Phi.Ne <- Phi.q.Ne / q
Phi <- Phi.Ne / Ne
alpha <- x[2]
beta <- x[3]
gamma <- 0.0003990333 #volume.fixed.value
if(k.levels > 0){
if(nuc.model == "JC") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(1, model=nuc.model, base.freqs=base.freqs)
poly.params <- x[7:8]
}
if(nuc.model == "GTR") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[9:length(x)], model=nuc.model, base.freqs=base.freqs)
poly.params <- x[7:8]
}
if(nuc.model == "UNREST") {
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[6:length(x)], model=nuc.model, base.freqs=NULL)
poly.params <- x[4:5]
}
}else{
if(nuc.model == "JC") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(1, model=nuc.model, base.freqs=base.freqs)
}
if(nuc.model == "GTR") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[7:length(x)], model=nuc.model, base.freqs=base.freqs)
}
if(nuc.model == "UNREST") {
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[4:length(x)], model=nuc.model, base.freqs=NULL)
}
}
nuc.mutation.rates.vector <- c(nuc.mutation.rates, rate.for.selective.environment.change)
codon_mutation_matrix <- matrix(nuc.mutation.rates.vector[codon.index.matrix], dim(codon.index.matrix))
codon_mutation_matrix[is.na(codon_mutation_matrix)]=0
nsites.unique <- dim(codon.data$unique.site.patterns)[2]-1
nsites <- sum(codon.data$site.pattern.counts)
if(include.gamma==TRUE){
if(gamma.type == "median"){
rates.k <- DiscreteGamma(shape=shape, ncats=ncats)
weights.k <- rep(1/ncats, ncats)
}
if(gamma.type == "quadrature"){
rates.and.weights <- LaguerreQuad(shape=shape, ncats=ncats)
rates.k <- rates.and.weights[1:ncats]
weights.k <- rates.and.weights[(ncats+1):(ncats*2)]
}
if(gamma.type == "lognormal"){
rates.and.weights <- LogNormalQuad(shape=shape, ncats=ncats)
rates.k <- rates.and.weights[1:ncats]
weights.k <- rates.and.weights[(ncats+1):(ncats*2)]
}
#ttmmpp <- c(nuc.mutation.rates.vector, nsites.unique, nsites, C, Phi, rates.k, q, Ne, shape, importance.of.aa.dist.in.selective.environment.change)
#writeLines(text = paste(ttmmpp), con = "~/Desktop/selac_parameter.txt", sep = "\t")
final.likelihood.mat = matrix(0, nrow=ncats, ncol=nsites.unique)
for(k.cat in sequence(ncats)){
if(k.levels > 0){
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=poly.params, k=k.levels)
}else{
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=NULL, k=k.levels)
}
Q_codon_array <- selac:::FastCreateEvolveAACodonFixationProbabilityMatrix(aa.distances=aa.distances, nsites=nsites, C=C, Phi=Phi*rates.k[k.cat], q=q, Ne=Ne, include.stop.codon=TRUE, numcode=numcode, diploid=diploid, flee.stop.codon.rate=0.9999999, importance.of.aa.dist.in.selective.environment.change) #Cedric: added importance
final.likelihood.mat[k.cat,] = GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA(codon.data, phy, Q_codon_array,
codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene, codon_mutation_matrix=codon_mutation_matrix,
Ne=Ne, rates=NULL, numcode=numcode, diploid=diploid, n.cores.by.gene.by.site=n.cores.by.gene.by.site, verbose=verbose)
}
likelihood <- sum(log(colSums(exp(final.likelihood.mat)*weights.k)) * codon.data$site.pattern.counts)
}else{
if(k.levels > 0){
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=poly.params, k=k.levels)
}else{
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=NULL, k=k.levels)
}
Q_codon_array <- selac:::FastCreateEvolveAACodonFixationProbabilityMatrix(aa.distances=aa.distances, nsites=nsites, C=C, Phi=Phi, q=q, Ne=Ne, include.stop.codon=TRUE, numcode=numcode, diploid=diploid, flee.stop.codon.rate=0.9999999, importance.of.aa.dist.in.selective.environment.change) #Cedric: added importance
final.likelihood = GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA(codon.data, phy, Q_codon_array, codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene, codon_mutation_matrix=codon_mutation_matrix, Ne=Ne, rates=NULL, numcode=numcode, diploid=diploid, n.cores.by.gene.by.site=n.cores.by.gene.by.site, verbose=verbose)
likelihood <- sum(final.likelihood * codon.data$site.pattern.counts)
}
if(neglnl) {
likelihood <- -1 * likelihood
}
if(verbose > 1) {
results.vector <- c(likelihood, C*Phi*q, alpha, beta, gamma, Ne, ape::write.tree(phy))
names(results.vector) <- c("likelihood", "C.Phi.q.Ne", "alpha", "beta", "gamma", "Ne", "phy")
print(results.vector)
}else if(verbose){
results.vector <- c(likelihood, alpha, beta, gamma)
names(results.vector) <- c("likelihood", "alpha", "beta", "gamma")
print(results.vector)
}
if(is.na(likelihood) || is.nan(likelihood)){
res <-1000000
}else{
res <- likelihood
}
result$loglik <-res
# try({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
# })
}else if(mode=="FASTHMMTEST") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
# tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="HMMEVAL50") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,50))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
# tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="HMMEVALFULL") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
# tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else {
cat(sprintf("Request for %s mode not understood.\n",as.character(mode)))
}
cat(sprintf("End: %s\tLL: %0.3f\n",
profile_prefix,
result$loglik))
if(!file.exists(paste0(src.key,"_",codon.model,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
file=paste0(src.key,"_",codon.model,"_LL_log.csv"),
append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
src.key,
nuc.model,
gamma.mode,
selac_release,
nCores,
seed,
result$loglik),
file=paste0(src.key,"_",codon.model,"_LL_log.csv"),
append = T)
cat("SELAC Done. saving results\n")
result$seed <- seed
result$startingValues <- starting.vals
result$startingTree <- tree
save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
result$loglik
}
# Run one full SELAC optimization on the E. coli TEM benchmark data set,
# without wrapping it in a profvis profiler (contrast with the profiled
# variants elsewhere in this file).
#
# Args:
#   seed: RNG seed; a random one is drawn if not supplied. Used both for
#     reproducibility and as part of the output file name.
#   ref: git ref of the selac release to install/load via
#     setup_selac_for_profiling().
#   nCores: number of cores passed to SelacOptimize (per gene).
#   auto.skip: declared but not consulted in this body; skipping is instead
#     decided by the result-file check below -- TODO confirm intent.
#
# Side effects: installs/loads selac, reads tree/fasta fixtures from
# kosi07_data/, appends a line to <src.key>_LL_log.csv, and saves the result
# list to ecoli_output/<prefix>_result.Rdata.
#
# Returns: the optimized log-likelihood (result$loglik).
run_test_ecoli_optimize_no_profile <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=1, auto.skip=T){
# Install/load the requested selac revision; also sets the global
# `selac_release` used in the file names below.
setup_selac_for_profiling(ref=ref)
# Fixed configuration for this benchmark run.
src.key="ecoliDEBUG"
nuc.model = 'UNREST'
gamma.type="quadrature"
# Unique key identifying this (config, release, cores, seed) combination;
# used for all output file names and log lines.
profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
src.key,
nuc.model,
gamma.type,
selac_release,
nCores,
seed)
# If a finished result file already exists with a finite log-likelihood,
# skip the (expensive) optimization and return the cached value.
# The try() guards against a corrupt/unloadable .Rdata file.
if(file.exists(sprintf('ecoli_output/%s_result.Rdata',profile_prefix))){
try({
load(file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
if(!is.null(result$loglik) && is.finite(result$loglik)) {
cat(sprintf("Skip: %s\n",
profile_prefix))
return(result$loglik)
}
})
cat(sprintf("Rebuilding: %s\n",
profile_prefix))
}
# Seed the RNG so starting values and branch lengths are reproducible.
set.seed(seed)
cat(sprintf("Start: %s\n",
profile_prefix))
# Input fixtures: codon-phyml tree and aligned fasta for the TEM gene.
tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
# Sentinel value so the log line below is well-defined even on failure.
result=list(loglik=NA)
opt.aa.type <- "optimize"
# random starting values
starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
# Randomize branch lengths; SelacOptimize re-optimizes them (edge.length).
tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
#nCores=3
# try({
# prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
# The actual optimization. Deliberately small budget (max.evals=20,
# max.iterations=15, loose tolerance) -- this is a smoke/benchmark run,
# not a production fit.
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = 'selac', nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = TRUE, gamma.type='quadrature', ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = TRUE,
n.cores.by.gene = nCores, n.cores.by.gene.by.site=1,
max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
# }, prof_output = paste0(profile_prefix,".Rprof"),interval=1)
# save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
# })
cat(sprintf("End: %s\tLL: %0.3f\n",
profile_prefix,
result$loglik))
# Append the run to a CSV log, writing the header first if the log is new.
if(!file.exists(paste0(src.key,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
file=paste0(src.key,"_LL_log.csv"),
append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
src.key,
nuc.model,
gamma.type,
selac_release,
nCores,
seed,
result$loglik),
file=paste0(src.key,"_LL_log.csv"),
append = T)
cat("SELAC Done. saving results\n")
# Stash provenance (seed, starting values, starting tree) alongside the fit
# so the run can be reproduced later, then persist everything.
result$seed <- seed
result$startingValues <- starting.vals
result$startingTree <- tree
save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
result$loglik
}
|
abb1e598748c7ce973e63ec9703b19295201965b
|
90118609911bb5a97941830cbc1f7020d239405d
|
/Samsung/code/exploratory/randomForest.R
|
feb73c70b2ee4a1f8280aeae145940dd39ac1d6b
|
[] |
no_license
|
shannonrush/Contests
|
6e0eba560413723a11d1de1ddb0e3b1f1f440c56
|
4b930624a5c6ea1030f1e82b1829d4fa0a1261df
|
refs/heads/master
| 2021-01-01T05:31:27.616173
| 2014-06-05T18:56:17
| 2014-06-05T18:56:17
| 2,697,116
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,651
|
r
|
randomForest.R
|
# Exploratory random forest tuning for the Samsung activity-recognition data.
# The original file interleaved code with bare result notes, which made it
# unparseable as R; the notes are preserved below as comments so the script
# can now be source()'d end to end. Accuracies come from getAccuracy()
# (defined in helpers.R) on the held-out test set.
source("../../helpers.R")
load("../../../data/processed/training_clean.rda")
load("../../../data/processed/validating_clean.rda")
set.seed(335)
library(randomForest)
load("../../../data/processed/train_clean.rda")
load("../../../data/processed/test_clean.rda")

# Optimum ntree with default sampsize:
rf325 <- randomForest(activity ~ ., data = train, do.trace = TRUE, importance = TRUE, replace = TRUE, ntree = 350)
p <- predict(rf325, test)
getAccuracy(p, test$activity)
# Recorded test-set accuracy by ntree:
#   300: 95.08%
#   325: 95.08%
#   350: 95.35% *  (best)
#   375: 95.15%
#   400: 95.22%
#   500: 95.15%
#   750: 95.02%
#  1000: 95.08%
#  1100: 95.02%
#  1200: 92.29%
#  1300: 94.95%

# rf350 results:
# NOTE(review): `rf350` is never created above -- presumably the ntree = 350
# fit stored in `rf325`; confirm before rerunning.
p <- predict(rf350, test)
getAccuracy(p, test$activity)
# [1] "1416/1485"
# [1] "95.35%"
#           prediction
# actual     laying sitting standing walk walkdown walkup
#   laying      293       0        0    0        0      0
#   sitting       0     226       38    0        0      0
#   standing      0      23      260    0        0      0
#   walk          0       0        0  228        0      1
#   walkdown      0       0        0    2      194      4
#   walkup        0       0        0    1        0    215

# tuneRF to find optimum mtry:
#   suggested 46 but I found 23 (the default) to be optimum

# Tuning with e1071::tune():
library(e1071)
tune <- tune(randomForest, activity ~ ., data = train, do.trace = TRUE)
rf <- randomForest(activity ~ ., do.trace = TRUE, ntree = 500, mtry = 23, data = train)
p <- predict(rf, test)
getAccuracy(p, test$activity)
# [1] "95.15%"
tune2 <- tune(randomForest, train.x = train[, -563], train.y = train[, 563], data = train, validation.x = test[, -563], validation.y = test[, 563], do.trace = TRUE)

# CURRENT BEST RF:
rf <- randomForest(activity ~ ., data = train, do.trace = TRUE, importance = TRUE, replace = TRUE, ntree = 350)
# 95.35% accurate on test set
|
cbef19e6a6398f15f0d74e3cd7ad289aa748dc98
|
80c7241e2f4f1e596e59eabbe8b679962da2a403
|
/PS6Package/man/createRace.Rd
|
8aa287bfc9f2585f08fbbc9f2ac12aec2c4e28a6
|
[] |
no_license
|
tmuraoka320/PS6
|
3ec6c30ed3fefcd1d64281953523516998458175
|
47f07333d80e86f012040f9324ac7b764a979e67
|
refs/heads/master
| 2020-11-30T04:57:19.660855
| 2016-03-19T18:49:03
| 2016-03-19T18:49:03
| 54,056,487
| 0
| 0
| null | 2016-03-16T18:25:13
| 2016-03-16T18:25:13
| null |
UTF-8
|
R
| false
| true
| 1,226
|
rd
|
createRace.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createRace.R
\name{createRace}
\alias{createRace}
\alias{createRace,Race-method}
\alias{plot,Race-method}
\title{Create an Object with class "Race"}
\usage{
createRace(candidate_list, ...)
}
\arguments{
\item{candidate_list}{A list of objects of class "Candidate"}
}
\value{
An object of class "Race" containing
\item{all_candidates}{The vector of the names of all candidates}
\item{delegatesWon}{The vector of the number of delegates won by each candidate}
\item{party}{The name of the candidates' party}
\item{remaining}{The number of remaining delegates}
\item{delegatesRequired}{The vector of the number of delegates required for each candidate in order to win}
}
\description{
Create an instance with S4 class "Race"
}
\examples{
obama <- createCandidate("obama", 333, "Republican")
bush <- createCandidate("bush", 30, "Republican")
gore <- createCandidate("gore", 500, "Republican")
cand_list <- list(obama, bush, gore)
createRace(cand_list)
candidates <- createRace(cand_list)
plot(candidates, "ANYTHING")
}
\author{
Taishi Muraoka \email{tmuraoka@wustl.edu}
}
\seealso{
\code{\link{createCandidate}}, \code{\link{PropNeeded}}
}
|
02eac078fbd4c1baf67dc8b33b3414e954233395
|
734405d4e0d6f941c50e5b4862943ffc1cab0aa6
|
/script/0703reshape예제.R
|
1bf7622e84e03442eff618f346c6ceb411fb87f9
|
[] |
no_license
|
valborgs/RStudy
|
e98a079a5d0ea7a48f6bf19555630d117df44983
|
ac1808fcc1c3362876e8f2d8823d304496f29231
|
refs/heads/master
| 2020-07-04T14:15:30.309572
| 2019-08-14T08:32:00
| 2019-08-14T08:32:00
| 202,308,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,301
|
r
|
0703reshape예제.R
|
# Example 1: reshaping unemployment-rate data with reshape2.
# melt():  wide -> long
# dcast(): long -> wide
library(reshape2)  # melt()/dcast() live here; this load was missing

# Read the wide-format file: unemployment rate by age group, 2000-2013.
emp_wide <- read.csv("data\\2000-2013년 연령별실업율_연령별평균.csv")
View(emp_wide)
head(emp_wide)
# Inspect the variable names.
ls(emp_wide)

# melt() with no id column and no measure columns specified:
# melt picks the age-group column ("연령별") as the id automatically,
# because its values are character rather than numeric; everything else
# is stacked into variable/value.
emp_melt <- melt(emp_wide)
emp_melt

# melt() with the id column given explicitly (id.var = "연령별").
emp_melt2 <- melt(emp_wide, id.var = "연령별")
emp_melt2

# Same, but also naming the variable/value columns
# ("년도" = year, "실업율" = unemployment rate).
emp_melt3 <- melt(emp_wide,
                  id.var = "연령별",
                  variable.name = "년도",
                  value.name = "실업율")
View(emp_melt3)

# dcast(): long -> wide.
# Method 1: recover the original layout -- one row per age group ("연령별"),
# spreading every level of `variable` back into its own column.
emp_dcast1 <- dcast(emp_melt2, 연령별 ~ variable)
View(emp_dcast1)
# Transposed layout: one row per year ("년도"), one column per age group --
# an example of swapping the row/column orientation of the original data.
emp_dcast2 <- dcast(emp_melt3, 년도 ~ 연령별)
View(emp_dcast2)
|
6af5f43693ce9f15b18dbf1fbc2aa7d05310c353
|
7fc453391224956da9ce2867d9bd54530a66aa43
|
/R/is_site_model.R
|
017c463a7bb54fc74eaf88283af02b3aabd88fc0
|
[] |
no_license
|
cran/beautier
|
880277272f6cf48b4eca9c28db68e4a42c4ccc3a
|
439683e296d755698c3861b447106556e540aa9f
|
refs/heads/master
| 2022-08-30T18:21:31.772630
| 2022-08-11T09:40:07
| 2022-08-11T09:40:07
| 127,175,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,201
|
r
|
is_site_model.R
|
#' Determine if the object is a valid site_model
#' @param x an object, to be determined if it is a site_model
#' @return TRUE if the site_model is a valid site_model, FALSE otherwise
#' @seealso A site model can be created using \code{\link{create_site_model}}
#' @examples
#' check_empty_beautier_folder()
#'
#' # TRUE
#' is_site_model(create_gtr_site_model())
#' is_site_model(create_hky_site_model())
#' is_site_model(create_jc69_site_model())
#' is_site_model(create_tn93_site_model())
#'
#' # FALSE
#' is_site_model(NA)
#' is_site_model(NULL)
#' is_site_model("nonsense")
#' is_site_model(create_strict_clock_model())
#' is_site_model(create_bd_tree_prior())
#' is_site_model(create_mcmc())
#'
#' check_empty_beautier_folder()
#' @export
is_site_model <- function(x) {
  # An object is a valid site model exactly when check_site_model()
  # accepts it without raising an error.
  tryCatch(
    {
      beautier::check_site_model(x)
      TRUE
    },
    error = function(e) FALSE # nolint the error's content is irrelevant
  )
}
#' Determine if the object is a valid GTR site model,
#' as created by \code{\link{create_gtr_site_model}}
#' @param x an object, to be determined if it is a valid GTR site model
#' @return TRUE if x is a valid GTR site model, FALSE otherwise
#' @author Richèl J.C. Bilderbeek
#' @examples
#' check_empty_beautier_folder()
#'
#' # site models
#' is_gtr_site_model(create_gtr_site_model())
#' is_gtr_site_model(create_hky_site_model())
#' is_gtr_site_model(create_jc69_site_model())
#' is_gtr_site_model(create_tn93_site_model())
#'
#' # other models
#' is_gtr_site_model(NA)
#' is_gtr_site_model(NULL)
#' is_gtr_site_model("nonsense")
#' is_gtr_site_model(create_strict_clock_model())
#' is_gtr_site_model(create_bd_tree_prior())
#' is_gtr_site_model(create_mcmc())
#'
#' check_empty_beautier_folder()
#' @export
is_gtr_site_model <- function(x) {
  # An object is a valid GTR site model exactly when check_gtr_site_model()
  # accepts it without raising an error.
  tryCatch(
    {
      beautier::check_gtr_site_model(x)
      TRUE
    },
    error = function(e) FALSE # nolint the error's content is irrelevant
  )
}
#' Determine if the object is a valid HKY site model,
#' as created by \code{\link{create_hky_site_model}}
#' @param x an object, to be determined if it is a valid HKY site model
#' @return TRUE if x is a valid HKY site model, FALSE otherwise
#' @author Richèl J.C. Bilderbeek
#' @examples
#' check_empty_beautier_folder()
#'
#' # site models
#' is_hky_site_model(create_hky_site_model())
#' is_hky_site_model(create_gtr_site_model())
#' is_hky_site_model(create_jc69_site_model())
#' is_hky_site_model(create_tn93_site_model())
#'
#' # other models
#' is_hky_site_model(NA)
#' is_hky_site_model(NULL)
#' is_hky_site_model("nonsense")
#' is_hky_site_model(create_strict_clock_model())
#' is_hky_site_model(create_bd_tree_prior())
#' is_hky_site_model(create_mcmc())
#'
#' check_empty_beautier_folder()
#' @export
is_hky_site_model <- function(x) {
  # Must be a site model at all before inspecting HKY-specific parts.
  if (!beautier::is_site_model(x)) {
    return(FALSE)
  }
  if (x$name != "HKY") {
    return(FALSE)
  }
  # HKY-specific elements: kappa, its prior distribution, and the
  # equilibrium frequency setting.
  fields <- names(x)
  if (!("kappa" %in% fields)) {
    return(FALSE)
  }
  if (!("kappa_prior_distr" %in% fields)) {
    return(FALSE)
  }
  if (!beautier::is_distr(x$kappa_prior_distr)) {
    return(FALSE)
  }
  if (!("freq_equilibrium" %in% fields)) {
    return(FALSE)
  }
  if (!beautier::is_freq_equilibrium_name(x$freq_equilibrium)) {
    return(FALSE)
  }
  TRUE
}
#' Determine if the object is a valid JC69 site model
#' @param x an object, to be determined if it is a valid JC69 site model
#' @return TRUE if x is a valid JC69 site model, FALSE otherwise
#' @author Richèl J.C. Bilderbeek
#' @examples
#' check_empty_beautier_folder()
#'
#' # site models
#' is_jc69_site_model(create_gtr_site_model())
#' is_jc69_site_model(create_hky_site_model())
#' is_jc69_site_model(create_jc69_site_model())
#' is_jc69_site_model(create_tn93_site_model())
#'
#' # other models
#' is_jc69_site_model(NA)
#' is_jc69_site_model(NULL)
#' is_jc69_site_model("nonsense")
#' is_jc69_site_model(create_strict_clock_model())
#' is_jc69_site_model(create_bd_tree_prior())
#' is_jc69_site_model(create_mcmc())
#'
#' check_empty_beautier_folder()
#' @export
is_jc69_site_model <- function(x) {
  result <- FALSE
  if (beautier::is_site_model(x)) {
    # JC69 is identified purely by its name; it carries no extra
    # parameters that need checking here.
    if (x$name == "JC69") {
      result <- TRUE
    }
  }
  result
}
#' Determine if the object is a valid TN93 site model,
#' @param x an object, to be determined if it is a valid TN93 site model,
#' as created by \code{\link{create_tn93_site_model}}
#' @return TRUE if x is a valid TN93 site model, FALSE otherwise
#' @author Richèl J.C. Bilderbeek
#' @examples
#' check_empty_beautier_folder()
#'
#' # site models
#' is_tn93_site_model(create_gtr_site_model())
#' is_tn93_site_model(create_hky_site_model())
#' is_tn93_site_model(create_jc69_site_model())
#' is_tn93_site_model(create_tn93_site_model())
#'
#' # other models
#' is_tn93_site_model(NA)
#' is_tn93_site_model(NULL)
#' is_tn93_site_model("nonsense")
#' is_tn93_site_model("")
#' is_tn93_site_model(c())
#' is_tn93_site_model(create_strict_clock_model())
#' is_tn93_site_model(create_bd_tree_prior())
#' is_tn93_site_model(create_mcmc())
#'
#' check_empty_beautier_folder()
#' @export
is_tn93_site_model <- function(x) {
  # An object is a valid TN93 site model exactly when
  # check_tn93_site_model() accepts it without raising an error.
  tryCatch(
    {
      beautier::check_tn93_site_model(x)
      TRUE
    },
    error = function(e) FALSE # nolint the error's content is irrelevant
  )
}
|
7b6f7252d011efd28b25e73c1719997563610459
|
a2a203ff87ad061a06aa8348fcb7fbf8b5abfc68
|
/PS3/PS3_3.R
|
20444092f7e98a379897a1a5374ce0ca45a7811d
|
[] |
no_license
|
violentelder/ESE5023_Assignments
|
0e2bbd54055810aef866231b7345d751e103a854
|
1e07d184142093f57f62fdc446e36bdb4dfad8a3
|
refs/heads/main
| 2023-02-06T13:07:04.256940
| 2020-12-31T11:24:27
| 2020-12-31T11:24:27
| 298,271,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 667
|
r
|
PS3_3.R
|
#======== Vegetarians and Zinc ========
# Welch two-sample t-tests comparing zinc intake between groups of women.
# Each column of Zn_woman.csv holds one group's observations.
library(tidyr)
library(dplyr)
# NOTE(review): hard-coded absolute path; prefer relative paths in shared code.
setwd('D:/ESE5023/ESE5023_Assignments/Data')
DATA <- read.csv("Zn_woman.csv", header = TRUE)
DATA <- as_tibble(DATA)

# Each group is simply one column of DATA, so extract the vectors directly
# instead of reshaping the whole table with the superseded gather() and
# filtering it back down (same result, far less work).
nonvegetarians <- DATA[["Pregnant.nonvegetarians"]]
vegetarians <- DATA[["Pregnant.vegetarians"]]
Nonpvegetarians <- DATA[["Nonpregnant.vegetarians"]]

# Do pregnant vegetarians and pregnant non-vegetarians differ in zinc intake?
t.test(nonvegetarians, vegetarians)
#===== extra: does zinc intake decrease in pregnant women (vegetarians only)?
t.test(Nonpvegetarians, vegetarians)
|
2eb78c7c3f2845140eca83b3aac9f691739bbb42
|
4f522d2d8d9d0a54ec6340f94ee66f74b1022050
|
/man/GetSparseCholeskyDefaultMicrobenchmarks.Rd
|
abd70f749f7b997177e7a070f397468248f4bdb6
|
[
"Apache-2.0"
] |
permissive
|
cran/RHPCBenchmark
|
2b69e57b644651eb3034b23bd8ff9df5a1ca1c84
|
c1335a4fcb14e6b871d73e3929f188a92a273308
|
refs/heads/master
| 2021-01-21T20:11:16.606699
| 2017-05-23T16:26:28
| 2017-05-23T16:26:28
| 92,201,283
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,661
|
rd
|
GetSparseCholeskyDefaultMicrobenchmarks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparse_matrix_benchmark.R
\name{GetSparseCholeskyDefaultMicrobenchmarks}
\alias{GetSparseCholeskyDefaultMicrobenchmarks}
\title{Initializes the list of default sparse Cholesky factorization
microbenchmarks}
\usage{
GetSparseCholeskyDefaultMicrobenchmarks()
}
\value{
a list of \code{SparseMatrixMicrobenchmark} objects defining the
microbenchmarks to be executed. The microbenchmarks appear in the order
listed in the function description and are assigned the names enumerated
in the description.
}
\description{
\code{GetSparseCholeskyDefaultMicrobenchmarks} defines the default sparse
Cholesky factorization microbenchmarks to be executed by the
\code{\link{RunSparseMatrixBenchmark}} function. The current sparse
Cholesky factorization microbenchmarks cover a variety of matrices of
different dimensions and number of non-zero values. They are as follows:
\enumerate{
\item cholesky_ct20stif -- Boeing structural matrix with 2600295 nonzeros
  \item cholesky_Andrews -- computer vision matrix with 760154 nonzeros
\item cholesky_G3_circuit -- AMD circuit simulation matrix with 7660826
nonzeros
}
See the documentation for the
\code{\link{SparseMatrixMicrobenchmark}} class for more details.
}
\seealso{
\code{\link{SparseMatrixMicrobenchmark}}
Other sparse matrix default microbenchmarks: \code{\link{GetSparseCholeskyExampleMicrobenchmarks}},
\code{\link{GetSparseLuDefaultMicrobenchmarks}},
\code{\link{GetSparseMatrixVectorDefaultMicrobenchmarks}},
\code{\link{GetSparseMatrixVectorExampleMicrobenchmarks}},
\code{\link{GetSparseQrDefaultMicrobenchmarks}}
}
|
fedcb59348bad15068df93111f3079755765c391
|
a3a3f2b7739fb126d516b7eddb444ea1394f0956
|
/pre-papilla-cells/ppcell/R/adultdensity.R
|
f7d2dfa70ed87561c3b92c61d0dd59f8e0eb64b7
|
[] |
no_license
|
nevillejackson/Fleece-biology
|
1935436dfbb5edd4b8fc42240576074d9c54a041
|
f55f4c9a960905aec50a75e15842d62cde97fc0f
|
refs/heads/master
| 2022-01-02T02:38:28.940195
| 2021-12-31T05:07:18
| 2021-12-31T05:07:18
| 105,637,314
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
r
|
adultdensity.R
|
# adultdensity() - convert adult follicle counts to densities given the
# skin surface area.
#
# Args:
#   adultsurfarea: surface area in cm^2 (converted to mm^2 internally).
#   follno, pfollno, sofollno, sdfollno: series of total, primary,
#     secondary-original and secondary-derived follicle counts; the final
#     element of each (indexed by length(follno)) is taken as the adult value.
#
# Returns: a named list with the four densities (follicles per mm^2) and
# the (SO + SD) / P follicle ratio.
adultdensity <- function(adultsurfarea, follno, pfollno, sofollno, sdfollno) {
  adult <- length(follno)
  area_mm2 <- adultsurfarea * 100 # surface area in mm^2 instead of cm^2
  list(
    adultdens = follno[adult] / area_mm2,
    adultpdens = pfollno[adult] / area_mm2,
    adultsodens = sofollno[adult] / area_mm2,
    adultsddens = sdfollno[adult] / area_mm2,
    adultsopratio = (sofollno[adult] + sdfollno[adult]) / pfollno[adult]
  )
}
|
28add0ea785bcdfd0e6a9b5970d8b0480db153cd
|
4c2835dcc76cdff0f3c7f2fcae6fc59896a6f632
|
/R/constants.R
|
05aa0498acff778b251d6ed889284088bf5971aa
|
[] |
no_license
|
birderboone/Radar
|
fe747a1d3991a4e1ab99616d4b5efe765d786f46
|
b1715b1006faf676152cef697c05f49e230f017b
|
refs/heads/master
| 2021-05-06T23:16:38.482865
| 2017-12-03T20:31:02
| 2017-12-03T20:31:02
| 112,961,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 249
|
r
|
constants.R
|
# Unit-conversion and geodetic constants used by the radar processing code.
deg2rad <- 0.017453293 # convert degrees to radians (approximately pi / 180)
halfbeam <- 0.008464847 #0.485 degree half power radar beam width of 3 dB beam in radians
earthe <- 6378137 # earth equatorial radius in metres
rad2deg <- 57.295779513 # convert radians to degrees (approximately 180 / pi)
WDZ <- "%Y%m%d-%H%M%S" # strptime-style timestamp format -- presumably for radar file names; confirm against callers
|
40fa964d20a0d071bb1e5c614f3b18910f237826
|
35ddd33a47b4694bae19bc64bc3c3e6e09c1c964
|
/P6/Intento.R
|
331990b90058df7e604ee1be4af095eba744ae17
|
[] |
no_license
|
angelesmttz/Simulacion
|
a9f566f51671fb117fdb0784e5b7daf8e05f896f
|
79825bd9978bfebed2fbb1c0083fb37e9dbf16ea
|
refs/heads/master
| 2021-01-19T16:05:13.983682
| 2017-12-12T18:01:30
| 2017-12-12T18:01:30
| 99,719,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
r
|
Intento.R
|
# One step of a simple epidemic agent simulation on a square torus of side l.
# Agents get random positions and velocities; each starts infected ("I") with
# probability p_infect, otherwise susceptible ("S"). Positions are then
# advanced once with wrap-around (toroidal) boundaries.
l <- 1.5          # side length of the square world
n <- 50           # number of agents
p_infect <- 0.05  # initial infection probability (renamed from `pi`, which shadowed base::pi)
pr <- 0.02        # recovery probability (not used in this single step)
v <- l / 30       # maximum speed per axis

# Build all agents in one vectorized call rather than rbind()-ing one row at
# a time inside a loop, which copies the whole data frame on every append.
# (The individual draws are i.i.d., so drawing them column-wise is an
# equivalent random initialization.)
agentes <- data.frame(
  x = runif(n, 0, l),
  y = runif(n, 0, l),
  dx = runif(n, -v, v),
  dy = runif(n, -v, v),
  estado = ifelse(runif(n) < p_infect, "I", "S")
)
print(agentes)

# Advance one step. Since |dx|, |dy| <= v < l, every coordinate lands in
# (-l, 2l), so a single modulo reproduces the add-l / subtract-l boundary
# corrections of the original code.
agentes$x <- (agentes$x + agentes$dx) %% l
agentes$y <- (agentes$y + agentes$dy) %% l

# NOTE(review): the original script ended with rm(list = ls()); gc(), wiping
# the entire global environment (including the results just computed). That
# is unsafe in shared sessions and has been removed.
|
57cd3a44cc95a9aa28fc51fdec7dca3556720f9c
|
29ba4f78eb2558b1a2db9fdc8b3f5a1e61dc245a
|
/man/plot_dive.Rd
|
38c2dfe7ef95e7ab169bde8d376336e7777343cb
|
[] |
no_license
|
mczapanskiy-usgs/pfsh.dive
|
0ff431379ee8dfcb9158848c05c7dba4c73ab71e
|
c72fcdefad6cba1f5c32eda6e19323d6350310fc
|
refs/heads/master
| 2021-05-09T01:10:53.034516
| 2018-02-07T03:58:21
| 2018-02-07T03:58:21
| 119,780,100
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,614
|
rd
|
plot_dive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_dive.R
\name{plot_dive}
\alias{plot_dive}
\title{Create a dive plot}
\usage{
plot_dive(calib_tdr, diveid, surface_thr = 0.1)
}
\arguments{
\item{calib_tdr}{data.frame. Use \code{\link{calibrate_tdr}}.}
\item{diveid}{integer. Identifier of dive to plot.}
\item{surface_thr}{numeric. Threshold for surface noise.}
}
\value{
gtable with two plots.
}
\description{
\code{plot_dive} creates a plot of a single dive in two panels. The left
panel zooms in on the dive itself and the right panel is an overview of the
entire event. In the left panel, blue points are within the dive and red
points are the surrounding records for context. The right panel has two
curves. The red one shows the original, pre-calibration points. The green
one is the calibrated dive with the surface offset applied. The shaded
region delineates the dive itself. In both panels, the dashed blue line
indicates the surface noise threshold.
}
\examples{
# Load metadata
metadata_path <- system.file('extdata',
'MOC2015PFSHmetadata.csv',
package = 'pfsh.dive')
metadata <- readr::read_csv(metadata_path)
# Read CEFAS output
tdr_path <- system.file('extdata',
paste0(metadata$TDR_filename[1], '.CSV'),
package = 'pfsh.dive')
tdr <- read_cefas(tdr_path,
metadata$Deployed[1],
metadata$Recovered[1])
# Calibrate TDR data
calib_tdr <- calibrate_tdr(tdr, metadata$DeployID[1])
# Plot a dive
plot_dive(calib_tdr, 4)
}
|
16531d936af5c27c3f8dadb1926ebae9c6428897
|
924c24d5633f0296c461ce22764bd905898cbdad
|
/R/bignmf.R
|
98164edceea949c64d044b54c2ca8731820e7be6
|
[] |
no_license
|
congc19/bignmf
|
cf5127131ef061293c5ef3a6187b5613b1420344
|
416708f9bf05f198b9251e83533ee3b2014e2936
|
refs/heads/master
| 2020-06-30T13:36:48.582561
| 2016-05-22T14:46:49
| 2016-05-22T14:46:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,905
|
r
|
bignmf.R
|
##V: the matrix to be factorized.
##r: the rank of resulting matrices.
##initial: can be "H.kmeans", "W.kmeans", or the initial W or H matrix. "H.kmeans" means initialize H with kmeans(V,r)$centers, while "W.kmeans" means initialized W with kmeans centers. The default is "H.kmeans".
##max.iteration: the number of iterations allowed.
## stop.condition: the function compares the norm of projected gradient matrix in the k-th iteration and the norm of gradient matrix after the first iteration. If the former one is less than the latter multiplying stop.condition, iteration stops .
##Detail: The nonnegative matrix factorization tries to find nonnegative matrices W and H, so that V \approx WH. Using sum of squares loss function, the problem is to solve \min_{W\ge0, H\ge0} f(V - WH). bignmf finds W minimizing f given H and then finds H give W, i.e. alternating least squares. The function treats nonnegative constrained regression as a special L1 regression and solves it via coordinate descent method.
##value: the function returns a list of length 3.
##W : the resulting nonnegative matrix W.
##H : the resulting nonnegative matrix H.
##iterations : number of iterations.
##Example:
# v_mat <- matrix(rexp(60000,2), 200, 300)
# system.time(re <- bignmf(v_mat, 5))
# re$iterations
#
# v_mat <- matrix(rexp(6000000,2), 2000, 3000)
# v_mat[v_mat < quantile(v_mat, .1)] <- 0
# system.time(re <- bignmf(v_mat, 20))
# re$iterations
# Nonnegative matrix factorization V ~ W %*% H by alternating nonnegative
# least squares; the inner solver is the compiled routine "whupdate"
# (coordinate descent, registered by this package's C code).
# Code is left byte-identical: the result depends on the RNG draw order for
# W and H below, so any reordering would change output for a fixed seed.
bignmf <- function(V, r, max.iteration=200, stop.condition=1e-4){
# Coerce to a double-storage matrix so .Call() hands the C routine the
# exact type it expects (integer input would otherwise crash or misread).
V <- as.matrix(V)
if(storage.mode(V)!="double"){
storage.mode(V) <- "double"
}
nm <- dim(V)
n <- nm[1]
m <- nm[2]
# Random nonnegative starting factors: W is n x r, H is r x m.
W <- abs(matrix(rnorm(r * n), n, r))
H <- abs(matrix(rnorm(r * m), r, m))
# Alternating updates in compiled code; returns a list containing at least
# W, H and the iteration count (see the value section in the header notes).
wh <- .Call("whupdate",V, W, H, as.integer(max.iteration), stop.condition)
# Hitting the iteration cap means the stop.condition was never met.
if(wh$iterations == max.iteration)
warning("Iteration doesn't converge!")
return(wh)
}
|
f0a0919a47ffad41d02cf52bce3a7f9aa2d91390
|
59d439e3690bb5bda3077ba410ff13ae5a3d9731
|
/R/add_block_number.R
|
ec459b68d2558c36d93f36e16bc9123f39d2f53b
|
[] |
no_license
|
MoseleyBioinformaticsLab/li.ntkoldlrkoLipidomics
|
e4864a8d79ab678fa1d8020a94b1f168c319b040
|
b6442d3d6c87fec6e8cc48ac0f0a59144124dcad
|
refs/heads/main
| 2023-04-13T03:15:41.842885
| 2023-01-03T21:12:02
| 2023-01-03T21:12:02
| 581,537,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 360
|
r
|
add_block_number.R
|
#' Number consecutive runs of identical group numbers
#'
#' Consecutive rows of \code{ko_info} that share the same
#' \code{group_number} form a run; runs are numbered 1, 2, 3, ... in order
#' of appearance and the run id is stored in a new \code{block} column.
#'
#' @title Add a block number column
#' @param ko_info data.frame with a \code{group_number} column.
#' @return \code{ko_info} with an added \code{block} column of run ids.
#' @author rmflight
#' @export
add_block_number <- function(ko_info) {
  # Run-length encode the group sequence, then expand one id per run.
  runs <- rle(ko_info$group_number)
  ko_info$block <- rep(seq_along(runs$values), times = runs$lengths)
  ko_info
}
|
3b8d8a7391506a7b307cc67135f2a0dd204becf4
|
fb036b826e32f4700c44a912079fa8ba3ba19f60
|
/AcmTransactions/code/PlotRanges.R
|
882b48ceb378b27df865649597a24f6845506264
|
[] |
no_license
|
srvanderplas/LieFactorSine
|
d10ad465ff5706fdd7f9b8bc9017cb6464a75e8e
|
7934646013864b0283f5259a36b7ffce0c2cc84a
|
refs/heads/master
| 2021-01-19T00:16:29.391362
| 2015-05-21T13:53:50
| 2015-05-21T13:53:50
| 8,753,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,424
|
r
|
PlotRanges.R
|
# Interval plots of Turk study answers against the parameter ranges implied
# by the picture key, plus a subset of reliable participants.
library(ggplot2)
library(plyr)
library(msm)
library(reshape2)  # provides dcast(), used below but not loaded in the original
# set.seed(70032608) # so that intervals don't change
turkdata.full <- read.csv("./data/turkdataclean.csv", stringsAsFactors = FALSE)
# Bug fix: the original filtered an undefined object `turkdata`; the file was
# read into `turkdata.full`, so filter that instead.
turkdata <- subset(turkdata.full, len > 10)
library(doMC)
registerDoMC(cores = 12)
# Keep only (participant, parameter) combinations with more than 4 answers.
turkdata <- ddply(turkdata, .(ip.id, test_param), transform, n = length(test_param))
turkdata <- subset(turkdata, n > 4)
key <- read.csv("./data/pictureKey.csv", stringsAsFactors = FALSE)
key$pic_name <- tolower(key$pic_name)
# Bracket each key row with its neighbours' w and lmax values so every answer
# can be drawn as an interval (first/last rows reuse their own endpoints).
tmp <- ddply(key, .(pic_name), transform,
             w.min = w[c(1, 1:5)], w.max = w[c(2:6, 6)],
             d.min = round(lmax[c(1, 1:5)], 3), d.max = round(lmax[c(2:6, 6)], 3))
turkdata2 <- merge(turkdata, tmp,
                   by.x = c("pic_name", "answer.w"), by.y = c("pic_name", "w"),
                   all.x = TRUE)
# Answer intervals on the w scale, faceted by test parameter.
ggplot(data = subset(turkdata2, difficulty != "test")) +
  geom_segment(aes(x = ip.id, xend = ip.id, y = w.min, yend = w.max), alpha = .1) +
  geom_jitter(aes(x = ip.id, y = answer.w), alpha = .1) +
  facet_wrap(~test_param) +
  theme_bw()
# Same plot on the lmax (d) scale.
ggplot(data = subset(turkdata2, difficulty != "test")) +
  geom_segment(aes(x = ip.id, xend = ip.id, y = d.min, yend = d.max), alpha = .1) +
  geom_jitter(aes(x = ip.id, y = lmax), alpha = .1) +
  facet_wrap(~test_param) +
  theme_bw()
# Participants who saw all three test parameters and answered each > 6 times.
participants <- dcast(ddply(turkdata, .(ip.id, test_param), summarise,
                            n = length(test_param)),
                      ip.id ~ test_param, value.var = "n")
ipsubset <- subset(participants,
                   rowSums(is.na(participants)) == 0 &
                     rowSums(participants[, 2:4] > 6, na.rm = TRUE) == 3)$ip.id
|
e5d714ac9dfe09d70681da68effaf8d1254c4067
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/JacobiEigen/examples/JacobiS.Rd.R
|
992f1ed0b4577cebe19805e5bef12821d3282811
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
JacobiS.Rd.R
|
# Auto-extracted runnable example from the JacobiS help page of the
# JacobiEigen package (stagewise-rotation Jacobi eigensolver).
library(JacobiEigen)
### Name: JacobiS
### Title: The Jacobi Algorithm using Rcpp with a stagewise rotation
###   protocol
### Aliases: JacobiS
### ** Examples

# Random 8x8 symmetric test matrix (crossprod of an 5x8 draw guarantees
# symmetry and positive semi-definiteness).
V <- crossprod(matrix(runif(40, -1, 1), 8))
# Eigendecomposition via the stagewise-rotation variant.
JacobiS(V)
# Eigenvalues should agree with the plain Jacobi implementation.
all.equal(JacobiS(V)$values, Jacobi(V)$values)
# If both eigenvector bases agree up to sign, this cross product should be
# (numerically) a signed identity matrix; zapsmall() hides rounding noise.
zapsmall(crossprod(JacobiS(V)$vectors, Jacobi(V)$vectors))
|
068c15d00f6c7a0868878b6c55912a2b6217f1f7
|
4b571306a706d3fc339fdc3bea0501c867018d02
|
/run_analysis.R
|
66f25c9d3f0dfacc16a86824952eacba0dbfb65d
|
[] |
no_license
|
hari916/GettingandCleaningDataProject
|
9add1ffa11e4d63e400e36c707457a11c275845c
|
18cf28f6a362a9c078f83e1248cf53c6792c8d0c
|
refs/heads/master
| 2021-01-22T15:22:40.428180
| 2016-09-17T18:55:13
| 2016-09-17T18:55:13
| 68,470,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,815
|
r
|
run_analysis.R
|
###########################################################################################################
# Getting and Cleaning Data Week 4 Assignment
# run_analysis.R
# Initial ver 1.0
# Date Sep-18-2016
#
# Downloads the UCI HAR dataset, merges the train/test sets, keeps the
# mean/std measurements, applies descriptive labels, and writes a tidy
# summary with the average of each variable per subject and activity.
###########################################################################################################
library(plyr)        # load before dplyr so dplyr's verbs take precedence
library(data.table)  # data frame handling
library(dplyr)       # data table manipulations

# Download and unpack the raw data into the working directory.
temp <- tempfile()
download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", temp)
unzip(temp)
unlink(temp)  # the archive is no longer needed once extracted

# Directories and files.
# Bug fix: the original `"UCI\ HAR\ Dataset"` used "\ ", an unrecognized
# escape sequence that is a parse error in R; plain spaces are correct.
data_dir <- "UCI HAR Dataset"
feature_file         <- file.path(data_dir, "features.txt")
activity_labels_file <- file.path(data_dir, "activity_labels.txt")
x_train_file    <- file.path(data_dir, "train", "X_train.txt")
y_train_file    <- file.path(data_dir, "train", "y_train.txt")
subj_train_file <- file.path(data_dir, "train", "subject_train.txt")
x_test_file    <- file.path(data_dir, "test", "X_test.txt")
y_test_file    <- file.path(data_dir, "test", "y_test.txt")
subj_test_file <- file.path(data_dir, "test", "subject_test.txt")

# Read data from files.
features        <- read.table(feature_file, colClasses = c("character"))
activity_labels <- read.table(activity_labels_file, col.names = c("ActivityId", "Activity"))
x_train    <- read.table(x_train_file)
y_train    <- read.table(y_train_file)
subj_train <- read.table(subj_train_file)
x_test    <- read.table(x_test_file)
y_test    <- read.table(y_test_file)
subj_test <- read.table(subj_test_file)

############################################################################
# 1. Merge the training and the test sets to create one data set.
############################################################################
train_sensor_data <- cbind(cbind(x_train, subj_train), y_train)
test_sensor_data  <- cbind(cbind(x_test, subj_test), y_test)
sensor_data <- rbind(train_sensor_data, test_sensor_data)

# Label columns: the 561 feature names plus Subject and ActivityId.
sensor_labels <- rbind(rbind(features, c(562, "Subject")), c(563, "ActivityId"))[, 2]
names(sensor_data) <- sensor_labels

###################################################################################################
# 2. Extract only the measurements on the mean and standard deviation.
###################################################################################################
sensor_mean_std <- sensor_data[, grepl("mean|std|Subject|ActivityId", names(sensor_data))]

#####################################################################################################
# 3. Use descriptive activity names to name the activities in the data set.
#####################################################################################################
sensor_mean_std <- join(sensor_mean_std, activity_labels, by = "ActivityId", match = "first")
sensor_mean_std <- sensor_mean_std[, -1]  # drop the numeric ActivityId

#######################################################################################################
# 4. Appropriately label the data set with descriptive names.
#######################################################################################################
# Remove parentheses, then make syntactically valid names.
names(sensor_mean_std) <- gsub('\\(|\\)', "", names(sensor_mean_std), perl = TRUE)
names(sensor_mean_std) <- make.names(names(sensor_mean_std))
# Expand the terse sensor abbreviations into readable words.
names(sensor_mean_std) <- gsub('^t', "Time", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('^f', "Frequency", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('Acc', "Acceleration", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('GyroJerk', "AngularAcceleration", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('Gyro', "AngularSpeed", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('Mag', "Magnitude", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('\\.mean', ".Mean", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('\\.std', ".StandardDeviation", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('Freq\\.', "Frequency.", names(sensor_mean_std))
names(sensor_mean_std) <- gsub('Freq$', "Frequency", names(sensor_mean_std))

###############################################################################################################################
# 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject.
###############################################################################################################################
tidy <- ddply(sensor_mean_std, c("Subject", "Activity"), numcolwise(mean))
write.table(tidy, file = "sensor_averages_data.txt")
|
1729fb8b56502ab7f108cecf25766ba58870677b
|
2958ba45bce43cedb86154a9c469c1c8362f5832
|
/Plot2.R
|
404ef2e781ec8ec76a0f412cc715220be65f8ad7
|
[] |
no_license
|
mohaody/ExData_Plotting1
|
561fff9f8dbd318e512a9d02ed625e8a8f818ea1
|
3649680f2690ba9babb39d0bd80f4bff0b875ccf
|
refs/heads/master
| 2021-01-19T21:02:05.773616
| 2017-04-19T02:42:42
| 2017-04-19T02:42:42
| 88,597,172
| 0
| 0
| null | 2017-04-18T07:47:25
| 2017-04-18T07:47:24
| null |
UTF-8
|
R
| false
| false
| 1,062
|
r
|
Plot2.R
|
## Download and unzip the household power consumption data, then draw the
## Global Active Power time series for 1-2 Feb 2007 to a PNG file.
# NOTE(review): placeholder user path — replace 'xxxxxx' with a real,
# existing directory before running, otherwise setwd() errors out.
setwd('C:/Users/xxxxxx/Desktop/ExData_Plotting1')
if(!file.exists('data')) dir.create('data')
fileUrl <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
download.file(fileUrl, destfile = './data/household_power_consumption.zip')
unzip('./data/household_power_consumption.zip', exdir = './data')
## read data into R ("?" marks missing values in this dataset)
datafile <- file('./data/household_power_consumption.txt')
plotData <- read.table(datafile, header=TRUE, sep=";", na.strings="?")
# Keep only the two days of interest; Date is stored as d/m/Y strings.
subSetplotData <- plotData[plotData$Date %in% c("1/2/2007", "2/2/2007"),]
## Plot 2
# Combine the Date and Time columns into POSIX timestamps for the x axis.
DateTime <- strptime(paste(subSetplotData$Date, subSetplotData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GlobalActivePower <- as.numeric(subSetplotData$Global_active_power)
# open the PNG device (480x480 px, the assignment's required size)
if(!file.exists('figures')) dir.create('figures')
png(filename = './figures/Plot2.png', width=480, height=480, units='px')
# plot figure 2 as a line chart
plot(DateTime, GlobalActivePower, xlab='', ylab='Global Active Power (kilowatts)', type='l')
# close device to flush the file
dev.off()
|
7591ac06c8a06c2ef6671496de4ffcf16f67b905
|
2219b30a78132a42ef6cc1d6bbbf5b5248e14540
|
/HarpiaApp/harpia.R
|
06c90ab01148c78bacb83f298720e9206f459a8b
|
[] |
no_license
|
barbachanM/HarpiaTest
|
8f0fc031bae05c009b5443abf189c1b15a1b772d
|
cbe4bb0618c360af1cf050b5b4b4827fb073516f
|
refs/heads/master
| 2021-01-14T20:02:21.447481
| 2020-02-24T13:18:32
| 2020-02-24T13:18:32
| 242,740,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,402
|
r
|
harpia.R
|
library(shiny)
library(shinyjs)
library(shinyFiles)
library(shinythemes)
library(shinydashboard)
library(shinyBS)
library(hash)
library(stringr)
library(lmerTest)
require(ggplot2)
library(markovchain)
library(igraph)
library(mixOmics)
library(Boruta)
library(gtools)
library(stringi)
library(readr)
library(ggedit)
library(plotly)
library(devtools)
library(reticulate)
source("getAlphabets.R")
source("getDataStructure.R")
source("linearModel.R")
source("MarkovModel.R")
source("JS.R")
jscode <- "shinyjs.refresh = function() { history.go(0); }"
ui <- dashboardPage(skin = "black",
dashboardHeader(title = 'Harpia'),
dashboardSidebar(
shinyjs::useShinyjs(),
sidebarMenu(id = "mySidebar",
menuItem("Folder Uploads", tabName = "folder", icon = icon("far fa-folder")),
menuItem("Alphabet Upload",tabName = "file", icon = icon("far fa-file")),
menuItem("Entropy level", tabName = "entropy", icon = icon("far fa-bar-chart")),
menuItem("Pseudo count", tabName = "pc", icon = icon("far fa-plus"))
),
box(title = "Run Harpia!", solidHeader = T, width = 12,collapsible = TRUE,background = "black",align = "left",
actionButton("run", icon(name = "fas fa-play", class = "fa 4x"), width = '85%'
))
),
dashboardBody(
useShinyjs(),
extendShinyjs(text = jscode),
conditionalPanel(
condition = ("input.run == 0"),
tags$p(),
tabItems(
tabItem(tabName = "folder",
h3("Folder upload"),
helpText("Please upload folders containg tab delimited files."),
box(title = "Folder 1", solidHeader = T,
fluidRow(
column(2, offset = 0,
shinyDirButton('folder_G1', 'Group 1', 'Please select a folder')),
column(2, offset = 3,
checkboxInput("labelcheck", "Label"))),
conditionalPanel(
condition = "input.labelcheck == true",
textInput("label1","Label for Group 1:","")
),htmlOutput("directorypath")),tags$p(),
box(title = "Folder 2", solidHeader = T,
fluidRow(
column(2, offset = 0,
shinyDirButton('folder_G2', 'Group 2', 'Please select a folder')),
column(2, offset = 3,
checkboxInput("label2check", "Label"))),
conditionalPanel(
condition = "input.label2check == true",
textInput("label2","Label for Group 2:","")
),htmlOutput("directorypath2"))
),
tabItem(tabName = "file",
box(title = "Alphabet file upload", solidHeader = T, width = 12,
fileInput("fileAlphabet", "Choose Alphabet File", accept = c("text/csv", "text/comma-separated-values,text/plain",".csv")))),
tabItem(tabName = "entropy",
box( solidHeader = T, width = 12,
selectInput("selectH", label = h4("Select Entropy Level for Analysis"),
choices = list("-" = 0, "H2" = 2, "H3" = 3, "H4" = 4)))
),
tabItem(tabName = "pc",
box( solidHeader = T, width = 12,
selectInput("pseudocount", label = h4("Select Pseudocount value for Analysis"),
choices = list( "1/N" = "pc","1" = 1, "0.5" = 0.5, "0" = 0)))
)
)
)
,
conditionalPanel(
condition = "input.run",
fluidRow(
headerPanel("Your Harpia Results"),
fluidRow(
downloadButton("download", "Download"),
actionButton("refresh", "Refresh",icon(name = "refresh", class = "fa 4x fa-spin"))),tags$style(type='text/css', "#download {margin-left: 15px;}"),
tabsetPanel(id = "tabset",
tabPanel("Entropy Analysis"
,plotOutput("plot1"), tags$hr()
),
tabPanel("Markov Model Graphs", tags$div(class="header", checked=NA, tags$em(bsButton("help2","Info", icon = NULL, style = "inverse",size = "small", type = "action", block = FALSE, disabled = FALSE, value = FALSE))),fluidRow(
box(plotOutput("plot5"))
, box(plotOutput("plot6"))),fluidRow(box(imageOutput("JS_plot"))
)
),
tabPanel("Linear Model Analysis", tags$div(class="header", checked=NA,
tags$em(bsButton("help1","Info", icon = NULL, style = "inverse",
size = "small", type = "action", block = FALSE, disabled = FALSE,
value = FALSE)
)
),verbatimTextOutput("summaryMLE")
,tags$hr(),
fluidRow(box(plotOutput("plot3")
),box(plotOutput("plot4")
)
), plotOutput("plot2")
,tags$hr()
),
### Boruta
tabPanel("Boruta - Random forest", tags$div(class="header", checked=NA,
tags$em(bsButton("help4","Info", icon = NULL, style = "inverse",
size = "small", type = "action", block = FALSE, disabled = FALSE,
value = FALSE)
)
),selectInput("selectB", label = h4("Select Entropy Level for Classification", bsTooltip("selectB", "The entropy level of the analysis should be chosen based on the linear model result found on the Linear Model Analysis tab.", placement = "right", trigger = "hover")),
choices = list("-" = 0, "H1" = 1, "H2" = 2, "H3" = 3, "H4" = 4)),plotOutput("borutaplot")
,tags$hr()
,tags$hr(),tableOutput("bStats")
)
)
)
)
))
server <- shinyServer(function(input, output, session) {
start_time <- Sys.time()
observeEvent(input$run,addClass(selector = "body", class = "sidebar-collapse"))
volumes = getVolumes()
folderInput1 = NULL
folderInput1 <- reactive({
shinyDirChoose(input, 'folder_G1', roots = volumes, session = session,
restrictions = system.file(package = 'base'))
return(parseDirPath(volumes, input$folder_G1))
})
folderInput2 = NULL
folderInput2 <- reactive({
shinyDirChoose(input, 'folder_G2', roots = volumes, session = session,
restrictions = system.file(package = 'base'))
return(parseDirPath(volumes, input$folder_G2))
})
output$directorypath <- renderUI({
HTML(folderInput1())
})
output$directorypath2 = renderUI({
HTML(folderInput2())
})
observeEvent(input$run,{
withProgress(
{
alphabet = input$fileAlphabet
alphabets = getAlphabetsR(alphabet$datapath)
#Generate alphabets for all possible entropy levels
alphabetH1 = unlist(alphabets$H1)
alphabetH2 = unlist(alphabets$H2)
alphabetH3 = unlist(alphabets$H3)
alphabetH4 = unlist(alphabets$H4)
#Alphabet Size
nH1 = length(alphabetH1)
nH2 = length(alphabetH2)
nH3 = length(alphabetH3)
nH4 = length(alphabetH4)}
,
message = 'Creating Alphabets...',
detail = ''
)
files1 <- reactive({
list.files(path = folderInput1(), pattern = "*.csv", full.names = T)
})
nFiles1 <-reactive({length(files1())})
files2 <- reactive({
list.files(path = folderInput2(), pattern = "*.csv", full.names = T)
})
nFiles2 <- reactive({ length(files2())})
f1 = isolate(files1())
print(folderInput1())
f2 = isolate(files2())
print(f2)
updateSelectInput(session, 'selectB', choices =c(1:input$selectH))
if(input$selectH == 2){
EntropyAnalysisGroup1 = reactive({if(!is.null(f1)){
lof1 = isolate(files1())
path1 = isolate(folderInput1())
nf1 = isolate(nFiles1())
pc = input$pseudocount
if(pc == "pc"){
pc = 1/nH2
}
print(pc)
combo = getDataR_upto2(as.character(path1),alphabetH1,alphabetH2, pc)
return(combo)}
else(NULL)
})
EntropyAnalysisGroup2 = reactive({if(!is.null(f2)){
lof2 = isolate(files2())
path2 = isolate(folderInput2())
nf2 = isolate(nFiles2())
pc = input$pseudocount
if(pc == "pc"){
pc = 1/nH2
}
print(pc)
combo = getDataR_upto2(as.character(path2),alphabetH1,alphabetH2, pc)
return(combo)}
else(NULL)
})
}
if(input$selectH == 3){
EntropyAnalysisGroup1 = reactive({if(!is.null(f1)){
lof1 = isolate(files1())
path1 = isolate(folderInput1())
nf1 = isolate(nFiles1())
pc = input$pseudocount
if(pc == "pc"){
pc = 1/nH3
}
print(pc)
combo = getDataR_upto3(as.character(path1),alphabetH1,alphabetH2,alphabetH3, pc)
return(combo)}
else(NULL)
})
EntropyAnalysisGroup2 = reactive({if(!is.null(f2)){
lof2 = isolate(files2())
path2 = isolate(folderInput2())
nf2 = isolate(nFiles2())
pc = input$pseudocount
if(pc == "pc"){
pc = 1/nH3
}
print(pc)
combo = getDataR_upto3(as.character(path2),alphabetH1,alphabetH2,alphabetH3, pc)
return(combo)}
else(NULL)
})
}
if(input$selectH == 4){
EntropyAnalysisGroup1 = reactive({if(!is.null(f1)){
lof1 = isolate(files1())
path1 = isolate(folderInput1())
nf1 = isolate(nFiles1())
pc = input$pseudocount
if(pc == "pc"){
pc =1/nH4
}
combo = getDataR_upto4(as.character(path1),alphabetH1,alphabetH2,alphabetH3, alphabetH4, pc)
return(combo)}
else(NULL)
})
EntropyAnalysisGroup2 = reactive({if(!is.null(f2)){
lof2 = isolate(files2())
path2 = isolate(folderInput2())
nf2 = isolate(nFiles2())
pc = input$pseudocount
if(pc == "pc"){
pc = 1/nH4
}
combo = getDataR_upto4(as.character(path2),alphabetH1,alphabetH2,alphabetH3,alphabetH4, pc)
return(combo)}
else(NULL)
})
}
withProgress(message = "Calculating Entropy", value = 0.1, {
Group1Data = EntropyAnalysisGroup1()
incProgress(1/4, detail = paste(": Folder 1"))
Group2Data = EntropyAnalysisGroup2()
incProgress(2/4, detail = paste(": Folder 2"))
end_time <- Sys.time()
print(end_time - start_time)
#####$ Analysis:
incProgress(3/4, detail = paste("Generating plot"))
plot1 = reactive({
if(!is.null(EntropyAnalysisGroup2()) & !is.null(EntropyAnalysisGroup1())){
# Group2_Data = EntropyAnalysisGroup2()
# Group1_Data = EntropyAnalysisGroup1()
#
EntropyGroup1 = Group1Data$tEntropy
EntropyGroup2 = Group2Data$tEntropy
if(input$selectH == 2){
EntropyGroup1LM = as.matrix(cbind(EntropyGroup1$H0,EntropyGroup1$H1,EntropyGroup1$H2))
EntropyGroup2LM = as.matrix(cbind(EntropyGroup2$H0,EntropyGroup2$H1,EntropyGroup2$H2))
}
if(input$selectH == 3){
EntropyGroup1LM = as.matrix(cbind(EntropyGroup1$H0,EntropyGroup1$H1,EntropyGroup1$H2,EntropyGroup1$H3))
EntropyGroup2LM = as.matrix(cbind(EntropyGroup2$H0,EntropyGroup2$H1,EntropyGroup2$H2,EntropyGroup2$H3))
}
if(input$selectH == 4){
EntropyGroup1LM = as.matrix(cbind(EntropyGroup1$H0,EntropyGroup1$H1,EntropyGroup1$H2,EntropyGroup1$H3,EntropyGroup1$H4 ))
EntropyGroup2LM = as.matrix(cbind(EntropyGroup2$H0,EntropyGroup2$H1,EntropyGroup2$H2,EntropyGroup2$H3,EntropyGroup2$H4))
}
colnames(EntropyGroup1LM) = Group1Data$levels
colnames(EntropyGroup2LM) = Group2Data$levels
Group2 = colMeans(EntropyGroup2LM)
Group1 = colMeans(EntropyGroup1LM)
statsG1 = c()
statsG2 = c()
for (level in colnames(EntropyGroup2LM)){
statsG2 = c(statsG2,as.numeric(sd(EntropyGroup2LM[,level])))
statsG1 = c(statsG1,as.numeric(sd(EntropyGroup1LM[,level])))
}
G2Quantiles = t(matrix(statsG2, ncol=length(colnames(EntropyGroup2LM)), byrow=TRUE))
G2Quantiles = as.data.frame(G2Quantiles, stringsAsFactors=FALSE)
colnames(G2Quantiles) = c("SD")
row.names(G2Quantiles) = colnames(EntropyGroup2LM)
G1Quantiles = t(matrix(statsG1, ncol=length(colnames(EntropyGroup1LM)), byrow=TRUE))
G1Quantiles = as.data.frame(G1Quantiles, stringsAsFactors=FALSE)
colnames(G1Quantiles) = c("SD")
row.names(G1Quantiles) = colnames(EntropyGroup1LM)
dataEntropy = data.frame(
Group = factor(c(rep("Group2",length(colnames(EntropyGroup2LM))),c(rep("Group1",length(colnames(EntropyGroup2LM)))))),
Level = factor(c(rep(colnames(EntropyGroup2LM),2)), levels=c(colnames(EntropyGroup2LM))),
Entropy = c(Group2,Group1),
SD = c(G2Quantiles$SD,G1Quantiles$SD)
)
pd <- position_dodge(0.05)
#
if(input$label1 != ""){
label1 = input$label1
}
else{label1 = "Group1" }
if(input$label2 != ""){
label2 = input$label2
}
else{label2 = "Group2" }
return({ggplot(data=dataEntropy, aes(x=Level, y=Entropy, group=Group, colour= Group)) +
geom_errorbar(aes(ymin=Entropy-SD, ymax=Entropy+SD), width=.1,position=pd) +
geom_line() +
scale_color_manual(labels=c(label1,label2),values=c('firebrick3','deepskyblue3'))+
geom_point(position=pd, size=3)
})
}
})
output$plot1 = renderPlot({
plot1()
})
})
withProgress({
lmerAnalysis = reactive({
if(!is.null(EntropyAnalysisGroup2()) & !is.null(EntropyAnalysisGroup1())){
Group2_Data = EntropyAnalysisGroup2()
Group1_Data = EntropyAnalysisGroup1()
if(input$label1 != ""){
label1 = input$label1
}
else{label1 = "Group1" }
if(input$label2 != ""){
label2 = input$label2
}
else{label2 = "Group2" }
LM1 = linearModelR(Group1_Data$levels, Group1_Data$Entropy, label1)
LM2 = linearModelR(Group2_Data$levels, Group2_Data$Entropy, label2)
G1_LM = (LM1$LM)
#G1_LM = lapply(G1_LM[,2], as.numeric)
#print(unlist(LM1))
G2_LM = (LM2$LM)
#G2_LM = lapply(G2_LM[,2], as.numeric)
#print(LM2)
MLEData = rbind(G1_LM,G2_LM)
mod1 = lmer(Entropy ~ Genotype*Level + (1|Mouse),MLEData)
summary(mod1)
combo = list(mod1 = mod1, MLEData = MLEData)
return(combo)
}
else(return(NULL))
})
plot2 = reactive({
if(!is.null(lmerAnalysis())){
outputOptions(output,"plot2",suspendWhenHidden=FALSE)
MLEData = lmerAnalysis()
MLEData = MLEData$MLEData
x = interaction(MLEData$Genotype,MLEData$Level)
if(input$label1 != ""){
label1 = input$label1
}
else{label1 = "Group1" }
if(input$label2 != ""){
label2 = input$label2
}
else{label2 = "Group2" }
return({
ggplot(MLEData, aes(x= x , y=Entropy, fill= Genotype)) + geom_boxplot() + scale_fill_manual(labels=c(label1,label2),values=c('firebrick3','deepskyblue3')) + labs(x = "Genotype*Level", y = "Entropy")
})
}
else(return(NULL))})
output$plot2 = renderPlot({
if(!is.null(lmerAnalysis())){
print(plot2())}
})
plot3 = function()({if(!is.null(lmerAnalysis())){
mle = isolate(lmerAnalysis())
return({plot(mle$mod1)
})}})
output$plot3 = renderPlot({
if(!is.null(lmerAnalysis())){
plot3()}
})
plot4 = reactive({if(!is.null(lmerAnalysis())){
#outputOptions(output,"plot4",suspendWhenHidden=FALSE)
mle = isolate(lmerAnalysis())
return({qqnorm(resid(mle$mod1))
qqline(resid(mle$mod1))
})}})
output$plot4 = renderPlot({
print(plot4())
})
summaryMLE = reactive({if(!is.null(lmerAnalysis())){
mle = isolate(lmerAnalysis())
return({summary(mle$mod1)})
}})
output$summaryMLE = renderPrint({
outputOptions(output,"summaryMLE",suspendWhenHidden=FALSE)
if(!is.null(lmerAnalysis())){
# mle = isolate(lmerAnalysis())
str = unlist(strsplit(as.character(summaryMLE()[1]), "\n"))
return(summaryMLE())}
})
## Markov Graph
plotGRAPH5 = reactive({if(!is.null(EntropyAnalysisGroup1())){
Group1Data = EntropyAnalysisGroup1()
`%notin%` <- Negate(`%in%`)
if(input$label1 == ""){
group = "Group1"
}
else{group = input$label1}
markovdata = markovmodelR(Group1Data$counts2, input$pseudocount)
print(markovdata$nodeSize)
g.copy = 0
g <- graph.adjacency(as.matrix(markovdata$TPM), weighted=TRUE)
print(g)
sortedWeights = sort(E(g)$weight, decreasing = T)[1:10]
sortedWeights
print(E(g)$weight)
g.copy <- delete.edges(g, which(E(g)$weight %notin% sortedWeights))
deg <- degree(g.copy, mode="all")
V(g.copy)$size = rowMeans(Group1Data$counts1)*.8
E(g.copy)$arrow.size <- 1.2
E(g.copy)$edge.color <- "gray80"
E(g.copy)$width <- E(g.copy)$weight*15
V(g.copy)$label.cex = .7
E(g.copy)$arrow.mode = 2
V(g.copy)$color = "firebrick3"
main = paste("Transition Graph for", group, sep = " ")
return(plot(g.copy, main = main, layout=layout_in_circle(g.copy), vertex.label.color= "black",
vertex.label.family = "Helvetica", edge.label.font = 2))
}
else(return())
})
output$plot5 = renderPlot({
if(!is.null(plotGRAPH5())){
plotGRAPH5()}
else(NULL)
})
plotGRAPH6 = reactive({if(!is.null(EntropyAnalysisGroup2())){
`%notin%` <- Negate(`%in%`)
Group2_Data = EntropyAnalysisGroup2()
if(input$label2 == ""){
group = "Group2"
}
else{group = input$label2}
markovdata = markovmodelR(Group2_Data$counts2, input$pseudocount)
g.copy = 0
g <- graph.adjacency(as.matrix(markovdata$TPM), weighted=TRUE)
sortedWeights = sort(E(g)$weight, decreasing = T)[1:10]
sortedWeights
print(E(g)$weight)
g.copy <- delete.edges(g, which(E(g)$weight %notin% sortedWeights))
deg <- degree(g.copy, mode="all")
V(g.copy)$size = rowMeans(Group2_Data$counts1)*.8
E(g.copy)$arrow.size <- 1.2
E(g.copy)$edge.color <- "gray80"
E(g.copy)$width <- E(g.copy)$weight*15
V(g.copy)$label.cex = .7
E(g.copy)$arrow.mode = 2
V(g.copy)$color="deepskyblue3"
main = paste("Transition Graph for", group, sep = " ")
return(plot(g.copy, main = main, layout=layout_in_circle(g.copy), vertex.label.color= "black",
vertex.label.family = "Helvetica", edge.label.font = 2))
}
else(return())
})
output$plot6 = renderPlot({
if(!is.null(EntropyAnalysisGroup2())){
plotGRAPH6()}
})
### JS divergence
JS_div = reactive({if(!is.null(EntropyAnalysisGroup1())){
Group1Data = EntropyAnalysisGroup1()
if(input$label1 == ""){
group = "Group1"
}
else{group = input$label1}
tpm1 = markovmodelR(Group1Data$counts2, input$pseudocount)
Group2Data = EntropyAnalysisGroup2()
if(input$label2 == ""){
group = "Group2"
}
else{group = input$label2}
tpm2 = markovmodelR(Group2Data$counts2, input$pseudocount)
JS_df = JS_R(data.frame(tpm1), data.frame(tpm2))
JS = data.frame(JS_df$JS_df)
JS$call = row.names(data.frame(tpm1$TPM))
colnames(JS) = c("JS_Divergence", "call")
newdata <- JS[order(JS$JS_Divergence),]
return(ggplot(JS, aes(x = reorder(call, -JS_Divergence), y = JS_Divergence, fill=JS_Divergence)) + geom_bar(stat = "identity")+ scale_fill_distiller(palette = "Spectral")+xlab("Calls")+ylab("Jensen-Shannon Divergence"))
}
else(return())
})
output$JS_plot = renderPlot({
if(!is.null(EntropyAnalysisGroup2())){
JS_div()}
})
boruta = reactive({
if(!is.null(EntropyAnalysisGroup2()) & !is.null(EntropyAnalysisGroup1()) & input$selectB != 0) {
Group2_Data = EntropyAnalysisGroup2()
Group1_Data = EntropyAnalysisGroup1()
if (input$selectB == 1){
Group1 = (Group1_Data$F1)
Group2 = (Group2_Data$F1)
#print(Group1[['C']])
data1 = c()
#print(Group1)
for(call in alphabetH1){
#print(as.vector(Group1[[call]]))
data1 = cbind(data1,as.vector(Group1[[call]]))
}
groupDataGroup1 = as.matrix(data1)
Group = c(rep(1,nrow(Group1)))
groupDataGroup1 = cbind(groupDataGroup1,Group)
# print(groupDataGroup1)
data2 = c()
#print(Group1)
for(call in alphabetH1){
#print(as.vector(Group2[[call]]))
data2 = cbind(data2,as.vector(Group2[[call]]))
}
groupDataGroup2 = as.matrix(data2)
Group = c(rep(2,nrow(Group2)))
groupDataGroup2 = cbind(groupDataGroup2,Group)
colnames(groupDataGroup1) = c(alphabetH1, "Group")
colnames(groupDataGroup2) = c(alphabetH1, "Group")
#print(groupDataGroup2)
}
if (input$selectB == 2){
Group1 = (Group1_Data$F2)
colnames(Group1) = gsub("\\t", "\t", colnames(Group1))
print(Group1)
Group2 = (Group2_Data$F2)
colnames(Group2) = gsub("\\t", "\t", colnames(Group2))
#print(Group1[['C']])
data1 = c()
data2 = c()
#print(Group1)
for(call in colnames(Group1)){
#print(as.vector(Group1[[call]]))
data1 = cbind(data1,as.vector(Group1[[call]]))
data2 = cbind(data2,as.vector(Group2[[call]]))
}
groupDataGroup1 = as.matrix(data1)
Group = c(rep(1,nrow(Group1)))
groupDataGroup1 = cbind(groupDataGroup1,Group)
# print(groupDataGroup1)
#print(Gro
groupDataGroup2 = as.matrix(data2)
Group = c(rep(2,nrow(Group2)))
groupDataGroup2 = cbind(groupDataGroup2,Group)
colnames(groupDataGroup1) = c(unlist(alphabets$H2_noTab), "Group")
colnames(groupDataGroup2) = c(unlist(alphabets$H2_noTab), "Group")
#print(groupDataGroup2)
}
if (input$selectB == 3){
Group1 = (Group1_Data$F3)
Group2 = (Group2_Data$F3)
#print(Group1[['C']])
data1 = c()
data2 = c()
#print(Group1)
for(call in colnames(Group1)){
#print(as.vector(Group1[[call]]))
data1 = cbind(data1,as.vector(Group1[[call]]))
data2 = cbind(data2,as.vector(Group2[[call]]))
}
groupDataGroup1 = as.matrix(data1)
groupDataGroup2 = as.matrix(data2)
Group = c(rep(1,nrow(Group1)))
groupDataGroup1 = cbind(groupDataGroup1,Group)
# print(groupDataGroup1)
Group = c(rep(2,nrow(Group2)))
groupDataGroup2 = cbind(groupDataGroup2,Group)
colnames(groupDataGroup1) = c(unlist(alphabets$H3_noTab), "Group")
colnames(groupDataGroup2) = c(unlist(alphabets$H3_noTab), "Group")
#print(groupDataGroup2)
}
if (input$selectB == 4 & input$selectH == 4){
Group1 = (Group1_Data$F4)
Group2 = (Group2_Data$F4)
#print(Group1[['C']])
data1 = c()
data2 = c()
#print(Group1)
for(call in colnames(Group1)){
#print(as.vector(Group1[[call]]))
data1 = cbind(data1,as.vector(Group1[[call]]))
data2 = cbind(data2,as.vector(Group2[[call]]))
}
groupDataGroup1 = as.matrix(data1)
Group = c(rep(1,nrow(Group1)))
groupDataGroup1 = cbind(groupDataGroup1,Group)
# print(groupDataGroup1)
groupDataGroup2 = as.matrix(data2)
Group = c(rep(2,nrow(Group2)))
groupDataGroup2 = cbind(groupDataGroup2,Group)
colnames(groupDataGroup1) = c(unlist(alphabets$H4_noTab), "Group")
colnames(groupDataGroup2) = c(unlist(alphabets$H4_noTab), "Group")
#print(groupDataGroup2)
}else(return)
set.seed(7777)
#print(groupDataHT)
borutaDF = rbind(groupDataGroup2,groupDataGroup1)
#colnames(borutaDF)[ncol(borutaDF)] = "Group"
# borutaDF[,"Group"] = as.factor( borutaDF[,"Group"])
#print(typeof(borutaDF))
borutaDF = as.data.frame(borutaDF)
#colnames(borutaDF) = c(alphabetH1, "Group")
print(borutaDF)
b = Boruta(Group~.,data=borutaDF,pValue = 0.001)
return(b)}
else(return(NULL))
})
# Boruta feature-importance boxplot for the combined two-group data.
# Halts with an upload prompt while no Boruta result exists yet.
borutaplot <- reactive({
  if (is.null(boruta())) {
    stop("Upload folder")
  }
  boruta_fit <- boruta()
  # Green = confirmed, yellow = tentative, red = rejected, blue = shadow.
  plot(boruta_fit,
       colCode = c("darkseagreen4", "goldenrod1", "firebrick", "dodgerblue3"),
       las = 2, cex.axis = .8)
})
# Render the Boruta importance plot in the UI once results are available.
output$borutaplot <- renderPlot({
  if (!is.null(boruta())) {
    print(borutaplot())
  }
})
# Attribute statistics for features Boruta confirmed as important.
# Errors with an upload prompt when no Boruta result exists yet.
borutaStats <- reactive({
  if (is.null(boruta())) {
    stop("Upload folder")
  }
  importance_stats <- attStats(boruta())
  subset(importance_stats, decision == "Confirmed")
})
# Table of Boruta-confirmed features; row names carry the feature labels.
output$bStats <- renderTable(
  {
    borutaStats()
  },
  include.rownames = TRUE
)
# Download the confirmed-feature statistics as a date-stamped CSV.
output$borutaStats <- downloadHandler(
  filename = function() {
    paste0("Boruta-", Sys.Date(), ".csv")
  },
  content = function(file) {
    write.csv(borutaStats(), file, row.names = TRUE)
  }
)
# Bundle every Harpia output into one downloadable zip: a multi-page PDF of
# plots (syllable plot, two transition graphs, QQ diagnostics, entropy
# boxplot, Boruta plot), the Boruta statistics CSV, and the linear-model
# summary text file.
output$download <- downloadHandler(
  filename = function() {
    paste("Harpia_Output", "zip", sep=".")
  },
  content = function(fname) {
    fs <- c()
    tmpdir <- tempdir()
    # Build the archive inside tempdir; restore the working directory when
    # done (or on error) so the rest of the session is unaffected.
    old_wd <- setwd(tmpdir)
    on.exit(setwd(old_wd), add = TRUE)
    pdf("HarpiaGraphics.pdf", width = 8, height = 6)
    print(plot1())
    # Display label for group 1 (fall back to a generic name).
    if (input$label1 == "") {
      group <- "Group1"
    } else {
      group <- input$label1
    }
    Group1Data <- EntropyAnalysisGroup1()
    `%notin%` <- Negate(`%in%`)
    # ---- Transition graph, group 1 ----
    markovdata <- markovmodelR(Group1Data$counts2, input$pseudocount)
    print(markovdata$nodeSize)
    g <- graph.adjacency(as.matrix(markovdata$TPM), weighted = TRUE)
    print(g)
    # Keep only the 10 strongest transitions so the graph stays readable.
    sortedWeights <- sort(E(g)$weight, decreasing = TRUE)[1:10]
    print(E(g)$weight)
    g.copy <- delete.edges(g, which(E(g)$weight %notin% sortedWeights))
    # Node size reflects the group's mean call counts.
    V(g.copy)$size <- rowMeans(Group1Data$counts1) * .6
    E(g.copy)$arrow.size <- 1.2
    E(g.copy)$edge.color <- "gray80"
    E(g.copy)$width <- E(g.copy)$weight * 30
    V(g.copy)$label.cex <- .7
    E(g.copy)$arrow.mode <- 2
    V(g.copy)$color <- "deepskyblue3"
    main <- paste("Transition Graph for", group, sep = " ")
    print(plot(g.copy, main = main, layout = layout_in_circle(g.copy),
               vertex.label.color = "black",
               vertex.label.family = "Helvetica", edge.label.font = 2))
    # ---- Transition graph, group 2 ----
    Group2Data <- EntropyAnalysisGroup2()
    if (input$label2 == "") {
      group <- "Group2"
    } else {
      group <- input$label2
    }
    markovdata2 <- markovmodelR(Group2Data$counts2, input$pseudocount)
    g <- graph.adjacency(as.matrix(markovdata2$TPM), weighted = TRUE)
    print(g)
    sortedWeights <- sort(E(g)$weight, decreasing = TRUE)[1:10]
    print(E(g)$weight)
    g.copy <- delete.edges(g, which(E(g)$weight %notin% sortedWeights))
    # BUG FIX: this previously used Group1Data$counts1, so the group-2
    # graph showed group-1 call counts. Size nodes by group 2's own counts.
    V(g.copy)$size <- rowMeans(Group2Data$counts1) * .6
    E(g.copy)$arrow.size <- 1.2
    E(g.copy)$edge.color <- "gray80"
    E(g.copy)$width <- E(g.copy)$weight * 30
    V(g.copy)$label.cex <- .7
    E(g.copy)$arrow.mode <- 2
    V(g.copy)$color <- "firebrick3"
    main <- paste("Transition Graph for", group, sep = " ")
    print(plot(g.copy, main = main, layout = layout_in_circle(g.copy),
               vertex.label.color = "black",
               vertex.label.family = "Helvetica", edge.label.font = 2))
    # ---- Mixed-model residual diagnostics and entropy boxplot ----
    MLEData <- lmerAnalysis()
    mle <- isolate(lmerAnalysis())
    print({qqnorm(resid(mle$mod1))
      qqline(resid(mle$mod1))})
    print(plot3())
    MLEData <- MLEData$MLEData
    x <- interaction(MLEData$Genotype, MLEData$Level)
    if (input$label1 != "") {
      label1 <- input$label1
    } else {
      label1 <- "Group1"
    }
    if (input$label2 != "") {
      label2 <- input$label2
    } else {
      label2 <- "Group2"
    }
    print(
      ggplot(MLEData, aes(x = x, y = Entropy, fill = Genotype)) +
        geom_boxplot() +
        scale_fill_manual(labels = c(label1, label2),
                          values = c('brown4', 'darkslategray')))
    # ---- Boruta plot (last PDF page) and file outputs ----
    calls.boruta <- boruta()
    plot(calls.boruta,
         colCode = c("darkseagreen4", "goldenrod1", "firebrick", "dodgerblue3"),
         las = 2, cex.axis = .4)
    dev.off()
    fs <- c(fs, "HarpiaGraphics.pdf")
    stats <- attStats(calls.boruta)
    write.csv(stats, "BorutaStats.csv", row.names = TRUE)
    fs <- c(fs, "BorutaStats.csv")
    out <- capture.output(summaryMLE())
    # BUG FIX: was append = TRUE, which duplicated the summary on every
    # repeated download in the same session (tempdir persists per session).
    cat(out, file = "LinearModel.txt", sep = "\n", append = FALSE)
    fs <- c(fs, "LinearModel.txt")
    zip(zipfile = fname, files = fs)
  },
  contentType = "application/zip"
)
### PopOvers:
# Help popovers (shinyBS) attached to the "?" icons; each opens on click.
# help1: explains the mixed-effects linear model tab.
addPopover(session=session, id="help1", title="",
content="Mixed-effects linear model where genotype is a fixed effect and entropy level is a random effect. Because is expected a different baseline entropy for each mouse and for the change in entropy between each entropy level to vary between mice, a random intercept, random slope model was applied.", placement = "bottom",
trigger = "click", options = NULL)
# help2: explains the Markov transition-graph tab.
addPopover(session=session, id="help2", title="",
content="Markov Model graph for the transitions between two calls. Thickness of edges and size of nodes represent the relative proportion of a transition and call numbers, respectively. Complex calls are represented by red nodes and simple calls are represented by blue nodes.", placement = "bottom",
trigger = "click", options = NULL)
# help3: explains the sPLS-DA tab.
addPopover(session=session, id="help3", title="",
content="Sparse Partial Least Squares Determination Analysis is used to perform variable selection and classification in a one step procedure.", placement = "bottom",
trigger = "click", options = NULL)
# help4: explains the Boruta feature-selection tab.
addPopover(session=session, id="help4", title="",
content="Feature selection algorithm using Random Forest classification. It iteratively removes features proved to be less relevant than random probes", placement = "bottom",
trigger = "click", options = NULL)
# Reload the browser page (shinyjs custom handler) when the refresh
# button is pressed.
observeEvent(input$refresh, {
  js$refresh()
})
})
})})
# Launch the Shiny app from the ui and server objects defined above.
runApp(list(ui = ui, server = server))
|
59e131de7fdd6904e75814fb147f4f5c88ef8336
|
0d2d79c5ba3b638f429dcd13eb1f7d6fbb21b9ac
|
/server.R
|
7e9f1c9a790e63b1ff6cc81787ec69855c3f19b6
|
[] |
no_license
|
chrishatesbeer/DevelopingDataProducts
|
c3be55933ec19fc74f877d9bef82bef5066b23df
|
fb991142385acfc37e440ff3443b8b02d45981a5
|
refs/heads/master
| 2021-01-01T05:54:42.447804
| 2017-07-15T08:55:57
| 2017-07-15T08:55:57
| 97,303,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 730
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Treat cylinder count as a categorical predictor, then fit MPG on
# weight, displacement, and cylinders for the prediction server below.
mtcars <- transform(mtcars, cyl = as.factor(cyl))
model <- lm(mpg ~ wt + disp + cyl, data = mtcars)
# Define server logic required to draw a histogram
# Server: predict MPG for the user-specified car and describe the result.
shinyServer(function(input, output) {
  output$estimated <- renderText({
    # Assemble a one-row data frame matching the model's predictors.
    newcar <- data.frame(wt = input$wt, disp = input$disp,
                         cyl = factor(input$cyl))
    mpg_hat <- round(predict(model, newcar), digits = 2)
    paste(mpg_hat, " estimated MPG for a car with ", input$cyl,
          " cylinders, weighing ", input$wt,
          " tonnes and a displacement of ", input$disp)
  })
})
|
28db444c052975b02e7ddbd34041fca3e5136564
|
8c88871493dab02a680fc47a2afe295e01fa0d1b
|
/man/coarse_body.Rd
|
63a6fe349736eb05f9095db79b2ab58a1e96bb66
|
[] |
no_license
|
ryansar/lungct
|
5a0e44815f428860aa2e33f3e2a469700410eba3
|
592b80efa9d7e1e62eecdaf8625896c2eae78c2f
|
refs/heads/master
| 2020-03-22T12:04:27.845557
| 2019-08-12T14:31:27
| 2019-08-12T14:31:27
| 140,015,196
| 4
| 0
| null | 2018-07-06T17:52:04
| 2018-07-06T17:52:04
| null |
UTF-8
|
R
| false
| true
| 434
|
rd
|
coarse_body.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coarse_body.R
\name{coarse_body}
\alias{coarse_body}
\title{Coarse Masking of Image from Z-slicing}
\usage{
coarse_body(x, keep_slices = 20)
}
\arguments{
\item{x}{Binary \code{antsImage}}
\item{keep_slices}{number of slices to keep if needed}
}
\value{
Array or \code{antsImage} object
}
\description{
Calculates a Hull at each slice then stacks them
}
|
99528411a0d2bcdf6255532bfc03a6b1c12bad87
|
0512cbc5d4eff295f4ff17b3a147232dd733ec47
|
/content/simulate-ddi.R
|
ef41d817af944366ffa47e7e67d8158802881ee0
|
[] |
no_license
|
Karnpob-coder/r-pharma-pkpd-2020
|
648faa15bf2a2cae6dd00232f9e80b106d819633
|
9e664e0ebb1ad4224f47ffb315d2e3e076cb84aa
|
refs/heads/master
| 2023-03-17T06:00:08.033758
| 2020-10-09T14:19:19
| 2020-10-09T14:19:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
simulate-ddi.R
|
library(tidyverse)
library(PKPDmisc)
library(mrgsolve)
# Compile the Yoshikado model (mrgsolve); simulate 0-12 h on a 0.1 h grid.
mod <- mread("model/yoshikado.cpp", end = 12, delta = 0.1)
# Dosing events: amt 2000 into compartment 2 at t = 0, amt 30 into
# compartment 1 at t = 1 -- presumably perpetrator then victim dose;
# confirm against the compartment map in model/yoshikado.cpp.
ddi <- c(
ev(amt = 2000, cmt = 2, time = 0),
ev(amt = 30, cmt = 1, time = 1)
)
# Population of 2000 subjects with ikiu sampled log-normally around the
# model's nominal value (sd 0.3 on the log scale). NOTE(review): no
# set.seed() call, so results differ between runs.
n <- 2000
idata <- tibble(ikiu = rlnorm(n, log(mod$ikiu),sqrt(0.09)))
# Simulate the event schedule across all subjects in idata.
out <- mrgsim_ei(mod, events = ddi, idata = idata)
head(out)
# Per-subject partial AUC of the CP output over the simulated times.
summ <-
out %>%
group_by(ID) %>%
summarise(auc = auc_partial(time,CP), .groups = "drop")
# Distribution of simulated AUCs across the population.
ggplot(summ, aes(x = auc)) + geom_histogram(col = "white")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.