blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c0ed915b4b9f9f1cca634c47af808032263ff238
|
c299d9362756fcd7b43ef0d99576ffbe4914e7cb
|
/Project-2-Models.R
|
7bcd2b41f3e5917d6131ccb956f61bcc6c1ea2a7
|
[] |
no_license
|
AJOssege/Predicting-Diabetes-Diagnosis
|
af02805a116ff9eb9a51c9fbfb476f1a55ea8f55
|
3aa361a1360651f902693b650cbbb39ed729334b
|
refs/heads/main
| 2023-01-30T22:13:06.451177
| 2020-12-07T00:41:47
| 2020-12-07T00:41:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,291
|
r
|
Project-2-Models.R
|
## ---------------------------------------------------------------------------
## STAT-6021 Project 2: model selection for glycosylated hemoglobin (glyhb)
## on a cleaned diabetes data set.
## ---------------------------------------------------------------------------

## Set the current directory to Project-2.
## NOTE(review): hard-coded, machine-specific path; consider an RStudio
## project or here::here() so the script is portable.
setwd("C:/SP-PERSONAL/CollegeWork-UVA-MSDS/STAT-6021/Project-2/")
getwd()

## Load the required libraries.
library(faraway)
library(leaps)

## Diabetes data set after cleaning the unnecessary columns and removing the
## blank values.
data <- read.csv("diabetes_data_sub.csv")

## Data variables:
##  [1] "chol"        "stab.glu" "hdl"   "glyhb" "location" "age"
##  [7] "gender"      "bp.1s"    "bp.1d" "waist" "hip"      "time.ppn"
## [13] "hasDiabetes" "bmi"      "bmiCat"

## Check the categorical predictors.
## NOTE(review): since R 4.0 read.csv() no longer converts strings to factors,
## so location/gender/bmiCat may be character here — confirm the checks below
## and convert explicitly if they return FALSE.
is.factor(data$location)
is.factor(data$gender)
is.factor(data$bmiCat)
is.factor(data$hasDiabetes)
data$hasDiabetes <- factor(data$hasDiabetes)
is.factor(data$hasDiabetes)

## Check the indicator variables (factor levels).
levels(data$location)
levels(data$gender)
levels(data$bmiCat)
levels(data$hasDiabetes)

## Check the dummy-variable coding for the categorical variables.
contrasts(data$location)
contrasts(data$gender)
contrasts(data$bmiCat)
contrasts(data$hasDiabetes)

## Fit the full model (also used to eyeball multicollinearity).
result <- lm(glyhb ~ ., data)
summary(result)

## All-subsets regression: collect fit criteria for the best candidate models.
allreg <- regsubsets(glyhb ~ ., data = data, nbest = 9)
best <- as.data.frame(summary(allreg)$outmat)
## Parameters p = number of predictors (the leading integer of the row name,
## e.g. "3  ( 1 )") plus one for the intercept.  sub() is used instead of the
## original substr(..., 1, 1) so models with 10+ predictors parse correctly.
best$p <- as.numeric(sub("^(\\d+).*", "\\1", rownames(best))) + 1
best$r2 <- summary(allreg)$rsq
best$adjr2 <- summary(allreg)$adjr2
best$mse <- (summary(allreg)$rss) / (dim(data)[1] - best$p)
best$cp <- summary(allreg)$cp
best$bic <- summary(allreg)$bic
best

## Sort by the various criteria.
best[order(best$r2), ]    # large R^2 is better
best[order(best$adjr2), ] # large adjusted R^2 is better
best[order(best$mse), ]   # small MSE is better
best[order(best$cp), ]    # small Cp is better
best[order(best$bic), ]   # small BIC is better

## Intercept-only model.
regnull <- lm(glyhb ~ 1, data = data)
## Model with all predictors.
regfull <- lm(glyhb ~ ., data = data)

## Forward selection, backward elimination, and stepwise regression.
step(regnull, scope = list(lower = regnull, upper = regfull), direction = "forward")
step(regfull, scope = list(lower = regnull, upper = regfull), direction = "backward")
step(regnull, scope = list(lower = regnull, upper = regfull), direction = "both")

## R-square and adjusted R-square for the suggested model.
## (Fixed: `chol` was listed twice in the original formula; lm() drops the
## duplicate anyway, so the fit is unchanged.)
result <- lm(glyhb ~ chol + stab.glu + age + location + hasDiabetes, data = data)
summary(result)

##############################################################################
## Without hasDiabetes variable in the model
##############################################################################

## Exclude hasDiabetes by name (the original used the magic index -13, which
## silently breaks if the column order ever changes).
no_hasdiabetes <- data[, names(data) != "hasDiabetes"]

## Fit the full model without hasDiabetes.
result <- lm(glyhb ~ ., no_hasdiabetes)
summary(result)

## All-subsets regression on the reduced data set.
allreg <- regsubsets(glyhb ~ ., data = no_hasdiabetes, nbest = 9)
best <- as.data.frame(summary(allreg)$outmat)
best$p <- as.numeric(sub("^(\\d+).*", "\\1", rownames(best))) + 1
best$r2 <- summary(allreg)$rsq
best$adjr2 <- summary(allreg)$adjr2
## Fixed: use the reduced data set's row count (same value here because only
## a column was dropped, but referencing `data` was fragile).
best$mse <- (summary(allreg)$rss) / (dim(no_hasdiabetes)[1] - best$p)
best$cp <- summary(allreg)$cp
best$bic <- summary(allreg)$bic
best

## Sort by the various criteria.
best[order(best$r2), ]    # large R^2 is better
best[order(best$adjr2), ] # large adjusted R^2 is better
best[order(best$mse), ]   # small MSE is better
best[order(best$cp), ]    # small Cp is better
best[order(best$bic), ]   # small BIC is better

## Intercept-only model.
regnull <- lm(glyhb ~ 1, data = no_hasdiabetes)
## Model with all predictors.
regfull <- lm(glyhb ~ ., data = no_hasdiabetes)

## Forward selection, backward elimination, and stepwise regression.
step(regnull, scope = list(lower = regnull, upper = regfull), direction = "forward")
step(regfull, scope = list(lower = regnull, upper = regfull), direction = "backward")
step(regnull, scope = list(lower = regnull, upper = regfull), direction = "both")

## R-square and adjusted R-square for the suggested model.
result <- lm(glyhb ~ chol + stab.glu + age + location + time.ppn + hdl,
             data = no_hasdiabetes)
summary(result)

## Initial model with the predictors found with the above methods.
result <- lm(glyhb ~ chol + stab.glu + hdl + location + age + bmiCat +
               time.ppn + waist + bmi, data = data)
summary(result)
|
190f1d9133dbed60c7075ce5412d7b774b9abc1a
|
f2af9ca57a5c8ba048f0cf62887c89908d5c4d9f
|
/server.R
|
f3fb07e43bc39ae71fd11ac85973a56f4c0e103c
|
[] |
no_license
|
tarun12reddy/CalculatorShiny
|
4fb90c2f973b423d9d9a89f1ca18868734a74bf0
|
e373bc4aa616cd5be4c7c28c349f58c46f8585bc
|
refs/heads/master
| 2021-01-01T18:01:48.453705
| 2015-03-17T17:30:30
| 2015-03-17T17:30:30
| 32,408,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,059
|
r
|
server.R
|
## Shiny server for a minimal two-operand calculator.
## Inputs : var1, var2 — the operands; var3 — the operator symbol.
## Outputs: value — the computed result as text; Summary — static HTML blurb.
shinyServer(function(input, output, session) {

  ## Gather the two operands and the operator from the UI into a
  ## character vector c(operand1, operand2, symbol).
  Input <- reactive({
    c(input$var1, input$var2, input$var3)
  })

  ## Compute and render the result of applying the chosen operator.
  output$value <- renderText({
    data <- Input()
    num1 <- as.numeric(data[1])
    num2 <- as.numeric(data[2])
    op <- data[3]
    ## Division by zero is the only case where a zero second operand is a
    ## problem (the original rejected zero for every operator, contradicting
    ## its own error message's intent).
    if (op == "/" && num2 == 0) {
      ans <- "Number 2 cannot be zero"
    } else {
      ## switch() with a default guards the case where no symbol matches;
      ## the original left `ans` undefined there, causing a runtime error.
      ans <- switch(op,
                    "+" = num1 + num2,
                    "-" = num1 - num2,
                    "*" = num1 * num2,
                    "/" = num1 / num2,
                    "Unknown operator")
    }
    as.character(ans)
  })

  ## Static description shown in the UI.
  output$Summary <- renderUI(HTML(
    "<ul>
<li> Small Demonstration of Implementation of Calculator </li>
</ul>"))
})
|
dbdcdca84b82a146af05cf341c32e002b90b36bf
|
12e09fb76eac4d76ef25c4214d6a40128282818d
|
/man/stj_ler_julgados.Rd
|
cf273f6551f54fe675450c52c7e089e7dcbbae21
|
[
"MIT"
] |
permissive
|
jjesusfilho/stj
|
76d5676c447a9fb58e42f4bdec9a0761c0a43c0a
|
2a4ccd87ba81197b4f929839830ca71135e15aea
|
refs/heads/master
| 2023-06-22T03:30:11.940248
| 2023-06-20T16:55:25
| 2023-06-20T16:55:25
| 202,847,868
| 12
| 6
| null | null | null | null |
UTF-8
|
R
| false
| true
| 464
|
rd
|
stj_ler_julgados.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stj_ler_julgados.R
\name{stj_ler_julgados}
\alias{stj_ler_julgados}
\title{Lê julgados do STJ}
\usage{
stj_ler_julgados(arquivos = NULL, diretorio = ".")
}
\arguments{
\item{arquivos}{lista de arquivos}
\item{diretorio}{se arquivos não forem informados,
informar diretório}
}
\value{
dataframe
}
\description{
Lê julgados do STJ
}
\examples{
\dontrun{
df <- stj_ler_julgados()
}
}
|
b60b665decae64cc9d0a99cdcc6005084793d449
|
521c587338a8f6355ce1efb260ba590cf22848ad
|
/cachematrix.R
|
0a37d5c1949f6f84722a6c998f46cb4fdfac38bb
|
[] |
no_license
|
s-vichu/ProgrammingAssignment2
|
68b9328ed14cbcf7d4ceaee0c2887b582d36d365
|
0be4755686e02fe6560fee7bd4ef4e3a1264faff
|
refs/heads/master
| 2021-01-09T06:23:03.562849
| 2014-04-25T06:51:01
| 2014-04-25T06:51:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,920
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix takes an R matrix object and encapsulates it into a closure
## consisting of its inverse along with getters and setters for both
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse, exposing
  ## getter/setter closures for both.  Returns NULL when given NULL.
  if (is.null(x)) return(NULL)

  ## Cached state shared by the closures below: the wrapped matrix and its
  ## lazily computed inverse (NULL until cacheSolve stores it).
  original.matrix <- x
  invert.matrix <- NULL

  ## Replace the stored matrix; any previously cached inverse is now stale,
  ## so it is dropped at the same time.
  setMatrix <- function(y) {
    original.matrix <<- y
    invert.matrix <<- NULL
  }

  ## Read back the stored matrix.
  getMatrix <- function() original.matrix

  ## Store a computed inverse in the cache.
  setInvert <- function(invert) invert.matrix <<- invert

  ## Read the cached inverse (NULL when not yet computed).
  getInvert <- function() invert.matrix

  ## Expose the four accessors as a named list.
  list(set = setMatrix,
       get = getMatrix,
       setInvert = setInvert,
       getInvert = getInvert)
}
## cacheSolve takes a cached matrix object (from makeCacheMatrix) and returns
## the inverse of the stored matrix. If the inverse has already been computed,
## it is returned from the cache; otherwise it is computed, cached, and then
## returned. Note: the cache is cleared by makeCacheMatrix's set() whenever
## the stored matrix is replaced — cacheSolve itself does not re-check it.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held in `x` (a makeCacheMatrix object).
  ## If an inverse is already cached it is returned directly; otherwise it is
  ## computed with solve(), stored via x$setInvert(), and returned.
  invert <- x$getInvert()
  ## If the inverse had been computed before, return it from the cache.
  if (!is.null(invert)) {
    message("Returning from cache")  # fixed typo: was "Returing"
    return(invert)
  }
  ## Else compute, cache, and return.
  original <- x$get()
  ## Forward `...` to solve() (e.g. tol =); the original declared `...`
  ## but never used it.
  invert <- solve(original, ...)
  x$setInvert(invert)
  invert
}
|
3b0fb07c79fbff9de5fe27198477f29293c30c17
|
9132996d08213cdf27c8f6d444e3f5b2cfdcfc85
|
/man/add_feature_contiguity_constraints.Rd
|
6d49b1c0512d6315334235d92f21eb8441c995aa
|
[] |
no_license
|
prioritizr/prioritizr
|
152013e81c1ae4af60d6e326e2e849fb066d80ba
|
e9212a5fdfc90895a3638a12960e9ef8fba58cab
|
refs/heads/main
| 2023-08-08T19:17:55.037205
| 2023-08-08T01:42:42
| 2023-08-08T01:42:42
| 80,953,648
| 119
| 30
| null | 2023-08-22T01:51:19
| 2017-02-04T22:45:17
|
R
|
UTF-8
|
R
| false
| true
| 11,341
|
rd
|
add_feature_contiguity_constraints.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_feature_contiguity_constraints.R
\encoding{UTF-8}
\name{add_feature_contiguity_constraints}
\alias{add_feature_contiguity_constraints}
\alias{add_feature_contiguity_constraints,ConservationProblem,ANY,matrix-method}
\alias{add_feature_contiguity_constraints,ConservationProblem,ANY,data.frame-method}
\alias{add_feature_contiguity_constraints,ConservationProblem,ANY,Matrix-method}
\alias{add_feature_contiguity_constraints,ConservationProblem,ANY,ANY-method}
\title{Add feature contiguity constraints}
\usage{
\S4method{add_feature_contiguity_constraints}{ConservationProblem,ANY,data.frame}(x, zones, data)
\S4method{add_feature_contiguity_constraints}{ConservationProblem,ANY,matrix}(x, zones, data)
\S4method{add_feature_contiguity_constraints}{ConservationProblem,ANY,ANY}(x, zones, data)
}
\arguments{
\item{x}{\code{\link[=problem]{problem()}} object.}
\item{zones}{\code{matrix}, \code{Matrix} or \code{list} object describing
the connection scheme for different zones. For \code{matrix}
and \code{Matrix} arguments, each row and column corresponds
to a different zone in the argument to \code{x}, and cell values must
contain binary \code{numeric} values (i.e., one or zero) that indicate
if connected planning units (as specified in the argument to
\code{data}) should be still considered connected if they are allocated to
different zones. The cell values along the diagonal
of the matrix indicate if planning units should be subject to
contiguity constraints when they are allocated to a given zone. Note that
arguments to \code{zones} must be symmetric, and that if a row or column has
a value of one then the diagonal element for that row or column must also
have a value of one. If the connection scheme between different zones
should differ among the features, then the argument to \code{zones} should
be a \code{list} of \code{matrix} or \code{Matrix} objects that shows the
specific scheme for each feature using the conventions described above.
The default argument to \code{zones} is an identity
matrix (i.e., a matrix with ones along the matrix diagonal and zeros
elsewhere), so that planning units are only considered connected if they
are both allocated to the same zone.}
\item{data}{\code{NULL}, \code{matrix}, \code{Matrix}, \code{data.frame}
or \code{list} of \code{matrix}, \code{Matrix}, or \code{data.frame}
objects. The argument to data shows which planning units should be treated
as being connected when implementing constraints to ensure that features
are represented in contiguous units. If different features have
different dispersal capabilities, then it may be desirable to specify
which sets of planning units should be treated as being connected
for which features using a \code{list} of objects. The default argument
is \code{NULL} which means that the connection data is calculated
automatically using the \code{\link[=adjacency_matrix]{adjacency_matrix()}} function and so
all adjacent planning units are treated as being connected for all
features. See the Data format section for more information.}
}
\value{
An updated \code{\link[=problem]{problem()}} object with the constraints added to it.
}
\description{
Add constraints to a problem to ensure that each feature is
represented in a contiguous unit of dispersible habitat. These constraints
are a more advanced version of those implemented in the
\code{\link[=add_contiguity_constraints]{add_contiguity_constraints()}} function, because they ensure that
each feature is represented in a contiguous unit and not that the entire
solution should form a contiguous unit. Additionally, this function
can use data showing the distribution of dispersible habitat for each
feature to ensure that all features can disperse throughout the areas
designated for their conservation.
}
\details{
This function uses connection data to identify solutions that
represent features in contiguous units of dispersible habitat.
It was inspired by the mathematical formulations detailed in
Önal and Briers (2006) and Cardeira \emph{et al.} 2010. For an
example that has used these constraints, see Hanson \emph{et al.} (2019).
Please note
that these constraints require the expanded formulation and therefore
cannot be used with feature data that have negative values.
\strong{Please note that adding these constraints to a problem will
drastically increase the amount of time required to solve it.}
}
\section{Data format}{
The argument to \code{data} can be specified using the following formats.
\describe{
\item{\code{data} as a \code{NULL} value}{connection
data should be calculated automatically
using the \code{\link[=adjacency_matrix]{adjacency_matrix()}} function. This is the default
argument and means that all adjacent planning units are treated
as potentially dispersible for all features.
Note that the connection data must be manually defined
using one of the other formats below when the planning unit data
in the argument to \code{x} is not spatially referenced (e.g.,
in \code{data.frame} or \code{numeric} format).}
\item{\code{data} as a \code{matrix}/\code{Matrix} object}{where rows and columns represent
different planning units and the value of each cell indicates if the
two planning units are connected or not. Cell values should be binary
\code{numeric} values (i.e., one or zero). Cells that occur along the
matrix diagonal have no effect on the solution at all because each
planning unit cannot be a connected with itself. Note that pairs
of connected planning units are treated as being potentially dispersible
for all features.}
\item{\code{data} as a \code{data.frame} object}{containing columns that are named
\code{"id1"}, \code{"id2"}, and \code{"boundary"}. Here, each row
denotes the connectivity between two planning units following the
\emph{Marxan} format. The \code{"boundary"} column should contain
binary \code{numeric} values that indicate if the two planning units
specified in the \code{"id1"} and \code{"id2"} columns are connected
or not. This data can be used to describe symmetric or
asymmetric relationships between planning units. By default,
input data is assumed to be symmetric unless asymmetric data is
also included (e.g., if data is present for planning units 2 and 3, then
the same amount of connectivity is expected for planning units 3 and 2,
unless connectivity data is also provided for planning units 3 and 2).
Note that pairs of connected planning units are treated as being
potentially dispersible for all features.}
\item{\code{data} as a \code{list} object}{containing \code{matrix}, \code{Matrix}, or
\code{data.frame} objects showing which planning units
should be treated as connected for each feature. Each element in the
\code{list} should correspond to a different feature (specifically,
a different target in the problem), and should contain a \code{matrix},
\code{Matrix}, or \code{data.frame} object that follows the conventions
detailed above.}
}
}
\section{Notes}{
In early versions, it was named as the \code{add_corridor_constraints} function.
}
\examples{
\dontrun{
# load data
sim_pu_raster <- get_sim_pu_raster()
sim_features <- get_sim_features()
sim_zones_pu_raster <- get_sim_zones_pu_raster()
sim_zones_features <- get_sim_zones_features()
# create minimal problem
p1 <-
problem(sim_pu_raster, sim_features) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.3) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# create problem with contiguity constraints
p2 <- p1 \%>\% add_contiguity_constraints()
# create problem with constraints to represent features in contiguous
# units
p3 <- p1 \%>\% add_feature_contiguity_constraints()
# create problem with constraints to represent features in contiguous
# units that contain highly suitable habitat values
# (specifically in the top 5th percentile)
cm4 <- lapply(seq_len(terra::nlyr(sim_features)), function(i) {
# create connectivity matrix using the i'th feature's habitat data
m <- connectivity_matrix(sim_pu_raster, sim_features[[i]])
# convert matrix to 0/1 values denoting values in top 5th percentile
m <- round(m > quantile(as.vector(m), 1 - 0.05, names = FALSE))
# remove 0s from the sparse matrix
m <- Matrix::drop0(m)
# return matrix
m
})
p4 <- p1 \%>\% add_feature_contiguity_constraints(data = cm4)
# solve problems
s1 <- c(solve(p1), solve(p2), solve(p3), solve(p4))
names(s1) <- c(
"basic solution", "contiguity constraints",
"feature contiguity constraints",
"feature contiguity constraints with data"
)
# plot solutions
plot(s1, axes = FALSE)
# create minimal problem with multiple zones, and limit the solver to
# 30 seconds to obtain solutions in a feasible period of time
p5 <-
problem(sim_zones_pu_raster, sim_zones_features) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(matrix(0.1, ncol = 3, nrow = 5)) \%>\%
add_binary_decisions() \%>\%
add_default_solver(time_limit = 30, verbose = FALSE)
# create problem with contiguity constraints that specify that the
# planning units used to conserve each feature in different management
# zones must form separate contiguous units
p6 <- p5 \%>\% add_feature_contiguity_constraints(diag(3))
# create problem with contiguity constraints that specify that the
# planning units used to conserve each feature must form a single
# contiguous unit if the planning units are allocated to zones 1 and 2
# and do not need to form a single contiguous unit if they are allocated
# to zone 3
zm7 <- matrix(0, ncol = 3, nrow = 3)
zm7[seq_len(2), seq_len(2)] <- 1
print(zm7)
p7 <- p5 \%>\% add_feature_contiguity_constraints(zm7)
# create problem with contiguity constraints that specify that all of
# the planning units in all three of the zones must conserve first feature
# in a single contiguous unit but the planning units used to conserve the
# remaining features do not need to be contiguous in any way
zm8 <- lapply(
seq_len(number_of_features(sim_zones_features)),
function(i) matrix(ifelse(i == 1, 1, 0), ncol = 3, nrow = 3)
)
print(zm8)
p8 <- p5 \%>\% add_feature_contiguity_constraints(zm8)
# solve problems
s2 <- lapply(list(p5, p6, p7, p8), solve)
s2 <- terra::rast(lapply(s2, category_layer))
names(s2) <- c("p5", "p6", "p7", "p8")
# plot solutions
plot(s2, axes = FALSE)
}
}
\references{
Önal H and Briers RA (2006) Optimal selection of a connected
reserve network. \emph{Operations Research}, 54: 379--388.
Cardeira JO, Pinto LS, Cabeza M and Gaston KJ (2010) Species specific
connectivity in reserve-network design using graphs.
\emph{Biological Conservation}, 2: 408--415.
Hanson JO, Fuller RA, & Rhodes JR (2019) Conventional methods for enhancing
connectivity in conservation planning do not always maintain gene flow.
\emph{Journal of Applied Ecology}, 56: 913--922.
}
\seealso{
See \link{constraints} for an overview of all functions for adding constraints.
Other constraints:
\code{\link{add_contiguity_constraints}()},
\code{\link{add_linear_constraints}()},
\code{\link{add_locked_in_constraints}()},
\code{\link{add_locked_out_constraints}()},
\code{\link{add_mandatory_allocation_constraints}()},
\code{\link{add_manual_bounded_constraints}()},
\code{\link{add_manual_locked_constraints}()},
\code{\link{add_neighbor_constraints}()}
}
\concept{constraints}
|
d4e951d2790fc2eba3a1fc06c8d2b4d898ebc052
|
ddbb2f92f19467b2aeb5517a9becf0d282a61926
|
/R/symbiota.R
|
a3e7dc718b49fda57b67f91ec60a7406d1770bdb
|
[] |
no_license
|
dwmccheyne/rSymbiota
|
1ddfa023b54ac7106256c410073acf654b50d2ea
|
256945d77c4814eb6358589232a61b788891e39f
|
refs/heads/master
| 2021-05-17T08:05:40.782448
| 2019-08-06T06:25:04
| 2019-08-06T06:25:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,267
|
r
|
symbiota.R
|
#' Retrieve records from the Symbiota portals
#' @param taxon character string specifying the taxon name (e.g., species name, family name or higher taxon)
#' @param db portal name, for an overview see \code{\link{portals}}
#' @param country character string specifying country, e.g., "USA"
#' @param state character string specifying state, e.g., "Massachusetts"
#' @param county character string specifying county, e.g., "Worcester"
#' @param locality character string specifying locality, e.g., "Harvard Forest"
#' @param elevation_from character string, meter, e.g., "1000"
#' @param elevation_to character string, meter
#' @param host character string specifying host species, e.g., "Betula alba"
#' @param taxon_type integer, one of 1 to 5 representing "Family or Scientific Name", "Scientific Name only", "Family Only", "Higher Taxonomy", Common Name"
#' @param north_lat character string, coordinate e.g., "45"
#' @param south_lat character string, coordinate
#' @param west_lon character string, coordinate, e.g., "-72"
#' @param east_lon character string, coordinate
#' @param point_lat character string, coordinate
#' @param point_lon character string, coordinate
#' @param radius character string, km, e.g., "50"
#' @param collector character string specifying collector name
#' @param collector_num character string specifying collector number
#' @param coll_date1 character string specifying collection data from, e.g., "19 August 1926"
#' @param coll_date2 character string specifying collection data from, e.g., "19 August 2018"
#' @param syns logical, if TRUE synonyms from MycoBank and IndexFungorum are searched
#' @param port default is 4445L
#' @param remoteServerAddr default is "localhost"
#' @param verbose logical
#' @param screenshot logical, whether screenshot of results should be displayed in Viewer
#' @param browserName character string specifying the browser to use, recommended: "chrome"
#' @param wait numeric specifying the seconds to wait for website to load, recommended 2 for good internet connections;
#' higher otherwise. It would be good to first look up the number of pages for a species and to compare it with the function output to see whether loading times are sufficient.
#' @param max_attempts maximum number of tries in case of internet instability or lost connection
#' @return x an object of class \code{records} with the following components:
#' \item{nr.records}{A numeric giving the number of records retrieved}
#' \item{citation}{A character string with the recommended citation from the website}
#' \item{query}{A list of the user arguments used}
#' \item{records}{A data.frame with the query records results (accessible via 'at' symbol)}
#' \item{db}{A character string specifying the database}
#'
#' @details Interface to the web databases of the Symbiota portals.
#' Symbiota is an open source content management system for curating specimen- and observation-based biodiversity data.
#' Currently ca. 40 portals are available:
#' Consortium of North American Lichen Herbaria, Arctic Lichen Flora, Consortium of North American Bryophyte Herbaria, Frullania Collaborative Research Network, Macroalgal Consortium Herbarium Portal, MyCoPortal, Smithsonian Tropical Research Institute Portal (STRI), Aquatic Invasives, Aquatic Invasives, Aquatic Invasives, Consortium of Midwest Herbaria, SEINet, Intermountain Region Herbaria Network (IRHN), SouthEast Regional Network of Expertise and Collections (SERNEC), North American Network of Small Herbaria, Northern Great Plains Herbaria, Consortium of Northeastern Herbaria (CNH), Madrean Archipelago Biodiversity Assessment (MABA), Madrean Archipelago Biodiversity Assessment (MABA) - Fauna, Herbario Virtual Austral Americano, CoTRAM – Cooperative Taxonomic Resource for Amer. Myrtaceae, InvertEBase Data Portal, Symbiota Collections of Arthropods Network (SCAN), Lepidoptera of North America Network (LepNet), Neotropical Entomology, Neotropical Flora, Monarch (California Academy of Sciences), The Lundell Plant Diversity Portal, Virtual Flora of Wisconsin, Red de Herbarios del Noroeste de México, University of Colorado Herbarium, The Open Herbarium, Consortium of Pacific Herbaria, Minnesota Biodiversity Atlas, Documenting Ethnobiology in Mexico and Central America, OpenZooMuseum, Mid-Atlantic Herbaria Consortium, Channel Islands Biodiversity Information System, Consortium of Small Vertebrate Collections (CSVColl), The University of New Hampshire Collection of Insects and Other Arthropods.
#' For an overview and URLs see \code{\link{portals}}. The function currently searches all collections, because the package is meant for large-scale access.
#' @references \url{http://symbiota.org/docs/}
#' @references Gries, C., Gilbert, E. E., and Franz, N. M. (2014). Symbiota–a virtual platform for creating voucher-based biodiversity information communities. Biodiversity Data Journal, (2).
#'
#' @import RSelenium httr RCurl
#' @importFrom XML htmlParse xpathApply xmlValue
#' @importFrom crayon red
#' @importFrom utils capture.output
#'
#' @author Franz-Sebastian Krah
#'
#' @examples
#' \dontrun{
#' ## Download Helvella observations and plot visualize data
#' spec.dist <- symbiota(taxon = "Helvella", db = "mycoportal", wait = 3)
#' # for all available portals and examples see vignette
#' ## increase wait if your internet is slow (in general fast internet is recommended)
#'
#' # This is how the records table can be accessed:
#' recordsTable(spec.dist)
#'
#' ## However, for the other functions of the package, the output of 'symbiota'
#' ## can be directly forwarded, for example:
#'
#' plot_distmap(x = spec.dist, mapdatabase = "world", interactive = FALSE,
#' gazetter = TRUE)
#' plot_distmap(x = spec.dist, mapdatabase = "usa", interactive = FALSE)
#' plot_distmap(x = spec.dist, mapdatabase = "world", interactive = TRUE)
#' plot_datamap(x = spec.dist, mapdatabase = "state", index = "rec")
#' plot_recordstreemap(x = spec.dist, log = FALSE)
#' }
#' @export
#
# taxon = ""
# db = "Open Herbarium"
# country = "USA"
# taxon_type = 1
# syns = TRUE
# verbose = FALSE
# screenshot = FALSE
# port = 4445L
# browserName = "chrome"
# remoteServerAddr = "localhost"
# wait = 2
# library(rSymbiota)
# library(stringr)
# library(sys)
# library(rvest)
# library(XML)
# library(xml2)
# library(RSelenium)
# source("R/is_table_button.R")
# source("R/start_stop_docker.R")
# source("R/nr_pages.R")
symbiota <- function(taxon = "Amanita muscaria",
db = "mycoportal",
country = "",
state = "",
county = "",
locality = "",
elevation_from = "",
elevation_to = "",
host = "",
taxon_type = 1,
north_lat = "",
south_lat = "",
west_lon = "",
east_lon = "",
point_lat = "",
point_lon = "",
radius = "",
collector = "",
collector_num = "",
coll_date1 = "",
coll_date2 = "",
syns = TRUE,
verbose = FALSE,
screenshot = FALSE,
port = 4445L,
browserName = "chrome",
remoteServerAddr = "localhost",
wait = 4,
max_attempts = 5) {
if(length(grep(db, "The Lundell Plant Diversity Portal"))>0)
stop("This portal is currently not supported!")
# Test internet connectivity before attempting to reach the portal.
if(!url.exists("r-project.org") == TRUE)
stop( "Not connected to the internet. Please create a stable connection and try again." )
## Look up portal website
ports <- portal(db)
if(nrow(ports)>1){
rownames(ports) <- NULL
message("More than 1 portal found, please specify the number\n")
print(ports[,1, drop = FALSE])
Sys.sleep(0.5)
message("Please enter a row number:")
ent <- scan(file = "", what = "", nmax = 1)
ports <- ports[ent,]
}
portal.url <- ports$collection_url
portal.name <- trimws(ports$Portal.Name)
if(!is.character(getURL(portal.url)))
stop(paste(" Database is not available :", portal.url))
if(missing(taxon))
stop("At least a species name has to be specified")
if(length(grep("_", taxon))>0)
taxon <- gsub("_", " ", taxon)
## Test if Docker is running
out <- exec_internal("docker", args = c("ps", "-q"), error = FALSE)
if(out$status != 0)
stop("Docker not available. Please start Docker! https://www.docker.com")
## Wait should not be smaller than 2 seconds
wait <- ifelse(wait<=2, 2, wait)
# Initialize session -----------------------------------------------------
if(verbose)
message("Initialize server\n")
start_docker_try(verbose = verbose, max_attempts = 5, wait = wait)
## Set up remote
dr <- remoteDriver(remoteServerAddr = "localhost",
port = port,
browserName = browserName)
Sys.sleep(wait-1)
## Open connection; run server
out <- capture.output(dr$open(silent = FALSE))
Sys.sleep(2)
if(verbose>1)
message(out)
if(dr$getStatus()$ready)
if(verbose>1)
message(dr$getStatus()$message[1], "\n")
if(!dr$getStatus()$ready)
stop("Remote server is not running \n Please check if Docker is installed!")
# Open Website -----------------------------------------------------------
if(verbose)
message(ifelse(verbose, "Open website\n", ""))
## Navigate to website
## proceed directly to parameters website:
url <- gsub("index.php", "harvestparams.php", portal.url)
dr$navigate(url)
Sys.sleep(wait+3)
## Enter user query parameters ------------------------------------------------------
message(ifelse(verbose, "Send user query to website:\n", ""))
# store query for class
argg <- do.call(c, as.list(match.call()))
query <- argg <- argg[-1]
argg <- do.call(c, argg)
## Fill elements: user defined query input
## Checkbox: Show results in table view
# [test if wesite has a table button or a tick box for table view]
if(!is_table_button(dr)){
button <- dr$findElement('xpath', "//*[@id='showtable']")
button$clickElement()
}
## Checkbox: Include Synonyms from Taxonomic Thesaurus
# [default is ticked]
if(!syns){
button <- dr$findElement('xpath', "//*[@id='harvestparams']/div[3]/span/input")
button$clickElement()
}
## Taxon type
webElem <- dr$findElement(using = 'xpath', paste0("//*[@id='taxontype']/option[", taxon_type ,"]"))
webElem$clickElement()
## Taxon
webElem <- dr$findElement('id', "taxa")
webElem$sendKeysToElement(list(taxon))
## Country
if(country != ""){
webElem <- dr$findElement('id', "country")
webElem$sendKeysToElement(list(country))
}
## State
if(state != ""){
webElem <- dr$findElement('id', "state")
webElem$sendKeysToElement(list(state))
}
## County
if(county != ""){
webElem <- dr$findElement('id', "county")
webElem$sendKeysToElement(list(county))
}
## Locality
if(locality != ""){
webElem <- dr$findElement('id', "locality")
webElem$sendKeysToElement(list(locality))
}
## Elevation lower border
if(elevation_from != ""){
webElem <- dr$findElement('id', "elevlow")
webElem$sendKeysToElement(list(elevation_from))
}
## Elevation upper border
if(elevation_to != ""){
webElem <- dr$findElement('id', "elevhigh")
webElem$sendKeysToElement(list(elevation_to))
}
## Host (Plant species name)
if(host != ""){
webElem <- dr$findElement('id', "assochost")
webElem$sendKeysToElement(list(host))
}
##### Latitude and Longitude
## North latitude border
if(north_lat != ""){
webElem <- dr$findElement('id', "upperlat")
webElem$sendKeysToElement(list(north_lat))
}
## South latitude border
if(south_lat != ""){
webElem <- dr$findElement('id', "bottomlat")
webElem$sendKeysToElement(list(south_lat))
}
## West longitude border
if(west_lon != ""){
webElem <- dr$findElement('id', "leftlong")
webElem$sendKeysToElement(list(west_lon))
}
## East longitude border
if(state != ""){
webElem <- dr$findElement('id', "rightlong")
webElem$sendKeysToElement(list(east_lon))
}
##### Point-Radius Search
## Latitude of point
if(point_lat != ""){
webElem <- dr$findElement('id', "pointlat")
webElem$sendKeysToElement(list(point_lat))
}
## Longitude of point
if(point_lon != ""){
webElem <- dr$findElement('id', "pointlong")
webElem$sendKeysToElement(list(point_lon))
}
## Radius in km
if(radius != ""){
webElem <- dr$findElement('id', "radiustemp")
webElem$sendKeysToElement(list(radius))
}
##### Collector Criteria
## Collector name
if(collector != ""){
webElem <- dr$findElement('id', "collector")
webElem$sendKeysToElement(list(collector))
}
## Collector numbner
if(collector_num != ""){
webElem <- dr$findElement('id', "collnum")
webElem$sendKeysToElement(list(collector_num))
}
## Date record was found (from)
if(coll_date1 != ""){
webElem <- dr$findElement('id', "eventdate1")
webElem$sendKeysToElement(list(coll_date1))
}
## Date record was found (to)
if(coll_date2 != ""){
webElem <- dr$findElement('id', "eventdate2")
webElem$sendKeysToElement(list(coll_date2))
}
# Press Enter -----------------------------------------------------
if(is_table_button(dr)){
button <- dr$findElement('xpath', "//*[@id='harvestparams']/div[2]/div[2]/button")
button$clickElement()
Sys.sleep(wait+2)
}else{
webElem$sendKeysToElement(list(key = "enter"))
Sys.sleep(wait+2)
}
if(screenshot)
dr$screenshot(display = TRUE, useViewer = TRUE)
# Test whether results were found --------------------------------
res <- htmlParse(dr$getPageSource()[[1]])
res <- xpathApply(res, "//div", xmlValue)
res <- grep("No records found matching the query", res)
if(length(res)>0){
# close server
dr$close()
## stop docker
message(ifelse(verbose, "Stop Docker\n", ""))
system(
"docker stop $(docker ps -a -q)",
ignore.stdout = TRUE,
ignore.stderr = TRUE
)
message(red(paste0(paste(rep("#", 43), collapse = ""),
"\n### No records for this query ###\n",
paste(rep("#", 43), collapse = ""))))
opt <- options(show.error.messages=FALSE)
on.exit(options(opt))
return(records(nr.records = 0,
citation = "Not applicable",
query = query,
records = data.frame(NULL),
db = "MyCoPortal"))
}
# Download tables -------------------------------------------------
nr.p <- nr_pages(dr)
message(paste("Downloading", nr.p, "pages\n"))
message("Make sure you have a stable internet connection!\n")
if(length(nr.p)==0){
warning("It seems the page did not load. Try to increase waiting time.")
}
## Download tables in page-wise batches
# tabs <- list()
# for (i in 0:(nr.p-1)) {
# tabs[[i + 1]] <- retry_next_page_download(
# z = i,
# remdriver = dr,
# verbose = verbose,
# max_attempts = 5,
# wait_seconds = wait,
# portal.name = portal.name
# )
# Sys.sleep(1)
# }
## new
if(nr.p == 1){
tabs <- remote_table_retry(remdriver = dr, wait, max_attempts = max_attempts)
message("... done")
}else{
tabs <- download(
remdriver = dr,
max_attempts = max_attempts,
wait = wait,
portal.name = portal.name,
nr_pages = nr.p
)
}
## Rbind all tables
if(verbose)
message(nrow(tabs), " records were downloaded \n")
colnames(tabs) <- gsub(" ", "\\.", colnames(tabs))
colnames(tabs) <- gsub("/", "\\.", colnames(tabs))
## Add coordinates as lon lat column
tabs$coord <- stringr::str_extract(tabs$Locality, "-?\\d*\\.\\d*\\s\\-?\\d*\\.\\d*")
if(!all(is.na(tabs$coord))){
coords <- data.frame(do.call(rbind, strsplit(tabs$coord , " ")))
names(coords) <- c("lat", "lon")
coords <- suppressWarnings(apply(coords, 2, function(x) as.numeric(as.character(x))))
tabs <- data.frame(tabs, coords)
}
## Extract only species name (linnean)
tabs$species <- gsub("\\s\\s", " ", tabs$Scientific.Name)
tabs$species <- gsub("\\bFr\\.\\b", "", tabs$species)
tabs$species <- gsub("\\(|\\)", "", tabs$species)
tabs$species <- gsub("\\s[A-Z].*", "", tabs$species)
tabs$species <- word(tabs$species, 1,2)
# Close Website and Server ------------------------------------------------
message(ifelse(verbose, "Close website and quit server\n", ""))
## Close Website
dr$close()
## Stop docker
stop_docker()
## Return downloaded query results as data.frame
cit <-
paste0(
"Biodiversity occurrence data published by: <",
"all collections",
">; (Accessed through: ",
portal.name,
" (", portal.url, "); Date: ",
Sys.Date(),
")"
)
records(
nr.records = nrow(tabs),
citation = cit,
query = query,
records = tabs,
db = portal.name
)
}
|
2b264ee7f65382882f7b9966240f940e9192f7a1
|
5dfb2dbb3b6a2516b8f2a42df11267c6d13e9d34
|
/load_data.R
|
0920dffb25eeee1fcb47c0a095df6dbd9e801c0d
|
[] |
no_license
|
danfan1/ExData_Plotting1
|
4cceffc90e18f9256ca27b0c84583e21e54769ea
|
d1bf49614edeb6583e0fdc72c62d618379423939
|
refs/heads/master
| 2021-01-18T08:40:53.728730
| 2016-02-09T14:59:11
| 2016-02-09T14:59:11
| 51,367,033
| 0
| 0
| null | 2016-02-09T13:02:57
| 2016-02-09T13:02:57
| null |
UTF-8
|
R
| false
| false
| 997
|
r
|
load_data.R
|
# Download data ----
# Fetch the "Individual household electric power consumption" archive once
# and cache it under ./data; the download and unzip are skipped whenever the
# extracted text file is already present.
if (!file.exists("data")) {
  dir.create("data")
}
dataFile <- "./data/household_power_consumption.txt"
if (!file.exists(dataFile)) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  destFile <- "./data/household_power_consumption.zip"
  # Use the default download method: it resolves to libcurl/wininet on
  # modern R and works on every platform. The previous hard-coded
  # method = "curl" fails wherever the external curl binary is missing
  # (e.g. a stock Windows install).
  download.file(fileUrl, destfile = destFile)
  dateDownloaded <- date()  # record when this snapshot was taken
  unzip(destFile, exdir = "data")
}

# Read data ----
# Guarded with exists() so repeated source() calls do not re-parse the large
# file. Column classes are sniffed from the first 5 rows and passed to the
# full read via colClasses, which speeds up parsing; "?" marks missing values.
if (!exists("rawData")) {
  first5rows <- read.table(dataFile, header = TRUE, sep = ";", na.strings = "?", nrows = 5)
  classes <- sapply(first5rows, class)
  rawData <- read.table(dataFile, header = TRUE, sep = ";", na.strings = "?", colClasses = classes)
}

# Filter and parse date and time data ----
# Keep only 2007-02-01 and 2007-02-02 (dates are stored as d/m/Y strings in
# the raw file), then derive typed DateTime/Date columns for plotting.
if (!exists("hpc")) {
  hpc <- subset(rawData, Date == "1/2/2007" | Date == "2/2/2007")
  hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")
  hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
  # NOTE(review): strptime() yields POSIXlt; downstream plotting scripts may
  # depend on that class, so the assignment is left unchanged.
  hpc$Time <- strptime(hpc$Time, "%H:%M:%S")
}
|
75d1edd684caee917896d4fd43cfbb365066bfc4
|
3703faed5861f855dbf1677f02af7902c3f46687
|
/src/GeneticNetworks/Genetic_Networks/GeneticNetworks.R
|
e795a4873270750b4e66a79e7a225d20c04a4c37
|
[] |
no_license
|
ECGen/ComGenR_development
|
33a36e4b642e3d4fd6a2343f6e2afe192fdf09c7
|
799052622b61b7599721a6d7cb432d701bfd147e
|
refs/heads/master
| 2021-05-27T07:46:19.821008
| 2014-03-18T23:34:36
| 2014-03-18T23:34:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,371
|
r
|
GeneticNetworks.R
|
#Genetic Networks
# Exploratory analysis script: builds a population-level genetic
# "independence network" from allele data for relict cottonwood stands,
# detects community modules, and (further below) overlays the network on a
# map of Nevada.
# NOTE(review): depends on helper functions sourced from ind_net.R /
# igraph_adds.R (allele.patch, allele.split, vectorize, codify, centroids,
# gdist, ind.net); their exact semantics are not visible here.
source('/Users/Aeolus/Documents/Active_Projects/GeneticNetworks/Genetic_Networks/ind_net.R')
library(igraph)
source('/Users/Aeolus/Documents/Active_Projects/GeneticNetworks/Genetic_Networks/igraph_adds.R')
library(RColorBrewer)
#Relict Data
#With parentals
# List the project directory (prints file names; no other effect).
dir('/Users/aeolus/Documents/Active_Projects/GeneticNetworks/Genetic_Networks')
# Raw data: column 2 is the population/site factor. The commented-out
# subset would have dropped rows 303-311.
x=read.csv('/Users/aeolus/Documents/Active_Projects/GeneticNetworks/Genetic_Networks/relicts data revised sw.csv')#[-303:-311,]
table(x[,2])
#removed the parentals
#x=x[x[,2]!='Fremont',]
#x=x[x[,2]!='narrowleaf',]
#x=x[x[,2]!='trichocarpa',]
#Change parental labels
# Shorten parental taxon labels to single letters for compact plotting.
levels(x[,2])[levels(x[,2])=='Fremont']='F'
levels(x[,2])[levels(x[,2])=='narrowleaf']='N'
levels(x[,2])[levels(x[,2])=='trichocarpa']='T'
#extract geographic and genotype removal info
# Column 3 flags genotypes to remove (NA is treated as "keep");
# columns 4-7 hold geographic information used for mapping later on.
NV.rm=x[,3]
NV.rm[is.na(NV.rm)]=0
x=x[NV.rm==0,]
NV.geo=x[,4:7]
x=x[,-3:-7]
#Take the most abundanct allele from all populations
x.=x[,-1:-2]
x.=allele.patch(x.)
x=data.frame(x[,1:2],x.)
#remove missing alleles (i.e., -1 values)
nr=nrow(x)
x[x==-1]<-NA
x=na.omit(x)
# Fraction of rows lost to missing allele calls (printed for inspection).
(nr-nrow(x))/nr
colnames(x)
#
# Split identifier columns (y) from the allele matrix (x).
y=x[,1:2]
x=x[,-1:-2]
#separate the alleles for all the loci into two matrixes
x1x2=allele.split(x)
x1=x1x2[[1]]
x2=x1x2[[2]]
#create a vector for the allele names
alleles=unique(vectorize(cbind(x1,x2)))
#create the codification matrix
C=codify(x1,x2)
colnames(C)=alleles
#Obtain the centroids for populations by averaging the multivariate coding vectors for each population
pop=y[,2]
C.=centroids(C,pop)
pk=table(vectorize(cbind(x1,x2))) #alleleic frequencies
K=length(alleles)
#Genetic distance
#dij^2 = (1/2)*sum((1/K*pk)*(yik-yjk)^2)
dij=gdist(C.,pk,K)
#Calculate the independence network
# ind.net() presumably returns a matrix of partial correlations with
# non-significant edges removed at alpha = 0.05 -- TODO confirm in ind_net.R.
net.=ind.net(as.matrix(dij),nrow(x),alpha=0.05)
#Convert partial correlations to relative strengths
# Transform of a correlation r: -(1/2)*log(1 - r^2).
sij=(-1/2)*log((1-net.^2))
image(sij)
#Look for communities/modules
#http://cneurocvs.rmki.kfki.hu/igraph/doc/R/plot.common.html
# Round weights, build an undirected weighted igraph object, then detect
# modules with the walktrap algorithm (membership is 0-based; +1 so it can
# index a colour palette).
graph.=as.matrix(round(sij,5))
graph.sij=graph.adjacency(graph.,mode='undirected',weighted=TRUE)
sij.wtc=walktrap.community(graph.sij,steps=4)
sij.mod=sij.wtc$membership+1
# NOTE(review): unclass(graph.sij)[[9]][4][[1]]$weight digs the edge weights
# out of igraph internals; this is fragile across igraph versions
# (E(graph.sij)$weight is the supported accessor).
modularity(graph.sij,sij.mod,unclass(graph.sij)[[9]][4][[1]]$weight)
#par(bg='black')
# Plotting attributes: vertices coloured by module membership, edges
# labelled/weighted by link strength (only positive strengths kept).
g=graph.sij
vertex.label=rownames(graph.)
vertex.color=brewer.pal(length(unique(sij.mod)),'Set1')[sij.mod]
my.layout=layout.fruchterman.reingold(g)
vertex.label.color='white'
vertex.frame.color='white'
vertex.label.family='Helvetica'
edge.label=graph.[lower.tri(graph.)]
edge.label=edge.label[edge.label>0]
edge.label.color='violet'
edge.label.family='Helvetica'
edge.label.cex=0.8
edge.width=(edge.label*20)^1.5
edge.color='lightgrey'
# Two renderings: first with numeric edge labels, then without.
plot.igraph(x=g,vertex.label=vertex.label,layout=my.layout,vertex.color=vertex.color,vertex.label.color=vertex.label.color,vertex.frame.color=vertex.frame.color,vertex.label.family=vertex.label.family,edge.label=round(edge.label,2),edge.label.color=edge.label.color,edge.label.cex=edge.label.cex,edge.label.family=edge.label.family,edge.width=edge.width,edge.color=edge.color)
plot.igraph(x=g,vertex.label=vertex.label,layout=my.layout,vertex.color=vertex.color,vertex.label.color=vertex.label.color,vertex.frame.color=vertex.frame.color,vertex.label.family=vertex.label.family,edge.label.color=edge.label.color,edge.label.cex=edge.label.cex,edge.label.family=edge.label.family,edge.width=edge.width,edge.color=edge.color)
# Rebuild with coarser rounding (1 dp) so that weak edges drop to zero.
graph.=as.matrix(round(sij,1))
graph.sij=graph.adjacency(graph.,mode='undirected',weighted=TRUE)
g=graph.sij
edge.label=graph.[lower.tri(graph.)]
edge.label=edge.label[edge.label>0]
edge.label.cex=0.8
edge.width=(edge.label*20)^1.5
plot.igraph(x=g,vertex.label=vertex.label,layout=my.layout,vertex.color=vertex.color,vertex.label.color=vertex.label.color,vertex.frame.color=vertex.frame.color,vertex.label.family=vertex.label.family,edge.label.color=edge.label.color,edge.label.cex=edge.label.cex,edge.label.family=edge.label.family,edge.width=edge.width,edge.color=edge.color)
# Alternative force-directed layout for comparison.
my.layout=layout.drl(g)
plot.igraph(x=g,vertex.label=vertex.label,layout=my.layout,vertex.color=vertex.color,vertex.label.color=vertex.label.color,vertex.frame.color=vertex.frame.color,vertex.label.family=vertex.label.family,edge.label.color=edge.label.color,edge.label.cex=edge.label.cex,edge.label.family=edge.label.family,edge.width=edge.width,edge.color=edge.color)
#hand positioning
# Load previously saved hand-tuned vertex coordinates and open an
# interactive tkplot window (requires Tk; blocks interactively).
tk.layout=read.csv('/Users/Aeolus/Documents/Active_Projects/GeneticNetworks/Genetic_Networks/tklayout.csv')[,-1]
colnames(tk.layout)=c('','')
tkplot(g,layout=tk.layout,vertex.label=vertex.label,vertex.color=vertex.color,vertex.label.color='white',edge.width=edge.width)
#save the tk layout
#tk.layout=tkplot.getcoords(23)
#write.csv(tk.layout,file='tklayout.csv')
#Overlay graph with geographic info
library(maps)
library(gmaps)
library(shape)
# NOTE(review): attach() puts NV.geo's columns (Center.Lat, Center.Long) on
# the search path and is never detached -- they can shadow later variables.
attach(NV.geo)
sites=y[,2]
NV.geo=data.frame(sites,Center.Lat,Center.Long)
# Build one coordinate row per unique site (first occurrence wins, see q[1,]).
NV.geoX=data.frame(array(NA,c(length(unique(sites)),3)))
colnames(NV.geoX)=colnames(NV.geo)
NV.geoX[,1]=unique(sites)
for (i in seq(along=NV.geoX[,1])){
q=NV.geo[NV.geo$sites==NV.geoX$sites[i],2:3]
NV.geoX[i,2:3]=q[1,]
}
# INTERACTIVE: locator(3) waits for three mouse clicks to position the
# F/N/T parental taxa on the Nevada map (rows 1-3 of NV.geoX).
map('state','nevada')
FNT=locator(3)
NV.geoX[1:3,2:3]=cbind(FNT$y,FNT$x)
text(NV.geoX[,3],NV.geoX[,2],labels=as.character(NV.geoX[,1]),col='black',cex=0.5)
FNT=locator(3)
NV.geoX[1:3,2:3]=cbind(FNT$y,FNT$x)
map('state','nevada')
# Re-draw the map with site labels coloured by network module membership.
text(NV.geoX[order(NV.geoX[,1]),3],NV.geoX[order(NV.geoX[,1]),2],labels=as.character(NV.geoX[order(NV.geoX[,1]),1]),col=brewer.pal(length(unique(sij.mod)),'Set1')[sij.mod],cex=0.5)
#Overall Fit Test
#From Dyer and Nasson 2004 using model deviance
#Dm=n_total*log(Sigma/S), where Sigma is the determinant if the MLE estimate of the covariance matrix and S is the determinant of the observed sample covariance matrix
#According to Fortuna et al 2009 and Dyer and Nasson 2004, the fit test isn't exactly necessary because adding additional links to improve fit doesn't necessarily change the patterns of network
#SKIP
#library(ggm)
#Cij=d2cov(dij)
#fitCovGraph(sij,abs(Cij),nrow(x))
#number of excluded edges
#sij.lower=sij[lower.tri(sij)]
#Dm.df=length(sij.lower[sij.lower==0])
#chisq.crit=qchisq(0.05,Dm.df,lower.tail=FALSE)
#plot(0:300,dchisq(0:300,Dm.df),type='l',xlab='Chi^2',ylab='Density')
#points(chisq.crit,dchisq(chisq.crit,Dm.df),pch=19,col='red')
#abline(v=chisq.crit,lty=2)
#text(chisq.crit+25,dchisq(chisq.crit,Dm.df),labels='P > 0.05',cex=0.85)
|
d116b350cfe070069c4b4d7094649a5df71213c4
|
b6d3940b0e6461fbaea946f5dfa94e66c5e1d4c5
|
/R/LDAK_kinship.R
|
0ed7752e055d1c39e1334e2833c209fe21115fdc
|
[] |
no_license
|
vincentgarin/mppGWAS
|
763788b472cbfc3c35fc6a95976f1c6b495006ec
|
3cfe5a34f31f085390e9890a9e04a3851f03c76c
|
refs/heads/master
| 2021-03-22T03:04:27.084701
| 2017-11-23T16:14:36
| 2017-11-23T16:14:36
| 96,194,424
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,816
|
r
|
LDAK_kinship.R
|
################
# LDAK_kinship #
################
# Compute linkage desequilibrium adjusted kinship
#
# Compute linkage disequilibrium adjusted kinship using the program LDAK.
# (http://dougspeed.com/ldak/). The function is a wrapper for the LDAK
# software.
#
# @param ldak.dir directory where the ldak executable program is located
#
# @param weights.loc path to the LDAK weights files (output of function
# \code{\link{LDAK_weights}}). For example, /home/.../weights.
#
# @param bed.file.loc path to the .bed files (output of function
# \code{\link{write_plink_bed}}) without .bed extension. For example,
# /home/.../my_file.
#
# @param out.dir output directory where temporary file will be saved.
# These files will be removed. Default = getwd().
#
# @param power \code{Numerical} value specifying the value of the
# parameter for marker scores standardization. The column of the marker matrix
# (X.j) are multiplied by var(X.j)^(power/2) .Default = -1.
#
# @param K_i \code{Numerical} value specifying a unique chromosome number that
# should be removed from the kinship computation. By default \code{K_i = NULL},
# which means that the kinship is computed using all markers.
#
# @param map If \code{K_i} is not NULL, \code{data.frame} map information with
# at least a colum for marker identifier labeled \code{'mk.id'}, and one column
# for chromosome indicator labeled \code{'chr'}. \strong{The marker identificer
# must be the same as the one of the weight file.}. Default = NULL.
#
# @return Return:
#
# \item{K}{kinship matrix computed with the LDAK weights.}
#
# @author Vincent Garin
#
# @references
#
# Speed, D., Hemani, G., Johnson, M. R., & Balding, D. J. (2012).
# Improved heritability estimation from genome-wide SNPs. The American Journal
# of Human Genetics, 91(6), 1011-1021.
#
# @export
#
# ldak.dir <- "/home/vincent/Haplo_GRM/software/LDAK"
# weights.loc <- "/home/vincent/Haplo_GRM/EUNAM/data/geno/LDAK_test/Test_LDAK_weights"
# bed.file.loc <- "/home/vincent/Haplo_GRM/EUNAM/data/geno/plink_files/Test"
# out.dir <- "/home/vincent/Haplo_GRM/EUNAM/data/geno/LDAK_test"
# power <- -1
# K_i <- 1
# map <- map
LDAK_kinship <- function(ldak.dir, weights.loc, bed.file.loc, out.dir = getwd(),
                         power = -1, K_i = NULL, map = NULL){

  # Temporary working directory for the intermediate LDAK files. Using
  # dir.create()/file.copy()/unlink() instead of shelling out to
  # mkdir/cp/rm makes the function portable (the shell calls failed on
  # Windows), and registering the cleanup with on.exit() guarantees the
  # directory is removed even when an error aborts the function (the
  # original leaked it on failure).
  temp.dir <- file.path(out.dir, "temp_dir")
  dir.create(temp.dir)
  on.exit(unlink(temp.dir, recursive = TRUE), add = TRUE)

  # Work on a copy of the weights so the user's original file is never
  # modified when chromosome K_i is zeroed out below.
  wgh.file2 <- file.path(temp.dir, "wgh")
  file.copy(from = weights.loc, to = wgh.file2)

  if (!is.null(K_i)){

    wgh <- read.table(wgh.file2, header = TRUE, stringsAsFactors = FALSE)

    # Sanity checks: the map and the weight file must describe the exact
    # same set of markers.
    if(dim(map)[1] != dim(wgh)[1]){

      stop(paste("The list of marker in the map and in the weight file do not",
                 "have the same length."))

    }

    if((sum(map$mk.id %in% wgh[, 1]) != dim(map)[1])){

      stop(paste("The list of marker in the map and in the weight file are",
                 "different."))

    }

    # Set the weight of every marker on chromosome K_i to zero so that it
    # is excluded from the kinship computation ("leave one chromosome out").
    mk.list <- map$mk.id[map$chr == K_i]

    wgh[wgh[, 1] %in% mk.list, 2] <- 0

    # Save the modified weights for LDAK to consume.
    write.table(x = wgh, file = wgh.file2, row.names = FALSE, quote = FALSE)

  }

  # Kinship computation: delegate to the external LDAK executable
  # (http://dougspeed.com/ldak/), which must live in ldak.dir.
  out.kin <- file.path(temp.dir, "kin")
  ldak.loc <- file.path(ldak.dir, "ldak5.beta")

  cmd3 <- paste(ldak.loc, "--calc-kins-direct", out.kin, "--weights",
                wgh.file2, "--bfile", bed.file.loc, paste("--power", power),
                "--kinship-raw YES")

  system(cmd3)

  # Load the raw kinship matrix written by LDAK; returned as a data.frame,
  # matching the original interface. The temp directory is deleted by the
  # on.exit() handler registered above.
  kin.loc <- file.path(temp.dir, "kin.grm.raw")
  K <- read.table(kin.loc)

  return(K)

}
|
0f58158ef890a77a3f529bed5f6af25a6c634880
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/paws/R/support_service.R
|
e3c8048366af8b16819bed36c453ee0b5cae6239
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 10,072
|
r
|
support_service.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL
#' AWS Support
#'
#' @description
#' Amazon Web Services Support
#'
#' The *Amazon Web Services Support API Reference* is intended for
#' programmers who need detailed information about the Amazon Web Services
#' Support operations and data types. You can use the API to manage your
#' support cases programmatically. The Amazon Web Services Support API uses
#' HTTP methods that return results in JSON format.
#'
#' - You must have a Business, Enterprise On-Ramp, or Enterprise Support
#' plan to use the Amazon Web Services Support API.
#'
#' - If you call the Amazon Web Services Support API from an account that
#' doesn't have a Business, Enterprise On-Ramp, or Enterprise Support
#' plan, the `SubscriptionRequiredException` error message appears. For
#' information about changing your support plan, see [Amazon Web
#' Services Support](https://aws.amazon.com/premiumsupport/).
#'
#' You can also use the Amazon Web Services Support API to access features
#' for [Trusted
#' Advisor](https://aws.amazon.com/premiumsupport/technology/trusted-advisor/).
#' You can return a list of checks and their descriptions, get check
#' results, specify checks to refresh, and get the refresh status of
#' checks.
#'
#' You can manage your support cases with the following Amazon Web Services
#' Support API operations:
#'
#' - The [`create_case`][support_create_case],
#' [`describe_cases`][support_describe_cases],
#' [`describe_attachment`][support_describe_attachment], and
#' [`resolve_case`][support_resolve_case] operations create Amazon Web
#' Services Support cases, retrieve information about cases, and
#' resolve cases.
#'
#' - The [`describe_communications`][support_describe_communications],
#' [`add_communication_to_case`][support_add_communication_to_case],
#' and [`add_attachments_to_set`][support_add_attachments_to_set]
#' operations retrieve and add communications and attachments to Amazon
#' Web Services Support cases.
#'
#' - The [`describe_services`][support_describe_services] and
#' [`describe_severity_levels`][support_describe_severity_levels]
#' operations return Amazon Web Service names, service codes, service
#' categories, and problem severity levels. You use these values when
#' you call the [`create_case`][support_create_case] operation.
#'
#' You can also use the Amazon Web Services Support API to call the Trusted
#' Advisor operations. For more information, see [Trusted
#' Advisor](https://docs.aws.amazon.com/) in the *Amazon Web Services
#' Support User Guide*.
#'
#' For authentication of requests, Amazon Web Services Support uses
#' [Signature Version 4 Signing
#' Process](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html).
#'
#' For more information about this service and the endpoints to use, see
#' [About the Amazon Web Services Support
#' API](https://docs.aws.amazon.com/awssupport/latest/user/about-support-api.html)
#' in the *Amazon Web Services Support User Guide*.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- support(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical",
#' sts_regional_endpoint = "string"
#' ),
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- support()
#' svc$add_attachments_to_set(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=support_add_attachments_to_set]{add_attachments_to_set} \tab Adds one or more attachments to an attachment set\cr
#' \link[=support_add_communication_to_case]{add_communication_to_case} \tab Adds additional customer communication to an Amazon Web Services Support case\cr
#' \link[=support_create_case]{create_case} \tab Creates a case in the Amazon Web Services Support Center\cr
#' \link[=support_describe_attachment]{describe_attachment} \tab Returns the attachment that has the specified ID\cr
#' \link[=support_describe_cases]{describe_cases} \tab Returns a list of cases that you specify by passing one or more case IDs\cr
#' \link[=support_describe_communications]{describe_communications} \tab Returns communications and attachments for one or more support cases\cr
#' \link[=support_describe_create_case_options]{describe_create_case_options} \tab Returns a list of CreateCaseOption types along with the corresponding supported hours and language availability\cr
#' \link[=support_describe_services]{describe_services} \tab Returns the current list of Amazon Web Services services and a list of service categories for each service\cr
#' \link[=support_describe_severity_levels]{describe_severity_levels} \tab Returns the list of severity levels that you can assign to a support case\cr
#' \link[=support_describe_supported_languages]{describe_supported_languages} \tab Returns a list of supported languages for a specified categoryCode, issueType and serviceCode\cr
#' \link[=support_describe_trusted_advisor_check_refresh_statuses]{describe_trusted_advisor_check_refresh_statuses} \tab Returns the refresh status of the Trusted Advisor checks that have the specified check IDs\cr
#' \link[=support_describe_trusted_advisor_check_result]{describe_trusted_advisor_check_result} \tab Returns the results of the Trusted Advisor check that has the specified check ID\cr
#' \link[=support_describe_trusted_advisor_checks]{describe_trusted_advisor_checks} \tab Returns information about all available Trusted Advisor checks, including the name, ID, category, description, and metadata\cr
#' \link[=support_describe_trusted_advisor_check_summaries]{describe_trusted_advisor_check_summaries} \tab Returns the results for the Trusted Advisor check summaries for the check IDs that you specified\cr
#' \link[=support_refresh_trusted_advisor_check]{refresh_trusted_advisor_check} \tab Refreshes the Trusted Advisor check that you specify using the check ID\cr
#' \link[=support_resolve_case]{resolve_case} \tab Resolves a support case
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname support
#' @export
support <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Fold the shorthand arguments (credentials/endpoint/region) into the
  # full configuration list; explicit entries in `config` take the
  # precedence defined by merge_config().
  shorthand <- list(
    credentials = credentials,
    endpoint = endpoint,
    region = region
  )
  merged_config <- merge_config(config, shorthand)
  # Attach the merged configuration to the service's operation set and
  # return the resulting client object.
  set_config(.support$operations, merged_config)
}
# Private API objects: metadata, handlers, interfaces, etc.
# Built as a single nested list (equivalent to the incremental `$<-`
# assignments this replaces): `operations` is filled in elsewhere by the
# generator; `metadata` carries the wire-protocol constants for AWS Support.
.support <- list(
  operations = list(),
  metadata = list(
    service_name = "support",
    endpoints = list(
      "*" = list(endpoint = "support.{region}.amazonaws.com", global = FALSE),
      "cn-*" = list(endpoint = "support.{region}.amazonaws.com.cn", global = FALSE),
      "us-iso-*" = list(endpoint = "support.{region}.c2s.ic.gov", global = FALSE),
      "us-isob-*" = list(endpoint = "support.{region}.sc2s.sgov.gov", global = FALSE)
    ),
    service_id = "Support",
    api_version = "2013-04-15",
    signing_name = "support",
    json_version = "1.1",
    target_prefix = "AWSSupport_20130415"
  )
)
.support$service <- function(config = list()) {
  # Construct a service object: JSON-RPC request handlers signed with
  # AWS Signature Version 4, bound to this service's metadata.
  handler_set <- new_handlers("jsonrpc", "v4")
  new_service(.support$metadata, handler_set, config)
}
|
d87b241a382d50fdd0d4cb32567082ae7dbb7064
|
949bc539c53ffa5233beb66787826dd7cb09c402
|
/wu_process_results.R
|
2b57c7af2072666dee52abfba4d87f53cb895294
|
[] |
no_license
|
julianhatwell/likertimpute
|
a57feb0a17094e094339e8b19f5159de75328fb0
|
15041e197d51428dfc592968baea876473828e29
|
refs/heads/master
| 2021-01-20T05:36:31.728019
| 2017-09-25T18:39:35
| 2017-09-25T18:39:35
| 89,794,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,562
|
r
|
wu_process_results.R
|
# results comparisons
# wu_data_pop <- synth_wu_data(wu_data_model, 500000, seed = 100001)
# wu_stats_pop_sym <- wu_collect_stats(wu_data_pop$sym)
# wu_stats_pop_masym <- wu_collect_stats(wu_data_pop$masym)
# wu_stats_pop_sasym <- wu_collect_stats(wu_data_pop$sasym)
# save(wu_stats_pop_sym
# , wu_stats_pop_masym
# , wu_stats_pop_sasym
# , file = "wu_stats_pop.RData")
# report_results <- format_results(results, wu_stats_pop_sym)
# rs_wu_sym_results <- report_results
# rs_wu_sym_results$qdata <- format_qdata(report_results$q_measures)
# rs_wu_sym_results$ci_data <- format_cidata(report_results$results, dc = FALSE)
# rs_wu_sym_results$bottom_2 <- bottom_2_count(rs_wu_sym_results)
# rs_wu_sym_results$top_2 <- top_2_count(rs_wu_sym_results)
# save(rs_wu_sym_results, file = "rs_wu_sym_results.RData")
# load("rs_wu_sym_results.RData") ; report_results <- rs_wu_sym_results
# report_results <- format_results(results, wu_stats_pop_sasym)
# rs_wu_sasym_results <- report_results
# rs_wu_sasym_results$qdata <- format_qdata(report_results$q_measures)
# rs_wu_sasym_results$ci_data <- format_cidata(report_results$results, dc = FALSE)
# rs_wu_sasym_results$bottom_2 <- bottom_2_count(rs_wu_sasym_results)
# rs_wu_sasym_results$top_2 <- top_2_count(rs_wu_sasym_results)
# save(rs_wu_sasym_results, file = "rs_wu_sasym_results.RData")
# load("rs_wu_sasym_results.RData") ; report_results <- rs_wu_sasym_results
# report_results <- format_results(results, wu_stats_pop_sym)
# dc_wu_sym_results <- report_results
# dc_wu_sym_results$qdata <- format_qdata(report_results$q_measures)
# dc_wu_sym_results$ci_data <- format_cidata(report_results$results)
# dc_wu_sym_results$bottom_2 <- bottom_2_count(dc_wu_sym_results)
# dc_wu_sym_results$top_2 <- top_2_count(dc_wu_sym_results)
# save(dc_wu_sym_results, file = "dc_wu_sym_results.RData")
# load("dc_wu_sym_results.RData") ; report_results <- dc_wu_sym_results
#
# report_results <- format_results(results, wu_stats_pop_sasym)
# dc_wu_sasym_results <- report_results
# dc_wu_sasym_results$qdata <- format_qdata(report_results$q_measures)
# dc_wu_sasym_results$ci_data <- format_cidata(report_results$results)
# dc_wu_sasym_results$bottom_2 <- bottom_2_count(dc_wu_sasym_results)
# dc_wu_sasym_results$top_2 <- top_2_count(dc_wu_sasym_results)
# save(dc_wu_sasym_results, file = "dc_wu_sasym_results.RData")
# load("dc_wu_sasym_results.RData") ; report_results <- dc_wu_sasym_results
# report_results <- format_results(results, wu_stats_pop_sym)
# im_wu_sym_results <- report_results
# im_wu_sym_results$qdata <- format_qdata(report_results$q_measures)
# im_wu_sym_results$ci_data <- format_cidata(report_results$results)
# im_wu_sym_results$bottom_2 <- bottom_2_count(im_wu_sym_results)
# save(im_wu_sym_results, file = "im_wu_sym_results.RData")
# load("im_wu_sym_results.RData") ; report_results <- im_wu_sym_results
# report_results <- format_results(results, wu_stats_pop_sasym)
# im_wu_sasym_results <- report_results
# im_wu_sasym_results$qdata <- format_qdata(report_results$q_measures)
# im_wu_sasym_results$ci_data <- format_cidata(report_results$results)
# im_wu_sasym_results$bottom_2 <- bottom_2_count(im_wu_sasym_results)
# save(im_wu_sasym_results, file = "im_wu_sasym_results.RData")
# load("im_wu_sasym_results.RData") ; report_results <- im_wu_sasym_results
# report_results <- format_results(results, wu_stats_pop_sym)
# vr_wu_sym_results <- report_results
# vr_wu_sym_results$qdata <- format_qdata(report_results$q_measures)
# vr_wu_sym_results$ci_data <- format_cidata(report_results$results)
# vr_wu_sym_results$bottom_2 <- bottom_2_count(vr_wu_sym_results)
# save(vr_wu_sym_results, file = "vr1_wu_sym_results.RData")
# save(vr_wu_sym_results, file = "vr2_wu_sym_results.RData")
# load("vr1_wu_sym_results.RData") ; report_results <- vr_wu_sym_results
# report_results <- format_results(results, wu_stats_pop_sasym)
# vr_wu_sasym_results <- report_results
# vr_wu_sasym_results$qdata <- format_qdata(report_results$q_measures)
# vr_wu_sasym_results$ci_data <- format_cidata(report_results$results)
# vr_wu_sasym_results$bottom_2 <- bottom_2_count(vr_wu_sasym_results)
# save(vr_wu_sasym_results, file = "vr1_wu_sasym_results.RData")
# save(vr_wu_sasym_results, file = "vr2_wu_sasym_results.RData")
# load("vr_wu_sasym_results.RData") ; report_results <- vr_wu_sasym_results
# report_results <- format_results(results, wu_stats_pop_sym)
# bm_wu_sym_results <- report_results
# bm_wu_sym_results$qdata <- format_qdata(report_results$q_measures)
# bm_wu_sym_results$ci_data <- format_cidata(report_results$results)
# bm_wu_sym_results$bottom_2 <- bottom_2_count(bm_wu_sym_results)
# save(bm_wu_sym_results, file = "bm1_wu_sym_results.RData")
# save(bm_wu_sym_results, file = "bm2_wu_sym_results.RData")
# load("bm_wu_sym_results.RData") ; report_results <- bm_wu_sym_results
# report_results <- format_results(results, wu_stats_pop_sasym)
# bm_wu_sasym_results <- report_results
# bm_wu_sasym_results$qdata <- format_qdata(report_results$q_measures)
# bm_wu_sasym_results$ci_data <- format_cidata(report_results$results)
# bm_wu_sasym_results$bottom_2 <- bottom_2_count(bm_wu_sasym_results)
# save(bm_wu_sasym_results, file = "bm1_wu_sasym_results.RData")
# save(bm_wu_sasym_results, file = "bm2_wu_sasym_results.RData")
# load("bm_wu_sasym_results.RData") ; report_results <- bm_wu_sasym_results
# if not want to run experiments again
# load("wu_results.RData")
# Plotting section of the weighted-sampling experiment report.
# Assumes `report_results`, `stats_names`, and `pop_stats` have been built by
# the (commented-out) result-formatting code above.
# NOTE(review): sources a machine-specific absolute Windows path; this only
# runs on the original author's machine — consider a relative/here::here() path.
source("C:\\Dev\\Study\\R\\R_Themes\\MarketingTheme.R")
# Clone the shared lattice theme and override the group-symbol / reference-line
# colours. MyLatticeTheme, MyLatticeStrip, myPal, myPalDark, myGgTheme are
# presumably defined by MarketingTheme.R — confirm.
MyTempTheme <- MyLatticeTheme
MyTempTheme$superpose.symbol$col <- myPal
MyTempTheme$add.line$col <- myPalDark[3]
# Dotplot of the per-variant means of statistics 1 and 6, one panel per
# statistic, with a vertical reference line at the population value.
lattice::dotplot(factor(variant)~value | factor(stats)
                 , groups = factor(dataset)
                 , data = subset(report_results$q_measures
                                 , stats %in% stats_names[c(1, 6)] &
                                   q_measure == "mean")
                 , scales = "free"
                 , par.settings = MyTempTheme
                 , strip = MyLatticeStrip
                 , auto.key = list(columns = 3)
                 , panel = function(x, y, ...) {
                     panel.dotplot(x, y, ...)
                     # reference line: population value of the panel's statistic
                     panel.abline(
                       v = unlist(pop_stats[stats_names[c(1, 6)[panel.number()]]]))
                 })
# Same plot for statistics 19 and 20; the reference line is drawn only when a
# population value exists for that statistic.
lattice::dotplot(factor(variant)~value | factor(stats)
                 , groups = factor(dataset)
                 , data = subset(report_results$q_measures
                                 , stats %in% stats_names[c(19, 20)] &
                                   q_measure == "mean")
                 , scales = "free"
                 , par.settings = MyTempTheme
                 , strip = MyLatticeStrip
                 , auto.key = list(columns = 3)
                 , panel = function(x, y, ...) {
                     panel.dotplot(x, y, ...)
                     if (!(is.null(unlist(pop_stats[stats_names[c(19, 20)[panel.number()]]])))) {
                       panel.abline(
                         v = unlist(pop_stats[stats_names[c(19, 20)[panel.number()]]]))
                     }
                 })
# NOTE(review): stray top-level expression — panel.number() is only meaningful
# inside a lattice panel function; this line looks like a debugging leftover.
stats_names[c(1, 3, 4, 6, 8, 9)[panel.number()]][[1]]
# Stripplot of the raw per-run results for statistics 15, 17-20, jittered,
# with a population reference line per panel.
lattice::stripplot(factor(variant)~value | factor(stats)
                   , groups = factor(dataset)
                   , data = subset(report_results$results
                                   , stats %in%
                                     stats_names[c(15, 17, 18, 19, 20)])
                   , jitter.data = TRUE
                   , scales = "free"
                   , par.settings = MyTempTheme
                   , strip = MyLatticeStrip
                   , auto.key = list(columns = 3)
                   , panel = function(x, y, ...) {
                       panel.stripplot(x, y, ...)
                       panel.abline(
                         v = pop_stats[stats_names[c(15, 17, 18, 19, 20)[panel.number()]]][[1]])
                   })
# Single-panel dotplot of the mean alpha_A measure per variant.
lattice::dotplot(factor(variant)~value
                 , groups = factor(dataset)
                 , data = subset(report_results$q_measures
                                 , stats == "alpha_A" & q_measure == "mean")
                 , par.settings = MyTempTheme
                 , strip = MyLatticeStrip
                 , auto.key = list(columns = 3)
)
# ggplot equivalent: raw results for statistics 1, 3, 4, 6, 8, 9 facetted by
# statistic.
g <- ggplot(data = subset(report_results$results
                          , stats %in%
                            stats_names[c(1, 3, 4, 6, 8, 9)])
            , aes(x = value
                  , y = variant
                  , colour = dataset
            )
) +
  facet_wrap(~stats, scales = "free") +
  geom_point() +
  myGgTheme
g
# ggplot of the mean q-measures for statistics 19 and 20.
g <- ggplot(data = subset(report_results$q_measures
                          , stats %in% stats_names[c(19, 20)] &
                            q_measure == "mean")
            , aes(x = value
                  , y = variant
                  , colour = dataset
            )
) +
  facet_wrap(~stats, scales = "free") +
  geom_point(size = 2) +
  myGgTheme
g
# Flatten the population statistics into a data frame so ggplot can draw
# per-facet reference lines.
pop_stats_names <- names(pop_stats)
pops <- data.frame(stats = pop_stats_names, value = unlist(pop_stats))
# Mean q-measures for statistics 1 and 6 with dotted population reference
# lines (alpha_A / alpha_B).
g <- ggplot(data = subset(report_results$q_measures
                          , stats %in% stats_names[c(1, 6)] &
                            q_measure == "mean")
            , aes(x = value
                  , y = variant
                  , colour = dataset
            )
) +
  facet_wrap(~stats, scales = "free") +
  geom_point(size = 2) +
  geom_vline(data = subset(pops
                           , stats %in% c("alpha_A", "alpha_B"))
             , aes(xintercept = value)
             , colour = myPalDark[3]
             , linetype = "dotted") +
  myGgTheme
g
|
e3b81b7928ed23b207374edb70ecd11e0a50add1
|
bde79a681f8c6d98bb9d15f3304881498981bf03
|
/analysis/report-cards/07-generate_report_cards.R
|
9acdce281c8de14bdfce44fd0774482a4ddab14f
|
[
"MIT"
] |
permissive
|
sparkgeo/local-reef-pressures
|
17c446df65c4236a7e84071fda2ae42d1eb8bdc6
|
5ec1a647807e9cb5b0f5120d4d0d9a7e5bfc183e
|
refs/heads/main
| 2023-07-04T13:54:56.103132
| 2021-08-09T12:56:55
| 2021-08-09T12:56:55
| 395,079,635
| 0
| 0
|
NOASSERTION
| 2021-08-11T18:15:37
| 2021-08-11T18:15:36
| null |
UTF-8
|
R
| false
| false
| 155
|
r
|
07-generate_report_cards.R
|
# Entry-point script: build every BCU report card in batch mode.
# The sourced files presumably define generate_report_cards() and the BCU
# recoding helpers it relies on — confirm against R/generate_report_card.R.
source(here::here("R", "generate_report_card.R"))
source(here::here("R", "recode_bcus.R"))
# "all" = every BCU; open = FALSE keeps the rendered cards from launching a
# browser; quiet = TRUE suppresses rendering progress messages.
generate_report_cards(bcu = "all", open = FALSE, quiet = TRUE)
|
8b77589e463c823cd974ea1c0396470591ac7c41
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036127-test.R
|
40f3b8903790c728884e9018fb2fa57786bfd4fc
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
1610036127-test.R
|
# Auto-generated libFuzzer regression input for distr6:::C_EmpiricalMVPdf.
# Replays one captured fuzz case: a degenerate 1x1 zero "data" matrix and an
# 8x1 query matrix of extreme doubles. Do not edit the values — they encode
# the exact crashing/valgrind input being reproduced.
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(1.22317776825925e+113, 6.0066238872389e+180, 1.15963946977352e-152, 5.77096118049817e+228, 1.30813306747254e+166, 3.5316372282246e-304, 5.33991605498766e-307, 0), .Dim = c(8L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
# Print the structure of whatever the C routine returned.
str(result)
|
defa5dd5cd3c38a406477ae3b5ee2dd3eb41a516
|
d34a8d7b06cea619d62c97d17e77cc8c81cbdb66
|
/R/demoChart.R
|
313da5f19a911a4f2f73bb1d5ddbd6f65889220d
|
[] |
no_license
|
AngelOfMusic/QCAGUI
|
97d790c25161c86528ce81c96b4ff533e311651a
|
7cd2a1b3b408b61a8bc9619eda8a0be473699045
|
refs/heads/master
| 2021-01-16T19:41:12.199237
| 2016-02-16T15:40:42
| 2016-02-16T15:40:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
demoChart.R
|
# Build a prime-implicant chart: a logical matrix saying, for every prime
# implicant (row) and every configuration (column), whether all of the prime's
# product terms appear in that configuration.
#
# primes:     character vector of prime implicants, e.g. "a*b"
# configs:    character vector of configurations, e.g. "a*b*c"
# prod.split: the literal separator between product terms ("" splits nothing)
`demoChart` <- function(primes = c(""), configs = c(""), prod.split = "") {
    # strsplit() takes a regex, so escape the literal separator (e.g. "*").
    if (prod.split != "") {
        prod.split <- paste0("\\", prod.split)
    }
    prime.parts <- strsplit(primes, prod.split)
    config.parts <- strsplit(configs, prod.split)
    chart <- matrix(FALSE,
                    nrow = length(primes), ncol = length(configs),
                    dimnames = list(primes, configs))
    for (row in seq_len(nrow(chart))) {
        for (col in seq_len(ncol(chart))) {
            # TRUE iff every term of the prime occurs in the configuration.
            chart[row, col] <- all(prime.parts[[row]] %in% config.parts[[col]])
        }
    }
    chart
}
|
de97e573a03919fbb2622950afad03b527785321
|
1e018375afab08fc10bc5456448234c788ff1aae
|
/rcloud.packages/rcloud.lux/R/zzz.R
|
05c5d22b7278d4e7e51080a9f9bc9550d92869ba
|
[
"MIT"
] |
permissive
|
att/rcloud
|
5187a71e83726e9e7425adde8534cf66690cac7f
|
3630ec73cebfc8df1b2ee4bd4a07fbe81cb03bb0
|
refs/heads/develop
| 2023-08-23T18:14:45.171238
| 2022-08-25T23:49:52
| 2022-08-25T23:49:52
| 5,250,457
| 322
| 138
|
MIT
| 2023-05-22T19:46:48
| 2012-07-31T19:32:52
|
JavaScript
|
UTF-8
|
R
| false
| false
| 383
|
r
|
zzz.R
|
# Capabilities handle for the "lux_plot" JS module; populated by .onLoad below
# and read elsewhere in the package.
lux.caps <- NULL
# Package load hook: install the bundled "lux" JavaScript modules into the
# current RCloud session.
.onLoad <- function(libname, pkgname)
{
  # Read a bundled JS file shipped under inst/javascript and register it with
  # RCloud; returns the capabilities object the JS module exposes.
  f <- function(module.name, module.path) {
    path <- system.file("javascript", module.path, package="rcloud.lux")
    caps <- rcloud.install.js.module(module.name,
                                     paste(readLines(path), collapse='\n'))
    caps
  }
  # lux.js only needs installing; its capabilities handle is discarded.
  f("lux", "lux.js")
  # Keep the lux_plot capabilities at package scope via super-assignment.
  lux.caps <<- f("lux_plot", "lux_plot.js")
}
|
896411194fabf4fbb5e83abc34fb12888d4df6a1
|
c1edefa312a3613f6d684b5658454dcaef80b4e1
|
/R/0506 텍스트마이닝(중요)/문자열 가공하기(stringr).R
|
979020e75dec28a191fc79d77ae2855aef1aa030
|
[] |
no_license
|
SunhoPark2107/R-lecture-2021
|
36fdce26a9ee37d7ff950abb8f15350306f536fc
|
55be11430f13147ab2562b7a81258db01a0c1477
|
refs/heads/main
| 2023-06-04T14:05:56.891884
| 2021-06-25T11:27:11
| 2021-06-25T11:27:11
| 359,694,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,209
|
r
|
문자열 가공하기(stringr).R
|
# String processing with base R and stringr
# Course material: https://m.blog.naver.com/PostView.nhn?blogId=nife0719&logNo=220975845463&proxyReferer=https:%2F%2Fwww.google.com%2F
library(stringr)
# 1. Convert to character.
example <- 1
typeof(example) # double => floating point number
example <- as.character(example)
typeof(example) # now "character".
# Reading user input (interactive sessions only)
input <- readline('Prompt > ')
input
typeof(input) # character: must be converted before doing arithmetic.
i <- as.numeric(input)
typeof(i)
3 * i # works because i was converted to numeric.
# 2. Concatenating strings
paste('A', 'quick', 'brown', 'fox') # paste() separates with a space by default.
paste0('A', 'quick', 'brown', 'fox')
paste('A', 'quick', 'brown', 'fox', sep = "-") # sep = sets a different separator.
s <- paste('A', 'quick', 'brown', 'fox', sep = "-")
str_split(s, '-') # [[1]] => double brackets mean a list; the split pieces come back as a list.
sample <- c('A', 'quick', 'brown', 'fox' )
paste(sample) # pasting a vector as-is works element-wise, leaving it unchanged.
paste(sample, collapse = " ")
paste(sample, collapse = "-")
str_c(sample, '1', sep = "_") # joins each vector element with "1" using "_" as separator.
str_c(sample, '1', sep = '_', collapse = '@@') # collapse then joins the separate strings into one.
# For reference, lists behave like this.
l <- str_split(s, '-')
l[1]
paste(l) # pasting a list deparses its elements into this form.
# A list does not concatenate directly; unlist() it first (see section 14 below).
# 3. Counting characters
x <- 'Hello'
nchar(x)
h <- '안녕하세요' # 15 bytes (currently UTF-8 encoded.)
nchar(h) # but nchar() counts 5 characters.
str_length(h)
# 4. Convert to lower case (not applicable to Korean text)
tolower(x)
# 5. Convert to upper case
toupper(x)
# 6. Combine two character vectors without duplicate entries.
vector_1 <- c("hello", "world", "r", "program")
vector_2 <- c("hi", "world", "r", "coding")
union(vector_1, vector_2) # set union, so duplicates are excluded.
# 7. Extract the entries common to two character vectors.
intersect(vector_1, vector_2) # set intersection
# 8. Extract the entries NOT shared between two character vectors.
setdiff(vector_1, vector_2) # set difference (relative to vector_1).
# 9. Check whether two character vectors are equal (order-independent).
vector_3 <- c("r", "hello", "program", "world")
setequal(vector_1, vector_2)
setequal(vector_1, vector_3)
# 10. Strip whitespace
vector_1 <- c(" hello World! ", " Hi R! ")
# str_trim() comes from the stringr package.
str_trim(vector_1, side = 'left')
str_trim(vector_1, side = 'both')
# 11. Repeat a string
str_dup(x, 3) # one single string containing three copies.
rep(x, 3) # a vector of three separate copies instead.
# 12. Extract a substring (a part of a string)
string_1 <- "Hello World"
substr(string_1, 7, 9)
substring(string_1, 7, 9)
str_sub(string_1, 7, 9)
substr(string_1, 7) # substr() errors: it requires both start and stop.
substring(string_1, 7) # substring() accepts only a start and reads to the end.
str_sub(string_1, 7)
str_sub(string_1, 7, -1)
str_sub(string_1, 7, -3)
str_sub(string_1, 7, -5) # negative positions count back from the end (-1 is the last character).
string_1[7:9] # plain vector indexing on a string gives NA NA NA, not characters.
# In everyday use, substring() or str_sub() are the better choices.
# 13. Replace characters at a specific position in a string
str_1 <- "Today is Monday"
substr(str_1, 10, 12) <- "Sun"
str_1
substr(str_1, 10, 12) <- "Thurs"
str_1 # replacement is truncated: it is longer than the target range.
# The other replacement functions behave the same when lengths differ.
# 14. Split a string on a given pattern (string).
strsplit(str_1, split = " ")
str_split(str_1, pattern = " ")
# NOTE(review): "patter" below relies on R's partial argument matching for
# "pattern" — it works, but the argument name should be spelled out.
str_split(str_1, patter = " ", n = 2) # split into at most 2 pieces.
str_split(str_1, patter = " ", n = 2, simplify = TRUE) # returns a matrix; rarely used in practice.
str_split_fixed(str_1, patter = " ", n = 2) # same result as the simplify = TRUE call above.
s <- str_split(str_1, pattern = " ")
typeof(s) # splitting a string returns a list.
s[1]
s[[1]] # => double brackets give the underlying character vector.
s[[1]][1] # => this is how individual list elements must be indexed.
# Convert a list to a vector
# (lists are awkward to work with directly)
unlist(s)
paste(unlist(s), collapse = " ")
# 15. Find a pattern (string) using base functions
vector_1 <- c("Xman", "Superman", "Joker")
grep("man", vector_1) # indices of the matching elements (1 and 2 contain "man").
# BUG FIX: the original line read `grep("man" vector_1, value = TRUE)` —
# the missing comma after "man" was a syntax error.
grep("man", vector_1, value = TRUE) # the matching strings themselves.
regexpr("man", vector_1) # position of the first match in each element.
gregexpr("man", vector_1) # all match positions, returned as a list.
# These last two are rarely needed in practice.
# 16. Find a pattern (string) using stringr functions
fruit <- c("apple", "banana", "cherry")
str_count(fruit, "a")
str_detect(fruit, "a")
str_locate(fruit, "a")
str_locate_all(fruit, "a")
people <- c("rorori", "emilia", "youna")
str_match(people, "o(\\D)") # \\D matches a non-digit character.
# 17. Find a pattern (string) and replace it with another.
fruits <- c("one apple", "two pears", "three bananas")
sub('a', 'A', fruits) # => replaces only the first "a" in each element with "A"
gsub('a', 'A', fruits) # => replaces every "a" with "A".
str_replace(fruits, 'a', 'A')
str_replace_all(fruits, 'a', 'A')
sub("[aeiou]", "-", fruits)
gsub("[aeiou]", "-", fruits)
# https://regexr.com/
# A site for trying regular expressions interactively
# (see the appendix table in the course-material blog).
# Regular expressions
fruits <- c("one apple", "two pairs", "three bananas")
str_match(fruits, '[aeiou]')
str_match_all(fruits, '[aeiou]')
str_match(fruits, "\\d")
str_match(fruits, '[[:digit:]]')
|
c8aad5b815b5b8079767d93f8149c52d0f0007d4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bipartite/examples/swap.web.Rd.R
|
53d3ac6f3fc3418a7d57f2181b05f2f74b3d0166
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
swap.web.Rd.R
|
# Auto-extracted example for bipartite::swap.web.
library(bipartite)
### Name: swap.web
### Title: Creates null model for bipartite networks
### Aliases: swap.web
### Keywords: package
### ** Examples
# Generate N = 2 null webs from the bundled Safariland network.
swap.web(Safariland, N=2)
|
cd6714636348a0c129baef5d61f036722bf42c51
|
d244fc1b19dbce9c0478f92462a851aeb98479f5
|
/R/map_linearinterpol_with_xtrafo.R
|
18a1a2b95b1ce01d1e7d169d59054456a8e74815
|
[
"MIT"
] |
permissive
|
gschnabel/nucdataBaynet
|
46cda45e26c207c7f21c8c63e52b67dd145fba75
|
b209af629c8df7302c0744a6d9182b4963dc5fab
|
refs/heads/main
| 2023-01-28T22:34:50.182928
| 2023-01-23T10:19:35
| 2023-01-23T10:19:35
| 183,237,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,323
|
r
|
map_linearinterpol_with_xtrafo.R
|
#' Create a linear interpolation mapping including energy calibration
#'
#' Creates a map to linearly interpolate the values at the source indices
#' given on a one-dimensional mesh to the one-dimensional mesh associated with
#' the variables at the target indices.
#' It is possible to apply a shift and scaling to the target mesh to account
#' for, e.g., an energy calibration error of an experiment.
#' The transformation is given by \eqn{x = \alpha + \beta x'} where
#' \eqn{x'} is an x-value of the target mesh as stated by the user and the
#' resulting \eqn{x} is the \emph{correct} x-value that should be used for
#' the linear interpolation.
#'
#' The following fields are required in the parameter list to initialize the mapping:
#' \tabular{ll}{
#' \code{mapname} \tab Name of the mapping \cr
#' \code{maptype} \tab Must be \code{"linearinterpol_with_xtrafo_map"} \cr
#' \code{src_idx} \tab Vector of source indices \cr
#' \code{tar_idx} \tab Vector of target indices \cr
#' \code{src_x} \tab Vector with the mesh associated with the source indices \cr
#' \code{tar_x} \tab Vector with the mesh associated with the target indices \cr
#' \code{zero_outside} \tab Default is \code{FALSE}. If TRUE, y-values of target x-values outside
#' the limits of the source mesh will be zero, otherwise this situation
#' is not allowed. \cr
#' \code{shiftx_idx} \tab Index associated with the variable that contains \eqn{\alpha}. \cr
#' \code{scalex_idx} \tab Index associated with the variable that contains \eqn{\beta}
#' }
#'
#' @return
#' Returns a list of functions to operate with the mapping, see \code{\link{create_maptype_map}}.
#' @export
#'
#' @family mappings
#' @examples
#' params <- list(
#' mapname = "mylinearintmap",
#' maptype = "linearinterpol_with_xtrafo_map",
#' src_idx = 1:3,
#' tar_idx = 4:6,
#' src_x = c(1,5,10),
#' tar_x = c(4,5,6),
#' shiftx_idx = 7,
#' scalex_idx = 8
#' )
#' mymap <- create_linearinterpol_with_xtrafo_map()
#' mymap$setup(params)
#' x <- c(1,2,3,0,0,0,0.5,1.1)
#' mymap$propagate(x)
#' mymap$jacobian(x)
#'
create_linearinterpol_with_xtrafo_map <- function() {
  # Closure state shared by the member functions below.
  linmap <- NULL             # underlying plain linear-interpolation map (built in setup)
  xtrafo_params <- NULL      # shiftx/scalex indices + user-stated target mesh
  last_shiftx <- NULL        # cache of the trafo parameters / source values used
  last_scalex <- NULL        #   in the last update, to skip redundant recomputation
  last_src_x <- NULL
  energyderiv_coeffs <- NULL # slope of the interpolant at each target x
  # NOTE(review): removed the unused closure field `last_with.id` that the
  # original declared here; it was never read or written anywhere in this map.
  # Initialize the mapping from a parameter list; see the roxygen block above
  # for the required fields. The x-transformation fields are stripped off and
  # the remainder is delegated to a plain "linearinterpol_map".
  setup <- function(params) {
    stopifnot(params[["maptype"]] == getType())
    stopifnot(c("shiftx_idx", "scalex_idx") %in% names(params))
    xtrafo_params <<- list(
      shiftx_idx = params[["shiftx_idx"]],
      scalex_idx = params[["scalex_idx"]],
      claimed_tar_x = params[["tar_x"]]
    )
    # create the basic linear interpolation map
    params[["maptype"]] <- "linearinterpol_map"
    params[["shiftx_idx"]] <- NULL
    params[["scalex_idx"]] <- NULL
    linmap <<- create_map(params)
  }
  getType <- function() {
    return("linearinterpol_with_xtrafo_map")
  }
  getName <- function() {
    return(linmap$getName())
  }
  getDescription <- function() {
    return(linmap$getDescription())
  }
  # Nonlinear because the output depends multiplicatively on shiftx/scalex.
  is_linear <- function() {
    return(FALSE)
  }
  # Source indices: the interpolation sources plus the two trafo parameters.
  get_src_idx <- function() {
    return(c(linmap$get_src_idx(),
             xtrafo_params$shiftx_idx,
             xtrafo_params$scalex_idx))
  }
  get_tar_idx <- function() {
    return(linmap$get_tar_idx())
  }
  # Propagate source values to the target mesh, refreshing the internal
  # linear map first if any relevant input changed.
  propagate <- function(x, with.id=TRUE) {
    update_linmap(x)
    return(linmap$propagate(x, with.id))
  }
  # Jacobian of the mapping: the linear-interpolation Jacobian augmented with
  # the partial derivatives with respect to the shift (alpha) and scale (beta)
  # parameters, d tar/d alpha = slope and d tar/d beta = slope * x'.
  jacobian <- function(x, with.id=TRUE) {
    update_linmap(x)
    tar_idx <- linmap$get_tar_idx()
    S <- linmap$jacobian(x, with.id)
    matidcs_sel <- cbind(rep(tar_idx, 2),
                         c(rep(xtrafo_params$shiftx_idx, length(tar_idx)),
                           rep(xtrafo_params$scalex_idx, length(tar_idx))))
    S[matidcs_sel] <- c(energyderiv_coeffs,
                        energyderiv_coeffs * xtrafo_params$claimed_tar_x)
    return(S)
  }
  # internal utility functions
  # Refresh the cached slopes and the transformed target mesh when the trafo
  # parameters or the source y-values changed. Returns TRUE if an update
  # happened, FALSE if the caches were still valid.
  update_linmap <- function(x) {
    cur_shiftx <- x[xtrafo_params$shiftx_idx]
    cur_scalex <- x[xtrafo_params$scalex_idx]
    src_idx <- linmap$get_src_idx()
    if (!isTRUE(last_shiftx == cur_shiftx) ||
        !isTRUE(last_scalex == cur_scalex) ||
        !isTRUE(all(x[src_idx] == last_src_x))) {
      # compute target xs derivatives with respect to target energy
      src_x <- linmap$get_src_x()
      tar_x <- linmap$get_tar_x()
      low_idx <- findInterval(tar_x, src_x, rightmost.closed=TRUE)
      high_idx <- low_idx + 1
      stopifnot(all(low_idx >= 1) && all(high_idx <= length(src_x)))
      xdiff <- src_x[high_idx] - src_x[low_idx]
      energyderiv_coeffs <<- (-x[src_idx[low_idx]] + x[src_idx[high_idx]]) / xdiff
      # update the linearinterpolation map with the calibrated target mesh
      claimed_tar_x <- xtrafo_params$claimed_tar_x
      new_tar_x <- cur_shiftx + cur_scalex * claimed_tar_x
      linmap$set_tar_x(new_tar_x)
      # keep track of current transformation parameters
      last_shiftx <<- cur_shiftx
      last_scalex <<- cur_scalex
      last_src_x <<- x[src_idx]
      return(TRUE)
    }
    return(FALSE)
  }
  return(list(
    setup = setup,
    getType = getType,
    getName = getName,
    getDescription = getDescription,
    is_linear = is_linear,
    get_src_idx = get_src_idx,
    get_tar_idx = get_tar_idx,
    propagate = propagate,
    jacobian = jacobian
  ))
}
|
76cd817856cd2539e76055a5c685bff9f1ac9e68
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/fechner/inst/archive/shortest.paths.information.R
|
d80ad0cdf6d54a3bc1208a7bf7e5aa02fdbd86bf
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,071
|
r
|
shortest.paths.information.R
|
########################################################################################
##computes graph-theoretic information about shortest paths from given source vertices##
##to all target vertices using an adjacency matrix; NOTE: this is an INTERNAL FUNCTION##
##not exported by the package, and as such, it does not provide checks of its argument##
########################################################################################
shortest.paths.information <-
function(M){
    # M: an adjacency matrix (in Fechnerian scaling context, matrices of the psychometric increments
    # of the first and second kind)
    # For every source vertex, run Dijkstra's algorithm to obtain the
    # weight-based shortest distances and predecessors, then derive the
    # edge-count (graph-theoretic) length of each shortest path.
    n <- dim(M)[1]
    weight.distances <- matrix(nrow = n, ncol = n) # matrix of the weight-based lengths of the shortest paths from source vertices
                                                   # (row stimuli) to target vertices (column stimuli) (in Fechnerian scaling context,
                                                   # matrices of the oriented Fechnerian distances of the first and second kind)
    edge.distances <- matrix(nrow = n, ncol = n)   # matrix of the edge/link based (graph-theoretic) lengths of the shortest paths
                                                   # from source vertices (row stimuli) to target vertices (column stimuli)
    predecessors <- matrix(nrow = n, ncol = n)     # matrix of the predecessors of the column stimuli in shortest paths from the row stimuli
                                                   # (as source vertices) to the column stimuli (as target vertices)
    for(id in 1:n){
        node.from <- id # node.from: a given source vertex (row stimulus) for which to determine information about shortest paths
                        # to the column stimuli (as target vertices)
        distance <- rep(Inf, n)
        lvl <- rep(NA, n)
        pred <- rep(0, n)
        done <- rep(FALSE, n)
        distance[node.from] <- 0
        lvl[node.from] <- 0
        # Dijkstra main loop: n times, finalize the unfinished vertex with the
        # smallest tentative distance, then relax its outgoing edges.
        for(i in 1:n){
            node.closest <- (-1)
            min.dist <- Inf
            for(j in 1:n){
                if(!done[j]){
                    if(distance[j] <= min.dist){
                        min.dist <- distance[j]
                        node.closest <- j
                    }
                }
            }
            done[node.closest] <- TRUE
            # relaxation step over all still-unfinished vertices
            for(j in 1:n){
                if(!done[j]){
                    if((distance[node.closest] + M[node.closest, j]) < distance[j]){
                        distance[j] <- (distance[node.closest] + M[node.closest, j])
                        pred[j] <- node.closest
                    }
                }
            }
        }
        # Derive edge counts (levels) from the predecessor tree: vertices whose
        # predecessor has level d get level d + 1, breadth-first from the source.
        distance.2 <- 0
        done.2 <- NA
        used <- numeric()
        while(any(is.na(lvl))){
            done.2 <- which(!is.na(lvl))[!is.element(which(!is.na(lvl)), used)]
            distance.2 <- (distance.2 + 1)
            lvl[which(is.element(pred, done.2))] <- distance.2
            used <- append(used, done.2[!is.element(done.2, used)])
        }
        weight.distances[id, ] <- distance
        edge.distances[id, ] <- lvl
        predecessors[id, ] <- pred
    }
    dimnames(weight.distances) <- dimnames(edge.distances) <- dimnames(predecessors) <- dimnames(M)
    return(list(weight.distances = weight.distances, edge.distances = edge.distances, predecessors = predecessors))
}
|
3200f0ada9dd7432bc4baa4d0f3a09b4267ed0a3
|
382c97a6ded67b48a53a5b9a1e3b9f7fe0a424dc
|
/scriptd_stats02_cv_functions.R
|
7eeecd63107d772092b7b5c928963d852876ce5a
|
[] |
no_license
|
NxNiki/weighted_lasso
|
44d3f6e533836bb1e1bb3555826b1f828129f9c2
|
3e555977af5225708bdf1fd4acac11546c819b7d
|
refs/heads/master
| 2021-12-24T08:24:04.914236
| 2021-09-12T21:12:42
| 2021-09-12T21:12:42
| 152,316,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,719
|
r
|
scriptd_stats02_cv_functions.R
|
#!/usr/bin/env Rscript
library(caret)
# createFolds()
# Rescale every column of `dat` linearly onto [0, 1].
# The input is coerced to a matrix first, so the result is always a matrix
# (with the "scaled:center"/"scaled:scale" attributes added by scale()).
scale.0.1 = function(dat) {
  mat = as.matrix(dat)
  col.min = apply(mat, 2, min)
  col.max = apply(mat, 2, max)
  scale(mat, center = col.min, scale = col.max - col.min)
}
# Accuracy, sensitivity and specificity for (at most) binary labels.
# Returns c(accuracy, sensitivity, specificity). Labels are compared on their
# numeric codes; the larger level is treated as the positive class. When only
# one class is present in `y`, the measure that cannot be computed is NaN.
compute.acc = function(y, yhat) {
  truth = as.numeric(y)
  pred = as.numeric(yhat)
  correct = truth == pred
  acc = sum(correct) / length(truth)
  lev = sort(unique(truth), decreasing = F)
  if (length(lev) == 2) {
    # two classes present: lev[1] is negative, lev[2] positive
    sensi = sum(correct & truth == lev[2]) / sum(truth == lev[2])
    speci = sum(correct & truth == lev[1]) / sum(truth == lev[1])
  } else if (max(pred) == max(truth)) {
    # single class, matching the positive side: only sensitivity defined
    sensi = sum(correct & truth == lev) / sum(truth == lev)
    speci = NaN
  } else {
    # single class on the negative side: only specificity defined
    speci = sum(correct & truth == lev) / sum(truth == lev)
    sensi = NaN
  }
  c(acc, sensi, speci)
}
# Map the largest value(s) of `f` to `max` and the smallest to `min`,
# leaving any intermediate values untouched. Factors are first coerced to
# their numeric codes. (If all values are equal, they end up at `min`,
# because the min-assignment happens last.)
remap.factor = function(f, min = 0, max = 1) {
  codes = as.numeric(f)
  out = codes
  hi = base::max(codes)  # base:: avoids confusion with the `max` argument
  lo = base::min(codes)
  out[codes == hi] = max
  out[codes == lo] = min
  out
}
# Permutation t-test for paired or independent samples.
#
# x1, x2: numeric samples (equal length required when paired = TRUE)
# n:      number of permutations
# paired: paired (within-pair label shuffling) vs independent (full relabeling)
#
# Returns a list:
#   density   - the n permuted mean differences (the null distribution)
#   p.greater - one-sided p-value P(perm > observed difference)
#   p.smaller - one-sided p-value P(perm < observed difference)
#   p         - two-sided p-value based on |observed difference|
perm.t.test = function(x1, x2, n = 5000, paired = F) {
  # permutation t test for paired and independent samples:
  mean.diff = mean(x1) - mean(x2)
  perm.stats = rep(NA, n)
  test.out = list()
  if (paired) {
    # paired permutation t-test: corresponding pairs in x1 and x2 were randomly
    # shuffled across the columns of cbind(x1, x2), and the mean differenced were
    # computed.
    x = cbind(x1, x2)
    n.obs = dim(x)[1]
    for (i in 1:n) {
      # per pair, pick which column plays the role of "x1"
      x1.perm.idx = sample(1:2, n.obs, replace = T)
      x2.perm.idx = 2 - x1.perm.idx
      perm.stats[i] = mean(x[cbind(1:n.obs, x1.perm.idx)] - x[cbind(1:n.obs, x2.perm.idx)])
    }
  } else{
    # independent permutation t-test:
    # NOTE(review): group labels are drawn with replacement, so the permuted
    # group sizes vary and need not match length(x1)/length(x2) — this is a
    # randomization test rather than an exact relabeling; confirm intended.
    x = c(x1, x2)
    n.obs = length(x)
    for (i in 1:n) {
      x.perm.idx = sample(1:2, n.obs, replace = T)
      perm.stats[i] = mean(x[x.perm.idx == 1]) - mean(x[x.perm.idx == 2])
    }
  }
  test.out$density = perm.stats
  test.out$p.greater = sum(perm.stats > mean.diff) / n
  test.out$p.smaller = sum(perm.stats < mean.diff) / n
  test.out$p = sum(perm.stats < -abs(mean.diff) |
                     perm.stats > abs(mean.diff)) / n
  return(test.out)
}
library(VGAM)
# t.test()
# cor.test()
library(ltm)
# biserial.cor()
#library(matrixcalc)
## matrix.inverse()
#library(MASS)
# ginv()
# Bootstrap the per-feature class-mean difference.
# For each of `nboots` bootstrap resamples (seeded by the iteration index for
# reproducibility), compute mean(feature | y == 0) - mean(feature | y == 1).
# Returns an nboots x ncol(x) matrix of mean differences.
feature.weight.mean.diff.boot = function(x, y, nboots){
  print("compute feature weight: mean.diff.boot...")
  weights = matrix(NA, nboots, ncol(x))
  labels = remap.factor(y, 0, 1)  # force the two classes onto codes 0/1
  for (b in 1:nboots) {
    set.seed(b)
    idx = sample(1:nrow(x), nrow(x), replace = T)
    x.b = x[idx,]
    y.b = labels[idx]
    weights[b, ] = apply(x.b[y.b==0,], 2, mean) - apply(x.b[y.b==1,], 2, mean)
  }
  weights
}
# Feature weighting in PCA space: run the statistical test on the principal
# component scores, then rotate the first `ncomp` weights back to the original
# feature space through a pseudo-inverse of the loadings.
#
# NOTE(review): `ginv` is MASS::ginv, but library(MASS) is commented out near
# the top of this file — confirm MASS is attached before this is called.
feature.cv.test.pca = function(feature.in,
                               factor,
                               k = 10,
                               ncomp = 10,
                               method = "wilcox") {
  # no centering/scaling: assumes the input is already preprocessed — TODO confirm
  pca.out = prcomp(feature.in, scale = F, center = F)
  feature.pca = pca.out$x
  # NOTE(review): feature.cv.test's glmnet.para argument is not supplied here;
  # lazy evaluation makes that fine for plain statistical tests, but
  # method = "glmnet.coef" would fail.
  feature.weight.pca = feature.cv.test(feature.pca, factor, k, method)
  #inv.rotation = ginv(pca.out$rotation[, 1:length(feature.weight.pca)])
  inv.rotation = ginv(pca.out$rotation[, 1:ncomp])
  # NOTE(review): feature.cv.test returns a k x ncomp matrix;
  # `[1:ncomp]` indexes it linearly (column-major), taking the first ncomp
  # cells rather than one value per component — verify this is intended.
  feature.weight = feature.weight.pca[1:ncomp] %*% inv.rotation
  return(feature.weight)
}
# Compute per-feature weights by running a statistical test (or glmnet) on the
# training portion of each of k cross-validation folds.
#
# feature.in:  numeric matrix (samples x features)
# factor:      class labels (coerced to numeric; min = class 0, max = class 1)
# k:           number of CV folds; k == 1 means "use all samples once"
#              (used by the bootstrap wrapper feature.cv.boot)
# method:      "mean.diff", "glmnet.coef", or a correlation/rank test name
# glmnet.para: list of glmnet settings; only evaluated for "glmnet.coef"
# seed:        RNG seed for the fold assignment
#
# Returns a k x ncol(feature.in) matrix of per-fold weights.
feature.cv.test = function(feature.in,
                           factor,
                           k = 10,
                           method = "wilcox",
                           glmnet.para,
                           seed = 111) {
  # statistic tests to compute feature weights: if k == 1, run test on the whole
  # sample without cross validation. the output is a vector with length same as
  # the number of features i.e ncol(feature.in) this is useful in case we do boot
  # strapping and compute coefficient of variation (CV) across boot strap samples.
  num.feature = ncol(feature.in)
  num.sample = nrow(feature.in)
  set.seed(seed)
  factor = as.numeric(factor)
  idx.factor.0 = factor == min(factor)
  idx.factor.1 = factor == max(factor)
  if (k > 1) {
    cv.k = createFolds(factor, k, list = F)
    print("feature.cv.test: number of samples in each CV, for factor 0 and 1:")
    print(table(cv.k[idx.factor.0]))
    print(table(cv.k[idx.factor.1]))
  } else{
    # k is set 1 in bootstrap, in which all bootstrapped sample are selected
    # setting cross-validation index as all 0 will enable all samples being selected
    # in the for loop over 1 to k. feature.in[cv.k != i,]
    cv.k = rep(0, num.sample)
  }
  test.out = matrix(NA, k, num.feature)
  for (i in 1:k) {
    idx = cv.k != i  # training rows for fold i
    if (method == "mean.diff") {
      # class-mean difference per feature on the training rows
      test.out[i, ] = apply(feature.in[cv.k != i & idx.factor.0,], 2, mean) -
        apply(feature.in[cv.k != i & idx.factor.1,], 2, mean)
    } else if (method == "glmnet.coef") {
      # run cv.glmnet to get the coefficients and use the CV of them as feature weights.
      # NOTE(review): "nfold" relies on partial matching of cv.glmnet's
      # "nfolds" argument — spell it out.
      set.seed(444)
      cv.fit = cv.glmnet(
        feature.in[cv.k != i, ],
        factor[cv.k != i],
        nfold = glmnet.para$nfolds.inner,
        alpha = glmnet.para$alpha,
        family = glmnet.para$family,
        standardize = F
      )
      #test.out[i,] = abs(coef(cv.fit, s="lambda.min"))[-1]
      test.out[i, ] = coef(cv.fit, s = "lambda.min")[-1]
    } else{
      # per-feature univariate tests
      for (i.feature in 1:num.feature) {
        if (method == "kendall") {
          test.result = cor.test(feature.in[idx, i.feature], factor[idx], method = "kendall")
          value = abs(test.result$estimate)
        } else if (method == "wilcox") {
          # NOTE(review): wilcox.test() only returns an `estimate` component
          # when conf.int = TRUE; with the default call test.result$estimate
          # is NULL, `value` becomes numeric(0), and the assignment below
          # errors. Probably $statistic or $p.value was intended — confirm.
          test.result = wilcox.test(feature.in[idx, i.feature], factor[idx])
          value = abs(test.result$estimate)
        } else if (method == "spearman") {
          test.result = cor.test(feature.in[idx, i.feature], factor[idx], method = "spearman")
          value = abs(test.result$estimate)
        } else if (method == "pearson") {
          test.result = cor.test(feature.in[idx, i.feature], factor[idx])
          value = abs(test.result$estimate)
        } else if (method == "biserial") {
          value = abs(biserial.cor(feature.in[idx, i.feature], factor[idx]))
        }
        test.out[i, i.feature] = value
      }
    }
  }
  return(test.out)
}
# Bootstrap wrapper around feature.cv.test / feature.cv.test.pca.
# Draws `n` bootstrap resamples (seeded by the iteration index) and evaluates
# the chosen feature-weight method on each, with k = 1 (no inner CV).
# Returns an n x ncol(feature.in) matrix of per-resample weights.
feature.cv.boot = function(feature.in,
                           factor,
                           n = 100,
                           method,
                           glmnet.para,
                           pca = F) {
  boot.weights = matrix(NA, n, ncol(feature.in))
  factor = remap.factor(factor)  # map classes onto the default 0/1 codes
  for (b in 1:n) {
    set.seed(b)
    rows = sample(1:nrow(feature.in), nrow(feature.in), replace = T)
    x.b = feature.in[rows,]
    y.b = factor[rows]
    boot.weights[b, ] = if (pca) {
      feature.cv.test.pca(x.b, y.b, k = 1, method)
    } else {
      feature.cv.test(x.b, y.b, k = 1, method, glmnet.para)
    }
  }
  boot.weights
}
# Compute per-feature penalty weights for weighted-lasso fitting.
#
# x.train:     numeric feature matrix (samples x features)
# y.train:     class labels
# method:      weighting scheme; "none" gives uniform weights of 1
# glmnet.para: list providing (at least) $cutoff — a length-2 vector of
#              quantile thresholds in [0, 1] — and $log.penalty.weight;
#              bootstrap/CV settings are passed through to the
#              feature.cv.* helpers.
#
# Returns a numeric vector of length ncol(x.train): the coefficient of
# variation of the bootstrapped/cross-validated weights, rescaled to
# [1, 100], optionally thresholded and/or log-transformed.
compute.feature.weight = function(x.train, y.train, method = "mean.diff.boot", glmnet.para) {
  penalty.weight = method
  if (penalty.weight == "none") {
    f.weight.cv = rep(1, dim(x.train)[2])
  } else {
    # Dispatch to the requested weighting scheme; each returns a
    # replicates x features matrix of raw weights.
    if (penalty.weight == "mean.diff") {
      f.weight = feature.cv.test(x.train, y.train, 10, "mean.diff", glmnet.para)
    } else if (penalty.weight == "wilcox") {
      f.weight = feature.cv.test(x.train, y.train, 10, "wilcox", glmnet.para)
    } else if (penalty.weight == "kendall") {
      f.weight = feature.cv.test(x.train, y.train, 10, "kendall", glmnet.para)
    } else if (penalty.weight == "pearson") {
      f.weight = feature.cv.test(x.train, y.train, 10, "pearson", glmnet.para)
    } else if (penalty.weight == "mean.diff.boot") {
      f.weight = feature.weight.mean.diff.boot(x.train,
                                               y.train,
                                               nboots = 500)
    } else if (penalty.weight == "pearson.boot") {
      f.weight = feature.cv.boot(x.train,
                                 y.train,
                                 n = 500,
                                 method = "pearson",
                                 glmnet.para)
    } else if (penalty.weight == "biserial.boot") {
      f.weight = feature.cv.boot(x.train,
                                 y.train,
                                 n = 500,
                                 method = "biserial",
                                 glmnet.para)
    } else if (penalty.weight == "glmnet.coef.boot") {
      f.weight = feature.cv.boot(x.train,
                                 y.train,
                                 n = 500,
                                 method = "glmnet.coef",
                                 glmnet.para)
    } else if (penalty.weight == "mean.diff.boot.pca") {
      f.weight = feature.cv.boot(
        x.train,
        y.train,
        n = 500,
        method = "mean.diff",
        glmnet.para,
        pca = T
      )
    }
    # Coefficient of variation across replicates, mapped to the range [1, 100].
    if (dim(f.weight)[1] > 1) {
      f.weight.cv = scale.0.1(abs(apply(f.weight, 2, sd) / apply(f.weight, 2, mean))) * 99 + 1
    } else {
      # single replicate: no variation available, use inverse magnitude
      f.weight.cv = scale.0.1(1 / f.weight) * 99 + 1
    }
    # BUG FIX: the original referenced an undefined variable `cutoff` inside
    # these branches (only `glmnet.para$cutoff` and an unused `cut.off` local
    # existed), raising "object 'cutoff' not found" whenever thresholding was
    # enabled. Use the value from glmnet.para consistently.
    cutoff = glmnet.para$cutoff
    if (cutoff[1] > 0) {
      # weights below the lower quantile are zeroed (feature effectively free)
      cutoff.value = quantile(f.weight.cv, cutoff[1])
      f.weight.cv[f.weight.cv < cutoff.value] = 0
    }
    if (cutoff[2] < 1) {
      # weights above the upper quantile are capped at 1
      cutoff.value = quantile(f.weight.cv, cutoff[2])
      f.weight.cv[f.weight.cv > cutoff.value] = 1
    }
    # log transform the weights:
    if (glmnet.para$log.penalty.weight) {
      print('log transform weight:')
      f.weight.cv = log(f.weight.cv, base = 100)
    }
  }
  return(f.weight.cv)
}
library(glmnet)
#library(SIS)
# Tune glmnet's lambda via k-fold cross-validation, scoring each lambda by the
# balanced accuracy (mean of sensitivity and specificity) on the held-out fold.
# The returned lambda is the one whose balanced accuracy is most STABLE across
# folds (smallest sd/mean), not the one with the highest accuracy.
#
# x, y:       predictors and binary response
# k:          requested number of CV folds (see NOTE below)
# alpha:      elastic-net mixing parameter
# lambda.seq: candidate lambda grid
# f.weights:  optional per-feature penalty factors (defaults to all 1)
glmnet.tune = function(x,
                       y,
                       k,
                       alpha = 1,
                       lambda.seq = 10 ^ seq(-4, 3, length = 70),
                       f.weights) {
  # use balanced accuracy (mean of sensitivity and specificity) rather than accuracy to run cross-validation:
  #set.seed(222)
  set.seed(123)
  if (missing(f.weights)) {
    f.weights = rep(1, dim(x)[2])
  }
  cv.k = createFolds(y, k, list = F)
  test.acc = matrix(NA, k, length(lambda.seq))
  glmnet.control(mxit = 1000000)  # raise glmnet's iteration cap for hard fits
  # NOTE(review): k is forced up to at least 5 AFTER the folds and the
  # test.acc matrix were built with the caller's k. If the caller passed
  # k < 5, iterations with i > original k select fold ids that do not exist
  # (empty test sets) and test.acc[i, ] writes out of bounds. The
  # reassignment most likely belongs BEFORE createFolds() — confirm.
  k = max(c(5, k))
  for (i in 1:k) {
    x.train = x[cv.k != i, ]
    x.test = x[cv.k == i, ]
    y.train = y[cv.k != i]
    y.test = y[cv.k == i]
    for (j in 1:length(lambda.seq)) {
      mod = glmnet(
        x.train,
        y.train,
        family = "binomial",
        alpha = alpha,
        lambda = lambda.seq[j],
        standardize = F,
        penalty.factor = f.weights
      )
      y.pred = predict(mod, x.test, s = lambda.seq[j], type = "class")
      acc = compute.acc(y.pred, y.test)
      # balanced accuracy on the held-out fold
      test.acc[i, j] = (acc[2] + acc[3]) / 2
      #test.acc[i,j] = acc[1]
    }
  }
  # pick the lambda with the smallest coefficient of variation across folds
  acc.cv = apply(test.acc, 2, sd) / apply(test.acc, 2, mean)
  min.idx = which.min(acc.cv)
  return(lambda.seq[min.idx])
}
glmnet.nested.cv = function(x, y, glmnet.para) {
  # Nested cross-validation for (feature-weighted) elastic-net models.
  # The outer CV (glmnet.para$nfolds folds) estimates generalization
  # performance; on each outer training split, per-feature penalty factors
  # are computed with compute.feature.weight() (defined elsewhere in this
  # file), lambda is tuned with an inner cv.glmnet
  # (glmnet.para$nfolds.inner folds), and the elastic-net mixing parameter
  # is tuned over the grid glmnet.para$alpha.
  #
  # Args:
  #   x: predictor matrix (samples x features).
  #   y: response (factor for "binomial", numeric for "gaussian").
  #   glmnet.para: list of settings (nfolds, nfolds.inner, family, alpha,
  #     lambda.seq, penalty.weight, type.measure, predict.type, return.mod, ...).
  # Returns: list with per-fold test/train performance, per-fold
  #   coefficients, robustness/reproducibility summaries of the selected
  #   features, the penalty weights, and optionally a model refit on all data.
  k = glmnet.para$nfolds
  set.seed(222)
  cv.k = createFolds(y, k, list = F)
  if (glmnet.para$family == "binomial") {
    y.num = as.numeric(y)
    idx.factor.0 = y.num == min(y.num)
    idx.factor.1 = y.num == max(y.num)
    print("glmnet.nested.cv: number of samples in each CV, for factor 0 and 1:")
    print(table(cv.k[idx.factor.0]))
    print(table(cv.k[idx.factor.1]))
  }
  penalty.weight = glmnet.para$penalty.weight
  quantile.thresh = glmnet.para$quantile.thresh
  alpha = glmnet.para$alpha
  test.result = data.frame(
    acc = rep(NA, k),
    sensi = rep(NA, k),
    speci = rep(NA, k)
  )
  train.result = data.frame(
    acc = rep(NA, k),
    sensi = rep(NA, k),
    speci = rep(NA, k)
  )
  # One column per outer fold; first coefficient row is the intercept.
  result.coefs = matrix(NA, ncol(x) + 1, k)
  result.feature.weights = matrix(NA, ncol(x), k)
  # outer CV:
  for (i in 1:k) {
    print(length(cv.k))
    print(dim(x))
    x.train = x[cv.k != i, ]
    x.test = x[cv.k == i, ]
    y.train = y[cv.k != i]
    y.test = y[cv.k == i]
    glmnet.control(mxit = 1000000)
    # BUG FIX: use && so the second comparison is skipped when lambda.seq is
    # a vector (with &, a length > 1 comparison makes if() fail).
    if (length(glmnet.para$lambda.seq) == 1 &&
        glmnet.para$lambda.seq == 0) {
      # no regularization; in this case alpha is not necessary.
      glmnet.fit = glmnet(
        x.train,
        y.train,
        family = toString(glmnet.para$family),
        lambda = glmnet.para$lambda.seq,
        standardize = F
      )
      result.feature.weights = NULL
      # BUG FIX: lambda.min and coefs are consumed unconditionally after this
      # if/else but were previously only defined on the regularized branch,
      # which made this branch error out.
      lambda.min = glmnet.para$lambda.seq
      coefs <- coef(glmnet.fit)
    } else {
      # with regularization.
      f.weight.cv = compute.feature.weight(x.train, y.train, penalty.weight, glmnet.para)
      error.list = rep(NA, length(alpha))
      fit.list = vector("list", length(alpha))
      # tune on lambda and alpha:
      if (length(glmnet.para$lambda.seq) == 1) {
        if (glmnet.para$lambda.seq == "default"){
          # default lambda sequence of cv.glmnet:
          lambda.seq = formals(cv.glmnet)$lambda
        } else {
          # fixed lambda, just tune on alpha.
          # NOTE(review): this branch never assigns lambda.seq, so the
          # cv.glmnet() call below would use an undefined (or stale) value --
          # confirm whether a fixed numeric lambda is ever passed in practice.
          lambda.min = glmnet.para$lambda.seq
        }
      } else if (length(glmnet.para$lambda.seq) > 1) {
        # customized lambda sequence:
        lambda.seq = glmnet.para$lambda.seq
      }
      # inner CV to optimize alpha:
      for (i.alpha in 1:length(alpha)) {
        print("alpha:")
        print(alpha[i.alpha])
        print(toString(glmnet.para$family))
        set.seed(123)
        # inner CV to optimize lambda:
        fit.list[[i.alpha]] = cv.glmnet(
          x.train,
          y.train,
          nfolds = glmnet.para$nfolds.inner,
          family = toString(glmnet.para$family),
          lambda = lambda.seq,
          alpha = alpha[i.alpha],
          penalty.factor = f.weight.cv,
          type.measure = toString(glmnet.para$type.measure),
          standardize = F
        )
        # BUG FIX: record the inner-CV error of EVERY alpha.  This assignment
        # used to sit after the loop, so only the last alpha was ever scored
        # and which.min() below always selected it.
        error.list[i.alpha] = min(fit.list[[i.alpha]]$cvm)
      }
      min.i.alpha = which.min(error.list)
      cv.fit = fit.list[[min.i.alpha]]
      lambda.min = cv.fit$lambda.min
      alpha.min = alpha[min.i.alpha]
      print(dim(result.feature.weights))
      print(length(f.weight.cv))
      result.feature.weights[, i] <- f.weight.cv
      print("glmnet.cv.fun: min lambda:")
      print(lambda.min)
      # model fit on the full training split; available directly from the
      # cv.glmnet output rather than refitting.
      glmnet.fit = cv.fit$glmnet.fit
      coefs <- coef(cv.fit, s = "lambda.min")
    }# end with regularization.
    print(length(as.vector(coefs)))
    print(dim(result.coefs))
    result.coefs[, i] <- as.vector(coefs)
    y.pred = predict(glmnet.fit,
                     x.test,
                     s = lambda.min,
                     type = glmnet.para$predict.type)
    y.pred.train = predict(glmnet.fit,
                           x.train,
                           s = lambda.min,
                           type = glmnet.para$predict.type)
    if (glmnet.para$family == "gaussian") {
      # Regression: report Pearson correlation (and RMSE on the test fold).
      result.test = cor.test(y.test, y.pred, method = "pearson")
      result.train = cor.test(y.train, y.pred.train, method = "pearson")
      rmse.test = sqrt(mean((y.test - y.pred) ^ 2))
      print("prediction of testing data:")
      print(result.test)
      print("prediction of training data:")
      print(result.train)
      test.result[i, 1] = result.test$estimate
      test.result[i, 2] = rmse.test
      colnames(test.result)[2] = "rmse"
      train.result[i, 1] = result.train$estimate
    } else {
      # Classification: report accuracy / sensitivity / specificity.
      print("prediction of testing data:")
      print(table(y.test, y.pred))
      print("prediction of training data:")
      print(table(y.train, y.pred.train))
      result.test = compute.acc(y.test, y.pred)
      result.train = compute.acc(y.train, y.pred.train)
      test.result[i, ] = result.test
      train.result[i, ] = result.train
    }
  } # end outer CV
  # compute robustness of coefs across CV (|mean| / sd per coefficient):
  coefs.robustness = abs(apply(result.coefs, 1, mean) / apply(result.coefs, 1, sd))
  coefs.sd = apply(result.coefs, 1, sd)
  result.coefs.df = data.frame(c("intercept", colnames(x)),
                               result.coefs,
                               coefs.sd,
                               coefs.robustness,
                               stringsAsFactors = F)
  # convert coefs to 1 or 0 as indicator of feature selection; the 1st row
  # (intercept) is removed:
  result.coefs.ind = matrix(
    as.numeric(result.coefs != 0),
    nrow = nrow(result.coefs),
    ncol = ncol(result.coefs)
  )[-1, ]
  # compute the number of times each feature is selected in the cross validation:
  coefs.ind.sum = apply(result.coefs.ind, 1, sum)
  reproducibility.index = rep(NA, length(coefs.ind.sum))
  none.zero.index = coefs.ind.sum > 0
  reproducibility.index[none.zero.index] = (abs(coefs.ind.sum[none.zero.index] - k /
                                                  2) - (k / 2) %% 1) / floor(k / 2)
  # Built with data.frame(...) rather than as.data.frame(cbind(...)) so that
  # reproducibility.index stays numeric instead of being coerced to factor.
  result.coefs.ind = data.frame(
    colnames(x),
    result.coefs.ind,
    coefs.ind.sum,
    reproducibility.index,
    stringsAsFactors = F
  )
  glmnet.out = list()
  glmnet.out$test.result = test.result
  glmnet.out$train.result = train.result
  glmnet.out$coefs.ind = result.coefs.ind
  glmnet.out$coefs = result.coefs.df
  glmnet.out$coefs.robustness = mean(coefs.robustness, na.rm = T)
  glmnet.out$reproducibility = mean(reproducibility.index, na.rm = T)
  glmnet.out$penalty.weights = result.feature.weights
  if (glmnet.para$return.mod) {
    # NOTE(review): this refit reuses lambda.min, alpha[min.i.alpha] and
    # f.weight.cv from the LAST outer fold, and min.i.alpha / f.weight.cv do
    # not exist when the no-regularization branch was taken -- confirm the
    # intended behavior.
    mod.all = glmnet(
      x,
      y,
      family = glmnet.para$family,
      lambda = lambda.min,
      alpha = alpha[min.i.alpha],
      penalty.factor = f.weight.cv,
      standardize = F
    )
    glmnet.out$mod = mod.all
  }
  # BUG FIX: this status message used to appear after return() and was never
  # executed.
  print("glmnet.nested.cv: finished")
  return(glmnet.out)
}
library(e1071)
# svm tune.svm
library(plyr)
# rbind.fill()
svm.weights <- function(model) {
  # Recover the weight vector(s) of the separating hyperplane(s) of a linear
  # SVM fitted with e1071::svm().  For a binary model, a single weight row is
  # returned (t(coefs) %*% SV); for a multiclass model fitted one-vs-one,
  # one weight row is returned per class pair, in the order
  # (1,2), (1,3), ..., (1,K), (2,3), ...
  #
  # Args:
  #   model: a fitted svm object (uses $nclasses, $coefs, $SV, $nSV).
  # Returns: a matrix of hyperplane weights (one row per decision boundary).
  if (model$nclasses == 2) {
    return(t(model$coefs) %*% model$SV)
  }
  # One-vs-one case: row index in model$SV where each class's support
  # vectors begin.
  sv.start <- c(1, cumsum(model$nSV) + 1)
  sv.start <- sv.start[-length(sv.start)]
  # Weight vector of the (a, b) pairwise classifier.
  pair.weight <- function(a, b) {
    rows.a <- sv.start[a]:(sv.start[a] + model$nSV[a] - 1)
    rows.b <- sv.start[b]:(sv.start[b] + model$nSV[b] - 1)
    t(model$coefs[rows.a, b - 1]) %*% model$SV[rows.a, ] +
      t(model$coefs[rows.b, a]) %*% model$SV[rows.b, ]
  }
  all.weights <- NULL
  for (a in seq_len(model$nclasses - 1)) {
    for (b in (a + 1):model$nclasses) {
      all.weights <- rbind(all.weights, pair.weight(a, b))
    }
  }
  return(all.weights)
}
select.feature = function(feature.in,
                          factor,
                          p,
                          method = "ttest",
                          k = 5) {
  # Univariate feature selection: score every column of feature.in against
  # the (numeric-coded) class labels and return the indices of the selected
  # columns.
  #
  # Args:
  #   feature.in: samples x features matrix or data frame.
  #   p: if < 1, a p-value threshold; otherwise the number of top-ranked
  #      features to keep (capped at ncol(feature.in)).
  #   method: "ttest", "wilcox", "kendall", "spearman" score each column with
  #      a statistical test; "coef.variation", "boot.var" and "cv.wilcox"
  #      delegate to resampling helpers defined elsewhere in this file.
  #   k: number of folds/resamples forwarded to those helpers.
  # Returns: integer vector of selected column indices.
  factor = as.numeric(factor)
  p = min(p, ncol(feature.in))
  # Resampling-based methods score all columns at once...
  if (method == "coef.variation") {
    scores = feature.cv(feature.in, factor, k)
  } else if (method == "boot.var") {
    scores = feature.cv.boot(feature.in, factor, k)
  } else if (method == "cv.wilcox") {
    scores = feature.cv.test(feature.in, factor, method = "wilcox", k)
  } else {
    # ...otherwise run one test per column (unknown methods leave NA).
    scores = rep(NA, ncol(feature.in))
    for (col.i in seq_len(ncol(feature.in))) {
      feature.col = feature.in[, col.i]
      if (method == "ttest") {
        scores[col.i] = t.test(feature.col ~ factor, var.equal = TRUE)$p.value
      } else if (method == "wilcox") {
        # NOTE(review): this compares the feature values against the numeric
        # class codes as two independent samples (not feature ~ factor) --
        # confirm that is the intended test.
        scores[col.i] = wilcox.test(feature.col, factor)$p.value
      } else if (method == "kendall") {
        scores[col.i] = cor.test(feature.col, factor, method = "kendall")$p.value
      } else if (method == "spearman") {
        scores[col.i] = cor.test(feature.col, factor, method = "spearman")$p.value
      }
    }
  }
  if (p < 1) {
    # p-value threshold mode:
    selected = which(scores < p)
  } else {
    # keep the p features with the most significant (smallest) scores:
    rank.order = sort(scores, index.return = T, decreasing = F)
    selected = rank.order$ix[1:p]
  }
  return(selected)
}
# Nested cross-validation for a linear SVM with two-stage univariate feature
# selection.  Outer CV: svm.para$nfolds folds over the samples.  On each
# training split, features are first filtered with select.feature() using
# svm.para$feature.selection1; then a second selection
# (svm.para$feature.selection2) is tuned over the grid num.feature2 jointly
# with the SVM cost (e1071::tune with an inner CV).  The best combination is
# refit and evaluated on the held-out fold; the hyperplane weights of each
# fold's model are collected with svm.weights().
# Requires caret::createFolds, e1071::tune/svm, plyr::rbind.fill and the
# sibling functions select.feature(), svm.weights() and compute.acc().
#
# Returns: list(test.result, train.result, feature.weights, tune.result).
svm.cv.fun = function(brain.feature,
                      subject.info,
                      cost.seq,
                      svm.para) {
  # num.feature: a sequence of number of selected features or threshold of p values: see function select.feature
  # the best value is selected based on the training data:
  subject.info$factor = as.factor(subject.info$factor)
  set.seed(333)
  cv.k = createFolds(subject.info$factor, svm.para$nfolds, list = F)
  # Grid of candidate feature counts (or p thresholds) for the 2nd selection.
  num.feature2 = seq(
    svm.para$num.feature2.start,
    svm.para$num.feature2.end,
    by = svm.para$num.feature2.step
  )
  test.result = data.frame(
    acc = rep(NA, svm.para$nfolds),
    sensi = rep(NA, svm.para$nfolds),
    speci = rep(NA, svm.para$nfolds)
  )
  train.result = data.frame(
    acc = rep(NA, svm.para$nfolds),
    sensi = rep(NA, svm.para$nfolds),
    speci = rep(NA, svm.para$nfolds)
  )
  # Row 1 holds the grid itself; rows 2..nfolds+1 hold each fold's inner-CV
  # error for every grid value.
  tune.result = matrix(NA, svm.para$nfolds + 1, length(num.feature2))
  tune.result[1,] = num.feature2
  tune.control = tune.control(cross = svm.para$nfolds)
  for (i in 1:svm.para$nfolds) {
    brain.feature.train = brain.feature[cv.k != i, ]
    brain.feature.test = brain.feature[which(cv.k == i), , drop = F]
    subject.info.train = subject.info[cv.k != i, ]
    subject.info.test = subject.info[which(cv.k == i), , drop = F]
    length.num.feature = length(num.feature2)
    feature.idx = list()
    best.cost = vector()
    # select features by running t test or kendall's tau:
    f.idx1 = select.feature(
      brain.feature.train,
      subject.info.train$factor,
      svm.para$num.feature1,
      svm.para$feature.selection1,
      svm.para$nfolds
    )
    for (j in 1:length.num.feature) {
      # select feature by coefficient of variation:
      # (note: f.idx2 indexes into the f.idx1-filtered columns)
      f.idx2 = select.feature(
        brain.feature.train[, f.idx1],
        subject.info.train$factor,
        num.feature2[j],
        svm.para$feature.selection2,
        svm.para$nfolds
      )
      feature.idx[[j]] = f.idx2[!is.na(f.idx2)]
      #feature.idx = svmrfe(brain.feature.train, subject.info.train$factor, num.feature)
      print("selecting features for svm:")
      print(feature.idx[[j]])
      feature.train.j = cbind(subject.info.train, brain.feature.train[, feature.idx[[j]], drop =
                                                                        F])
      set.seed(123)
      # inner CV over the cost grid for this feature subset:
      tune.svm = tune(
        svm,
        factor ~ .,
        data = feature.train.j,
        kernel = "linear",
        ranges = list(cost = cost.seq),
        tunecontrol = tune.control
      )
      tune.result[i + 1, j] = tune.svm$best.performance
      best.cost[j] = tune.svm$best.parameters$cost
      print(summary(tune.svm))
      #print(tune.svm$best.parameters$cost)
      #print(tune.svm$best.modal$coefs)
      #print(svm.weights(tune.svm$best.modal))
    }
    # Refit with the grid value that achieved the lowest inner-CV error.
    best.j = which.min(tune.result[i + 1, ])
    feature.train = cbind(subject.info.train, brain.feature.train[, feature.idx[[best.j]], drop =
                                                                    F])
    feature.test = cbind(subject.info.test, brain.feature.test[, feature.idx[[best.j]], drop =
                                                                 F])
    svm.mod = svm(factor ~ .,
                  data = feature.train,
                  kernel = "linear",
                  cost = best.cost[best.j])
    # Hyperplane weights of this fold's model; rbind.fill pads columns since
    # different folds may select different features.
    weights.i = data.frame(svm.weights(svm.mod))
    #colnames(weights.i) = colnames(subset(feature.train, select = -factor))
    if (i == 1) {
      feature.weights = weights.i
    } else{
      feature.weights = rbind.fill(feature.weights, weights.i)
    }
    svm.pred = predict(svm.mod, subset(feature.test, select = -factor))
    svm.pred.train = predict(svm.mod, subset(feature.train, select = -factor))
    #print("prediction of testing data:")
    #print(cbind(feature.test$factor, svm.pred))
    #print(table(feature.test$factor, svm.pred))
    #print("prediction of training data:")
    #print(table(feature.train$factor, svm.pred.train))
    result.test = compute.acc(feature.test$factor, svm.pred)
    result.train = compute.acc(feature.train$factor, svm.pred.train)
    test.result[i, ] = result.test
    train.result[i, ] = result.train
  }
  print("tune number of features:")
  print(tune.result)
  return(list(test.result, train.result, feature.weights, tune.result))
}
|
25d5465799b9dc8b15d27845762d6f775fde1875
|
2e1fc83f033dc1692f8ec5eaa7e54fecfce86dcb
|
/combined_Kallithea_genome_diversity_fig.R
|
91c137afaf5086d7641fdf89adef5ae308104180
|
[] |
no_license
|
megan-a-wallace/Dros_DNA_virus_diversity
|
49d6d753babf72bb76f0c4d2ef67e5efd552a98f
|
f05722a266132a8ec843f12d6795cbc8d1a5b886
|
refs/heads/master
| 2023-04-30T15:29:22.155786
| 2021-01-27T21:59:14
| 2021-01-27T21:59:14
| 299,576,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,504
|
r
|
combined_Kallithea_genome_diversity_fig.R
|
############################################
## Kallithea virus genomic diversity plot ##
############################################
##Author : Megan Wallace
##September 2020
##Figure showing variation in total piS and intergenic pi across the Kallithea genome, above variation in the % of samples showing InDel support
#setwd("C:/Users/User/Dropbox/PhD - 1st Year/popgen_drosEU")
# NOTE(review): hard-coded, machine-specific working directory; consider
# running from the project root instead of calling setwd().
setwd("C:/Users/s1667991/Dropbox/PhD - 1st Year/popgen_drosEU")
# NOTE(review): require() silently returns FALSE when a package is missing;
# library() would fail fast here.
require(tidyverse); require(evobiR); require(seqinr)
# Virus accession/name and a short stem ("Kallithea") derived from it.
virusid="KX130344_Kallithea_virus"
virusstem<-gsub('[A-Z]{2}[0-9]{6}_|_virus','',virusid)
#####################################
## Importing the genome info (gff) ##
#####################################
##Importing genome data (location of CDS) and list of samples
# Length of the KX130344 reference sequence, used as the x-axis extent.
refseqlength=152388
read.table(file = "nonKallithea_viruses_diversity/KX130344_Kallithea_virus.edited.cds.gtf", col.names = c("seqname","source","feature","start","end","score","strand","frame","id"), sep = "\t")->Kallithea_cds_positions
read.table(file = "nonKallithea_viruses_diversity/Kallithea.mpileup.25.bam.list.txt", sep = "\t")->Kallithea_sample_list
# Strip the directory prefix and bam suffix to leave bare sample names.
gsub('Kallithea.virus.analyses/|.Kallithea.bwa.noTIRs_InDel.bam','',Kallithea_sample_list$V1)->Kallithea_sample_list
##############################
## Importing the InDel data ##
##############################
###Importing InDel data
##Combined table of indellic locations across all Kallithea positive samples in a binary format, coded as support for a gap location if at least 5 reads support it in that sample.
indel.positions=read.table(file = "nonKallithea_viruses_diversity/Kallithea.25.combined.indel.positions.txt", sep = "\t", col.names = c("virus","sample","position","indel_support"))
##############################
## Preparing the InDel Data ##
##############################
#Labelling each position in the indel incidence data as cds or intergenic
indel.positions$cds<-character(length = length(indel.positions$position))
indel.positions$cds<-ifelse(sapply(indel.positions$position, function(p) any(Kallithea_cds_positions$start <= p & Kallithea_cds_positions$end >= p)),"cds","ig")
#creating a dataframe showing the number of samples with indel support
indel.positions$sample<-as.factor(indel.positions$sample)
no_samples<-as.numeric(nlevels(indel.positions$sample))
# Per-position count of samples supporting an indel.
indel_sums <- indel.positions %>%
  group_by(position) %>%
  summarise(sum_indel = sum(indel_support))
indel_sums$cds<-character(length = length(indel_sums$position))
indel_sums$cds<-ifelse(sapply(indel_sums$position, function(p) any(Kallithea_cds_positions$start <= p & Kallithea_cds_positions$end >= p)),"cds","ig")
# 10 bp sliding mean of the per-position sample fraction, via
# evobiR::SlidingWindow; positions are the window midpoints.
indel_window<-10
indel_perc <- cbind(indel_sums,indel_sums$sum_indel/no_samples)
sliding_indel_perc<-SlidingWindow("mean",indel_perc[,4],indel_window,1)
sliding_indel_pos<-seq(median(seq(1,indel_window,by=1)),refseqlength-(indel_window-median(seq(1,indel_window,by=1))),by = 1)
sliding_indel_data<-as.data.frame(cbind(sliding_indel_pos,sliding_indel_perc))
colnames(sliding_indel_data)<-c("pos","perc")
#and preparing the data which will become the plotted polygons for intergenic and coding regions
#adding a col into the sliding perc data frame to colour the line by CDS or non-CDS status
cds_col<-"#E69F00"
ig_col<-"darkblue"
sliding_indel_data$col<-character(length = length(sliding_indel_data$pos))
sliding_indel_data$col<-ifelse(sapply(sliding_indel_data$pos, function(p) any(Kallithea_cds_positions$start <= p & Kallithea_cds_positions$end >= p)),cds_col,ig_col)
#setting up the data so theres a separate percentage col for ig regions and cds regions
sliding_indel_data_polygon<-as.data.frame(cbind(sliding_indel_data$pos,sliding_indel_data$col))
colnames(sliding_indel_data_polygon)<-c("pos","col")
sliding_indel_data_polygon$perc_intergenic<-sliding_indel_data$perc
sliding_indel_data_polygon$perc_cds<-sliding_indel_data$perc
sliding_indel_data_polygon$perc_intergenic[sliding_indel_data_polygon$col==cds_col]<-0
sliding_indel_data_polygon$perc_cds[sliding_indel_data_polygon$col==ig_col]<-0
###########################################
## Inspecting the distribution of InDels ##
###########################################
#Adding a binary col to the sums data frame (to indicate that an InDel is found at a position at least once), then using this to investigate whether indel incidence is more likely in intergenic or coding regions of the genome
indel_sums<-data.frame(indel_sums,binary_support=numeric(length = length(indel_sums$position)))
indel_sums$binary_support[indel_sums$sum_indel>=1]<-1
indel_sums$no_samples=as.numeric(rep.int(no_samples,times = length(indel_sums$binary_support)))
# Cell counts for the 2x2 contingency table
# (indel / non-indel positions x intergenic / cds positions):
no_intergenic_indels<-as.numeric(length(indel_sums$binary_support[indel_sums$binary_support==1 & indel_sums$cds=="ig"]))
no_cds_indels<-as.numeric(length(indel_sums$binary_support[indel_sums$binary_support==1 & indel_sums$cds=="cds"]))
no_cds_non_indels<-as.numeric(length(indel_sums$binary_support[indel_sums$binary_support==0 & indel_sums$cds=="cds"]))
no_intergenic_non_indels<-as.numeric(length(indel_sums$binary_support[indel_sums$binary_support==0 & indel_sums$cds=="ig"]))
#Now using chi squared test to examine whether rows and columns of the contingency table are statistically independent or not - eg. is the distribution of indels and non-indels in the genome independent of the distribution of cds and intergenic sites
#Our data table is the no_cds indels, no_integenic indels, no_cds non indels, no_intergenic non indels
chisq_test_data<-matrix(data = c(no_intergenic_indels,no_cds_indels,no_intergenic_non_indels,no_cds_non_indels), nrow = 2, ncol = 2, byrow = TRUE)
rownames(chisq_test_data)<-c("indels","non-indels")
colnames(chisq_test_data)<-c("intergenic","cds")
Kallithea_indel_chisq<-chisq.test(chisq_test_data)
#to look at the residuals
Kallithea_indel_chisq$residuals
#Chi-square test for independence on the number of indels found in cds and intergenic regions, compared to the expected numbers based on the number of total cds and intergenic sites found a strong positive association between intergenic regions and finding indels (X-squared = 3236, df = 1, p-value < 2.2e-16)
######################################
## Importing the piS and intpi data ##
######################################
##Importing total piS and intergenic pi data
# intergenic and syn-nsyn per position nucleotide diversity files for total/merged population
read.table(file = "nonKallithea_viruses_diversity/Kallithea.25.merged.bwa.500.fc.wholegenome.syn-nsyn.snps.nulc.div.tsv", sep = "\t", header = TRUE)->Kallithea.per.position.syn.nsyn.nucl.div
read.table(file = "nonKallithea_viruses_diversity/Kallithea.wholepop.intergenic.per.site.nucl.div.tsv", sep = "\t", header = TRUE)->Kallithea.per.position.intergenic.nucl.div
######################################
## Preparing the piS and intpi data ##
######################################
# One row per reference position, labelled cds/ig; nucl_div starts at 0 and is
# filled in from the intergenic and synonymous per-site diversity tables below.
#making data frame of nucleotide diversity fr the total/merged population
data.frame(position = as.numeric(seq(1,refseqlength,by = 1)), type = character(length = refseqlength), nucl_div = numeric(length = refseqlength))->Kallithea.nucl.div.data
Kallithea.nucl.div.data$type<-ifelse(sapply(Kallithea.nucl.div.data$position, function(p) any(Kallithea_cds_positions$start <= p & Kallithea_cds_positions$end >= p)),"cds","ig")
#putting the nucleotide diversity from intergenic sites into the combined data frame
Kallithea.nucl.div.data$nucl_div[Kallithea.per.position.intergenic.nucl.div$position[Kallithea.per.position.intergenic.nucl.div$site_nucl_div_maf>0]]<-Kallithea.per.position.intergenic.nucl.div$site_nucl_div_maf[Kallithea.per.position.intergenic.nucl.div$site_nucl_div_maf>0]
#putting the nucleotide diversity from the synonymous coding sites into the combined data frame
Kallithea.nucl.div.data$nucl_div[Kallithea.per.position.syn.nsyn.nucl.div$position[Kallithea.per.position.syn.nsyn.nucl.div$site_nucl_div_maf>0 & Kallithea.per.position.syn.nsyn.nucl.div$type=="syn"]]<-Kallithea.per.position.syn.nsyn.nucl.div$site_nucl_div_maf[Kallithea.per.position.syn.nsyn.nucl.div$site_nucl_div_maf>0 & Kallithea.per.position.syn.nsyn.nucl.div$type=="syn"]
#Figuring out the number of synonymous sites per codon for the denominator of the sliding window
#importing the non-synonymous length table for Kallithea virus (generated using popoolation)
snl.table=read.table("nonKallithea_viruses_diversity/snl.Kallithea.txt",header = FALSE,col.names = c("codon","nsyn_length"),skip = 5,sep = ":")
# synonymous length = 3 - non-synonymous length, per codon
snl.table$syn_length<-(3-snl.table$nsyn_length)
snl.table<-snl.table[,c(1,3)]#removing the nsyn length values
cds.seqs=read.fasta("nonKallithea_viruses_diversity/Kallithea.cds.regions.fasta", forceDNAtolower = FALSE, strip.desc = TRUE)
ref.seq=read.fasta("nonKallithea_viruses_diversity/KX130344_Kallithea_virus.fasta",forceDNAtolower = FALSE, strip.desc = TRUE)
refseq_mat<-matrix(data = ref.seq$KX130344_Kallithea_virus, nrow = refseqlength, ncol = 1, byrow = TRUE)
#initialising data frame for results
# Intergenic positions count as 1 analysable site (int_syn_sites defaults to 1);
# coding positions are overwritten with their codon's synonymous length below.
syn_sites_per_codon<-data.frame(position = seq(1,refseqlength,1), cds = rep.int("intergenic",times = refseqlength), sequence = refseq_mat, codon = character(length = refseqlength), int_syn_sites = rep.int(1,times = refseqlength), stringsAsFactors = FALSE)
#Adding the names of the genes/intergenic to the data frame from the gtf gene positions
for (k in 1:length(Kallithea_cds_positions$start)){
Kallithea_cds_positions$start[k]->start
Kallithea_cds_positions$end[k]->end
# gene name = "gene_id" field of the gtf attribute string, with the
# trailing "; transcript_id ..." part removed
gsub("gene_id ","",Kallithea_cds_positions$id[k])->name
gsub("; transcript_id.*","",name)->name
syn_sites_per_codon$cds<-ifelse(sapply(syn_sites_per_codon$position, function(p) any(start <= p & end >= p)),name,syn_sites_per_codon$cds)
}
#adding the codons that each position is a part of into the table
# NOTE(review): codons are read in frame from position 1 of the REFERENCE,
# not per-CDS reading frame -- confirm that is the intended approximation.
refseq_codons<-matrix(data = refseq_mat, nrow = refseqlength/3, ncol = 3, byrow = TRUE)
refseq_codons<-paste(refseq_codons[,1], refseq_codons[,2], refseq_codons[,3], sep = "")
codon_vec<-vector()
for (m in 1:length(refseq_codons)) {
rep.int(refseq_codons[m],times = 3)->three_codons
codon_vec<-c(codon_vec,three_codons)
}
syn_sites_per_codon$codon<-codon_vec
#adding the synonymous length of each coding codon into the table
syn_sites_per_codon$int_syn_sites[syn_sites_per_codon$cds!="intergenic"]<-snl.table$syn_length[match(syn_sites_per_codon$codon,snl.table$codon)]
#and now dividing the coding site syn lengths by 3 so its per site rather than per codon
syn_sites_per_codon$int_syn_sites_per_position<-1
syn_sites_per_codon$int_syn_sites_per_position[syn_sites_per_codon$cds!="intergenic"]<-syn_sites_per_codon$int_syn_sites[syn_sites_per_codon$cds!="intergenic"]/3
slideFunct <- function(nucl_div, sites, window, step){
  # Sliding-window nucleotide diversity: for each window, sum the per-site
  # diversities and divide by the number of analysable (intergenic or
  # synonymous) sites in the same window.
  # (adapted from http://coleoguy.blogspot.com/2014/04/sliding-window-analysis.html)
  #
  # Args:
  #   nucl_div: per-position nucleotide diversity along the genome.
  #   sites:    per-position count of analysable sites (denominator).
  #   window:   window width in positions.
  #   step:     distance between successive window start positions.
  # Returns: one pi value per window start position.
  total <- length(nucl_div)
  spots <- seq(from=1, to=(total-window), by=step)
  result <- vector(length = length(spots))
  for(i in 1:length(spots)){
    # BUG FIX: a window starting at s covers s:(s + window - 1), i.e. exactly
    # `window` positions.  The original used s:(s + window), silently summing
    # window + 1 positions, while the midpoint coordinates computed by the
    # callers assume width `window`.
    idx <- spots[i]:(spots[i] + window - 1)
    result[i] <- sum(nucl_div[idx]) / sum(sites[idx])
  }
  return(result)
}
##Creating two sliding window data series, one with a windw size of 1000, and another with 5000
# 1 kb window stepped every 200 bp; positions are the window midpoints.
window<-1000
step<-200
sliding_nucl_div<-slideFunct(Kallithea.nucl.div.data$nucl_div,syn_sites_per_codon$int_syn_sites_per_position,window = window,step = step)
sliding_nucl_div_pos<-seq(median(seq(1,window,by=1)),refseqlength-(window-median(seq(1,window,by=1))),by = step)
sliding_nucl_div_data<-as.data.frame(cbind(sliding_nucl_div_pos,sliding_nucl_div))
colnames(sliding_nucl_div_data)<-c("pos","pi")
##And a second line on the plot with a 5kb window, to show larger area patterns
window<-5000
step<-1000
sliding_nucl_div_2<-slideFunct(Kallithea.nucl.div.data$nucl_div,syn_sites_per_codon$int_syn_sites_per_position,window = window,step = step)
sliding_nucl_div_pos_2<-seq(median(seq(1,window,by=1)),refseqlength-(window-median(seq(1,window,by=1))),by = step)
sliding_nucl_div_data_2<-as.data.frame(cbind(sliding_nucl_div_pos_2,sliding_nucl_div_2))
colnames(sliding_nucl_div_data_2)<-c("pos","pi")
###############################
## And now creating the plot ##
###############################
###sliding piS and intergenic pi
################################
#making some fake data to set up the plot
# (random points spanning the data range establish the axes; they are never
#  drawn because pch = '' below)
runif(refseqlength,min(sliding_nucl_div_data$pi),max(sliding_nucl_div_data$pi))->nucl_div_fake
seq(from=1,to=refseqlength,by=1)->pos_fake
fake_data_nucl_div<-data.frame(pos_fake,nucl_div_fake)
par(mar = c(3, 5.5, 0.5, 0.5), # change the margins
    lwd = 1.5,# increase the line thickness
    cex.axis = 1.2, # increase default axis label size
    cex.lab = 1.3)
par(mfrow=c(2,1))#creating two plots, one on top of the other
#plotting the fake data
plot(fake_data_nucl_div$pos_fake,fake_data_nucl_div$nucl_div_fake,axes = FALSE, main="", xlab="",
     ylab="", pch = '')
#Adding grey boxes to the plot to indicate non-coding regions
#rect(xleft, ybottom, xright, ytop, density = NULL, angle = 45,col = NA, border = NULL, lty = par("lty"), lwd = par("lwd"),.)
rect(0, 0, 152388, 0.01, border = NA, col = "azure2")
#cds in white
rect_xmin<-Kallithea_cds_positions$start
rect_xmax<-Kallithea_cds_positions$end
rect(rect_xmin, 0,rect_xmax, 0.01, border = NA, col = "white")
#adding y axis
axis(2, at = seq(0,0.008,by = 0.002), tick = TRUE, pos = par("usr")[1]+4500,labels = FALSE)
text(x = par("usr")[1]+2000,
     y = seq(0,0.008,by = 0.002),
     labels = seq(0,0.008,by = 0.002),
     adj = 1,
     xpd = NA,
     cex = 1.2)
mtext(side = 2, line = 2.75, "pi at intergenic & \nsynonymous sites", cex = 1.3)
#adding x axis
# tick positions in Kb; the final 153 tick is unlabelled (genome end)
vec<-c(0,15,30,45,60,75,90,105,120,135,150,153)
axis(1, at = vec*1000, tick = TRUE, pos = par("usr")[3]+0.0001, labels = FALSE)
text(x = vec*1000,
     y = par("usr")[3]-0.0004,
     labels = c(vec[1:11],""),
     cex = 1.2,
     xpd = NA)
#plotting the real data
lines(sliding_nucl_div_data$pos,sliding_nucl_div_data$pi,lwd = 1.95,col="darkblue")
#plotting the real data with a larger sliding window
lines(sliding_nucl_div_data_2$pos,sliding_nucl_div_data_2$pi,lwd = 3.7,col="orange")
#Adding legend to the plot
# short line segments drawn by hand to act as legend keys
legend_lines_coords_ig<-matrix(data = c(vec[1]*1000+600,0.0062,vec[2]*1000+600,0.0062) ,nrow = 2,ncol = 2,byrow = TRUE)
legend_lines_coords_cds<-matrix(data = c(vec[1]*1000+600,0.0058,vec[2]*1000+600,0.0058) ,nrow = 2,ncol = 2,byrow = TRUE)
lines(legend_lines_coords_ig, col=ig_col, lwd=1.95)
lines(legend_lines_coords_cds, col=cds_col, lwd=3.7)
legend(vec[2]*1000-2000,0.0066,legend=c("1000 bp window","5000 bp window"),plot=T,bty="n",cex = 0.9, y.intersp = 1, adj = c(0,0.5))
####And the second, lower panel : distribution of indels along the Kallithea virus genome
#making some fake data to set up plot
# (invisible points that only establish the 0-100% axes)
runif(refseqlength,0,100)->perc_fake
seq(1,refseqlength,by=1)->pos_fake_indel
fake_data_perc<-data.frame(pos_fake_indel,perc_fake)
#plotting the fake data
plot(fake_data_perc$pos_fake_indel,fake_data_perc$perc_fake,axes = FALSE, main="", xlab="",
     ylab="", pch = '')
#Adding grey boxes to the plot to indicate non-coding regions
rect(0, 0, 152388, 100, border = NA, col = "azure2")
#cds in white
rect_xmin<-Kallithea_cds_positions$start
rect_xmax<-Kallithea_cds_positions$end
rect(rect_xmin, 0,rect_xmax, 100, border = NA, col = "white")
#adding y axis
axis(2, at = seq(0,100,by = 20), tick = TRUE, pos = par("usr")[1]+4500,labels = FALSE)
text(x = par("usr")[1]+2000,
     y = seq(0,100,by = 20),
     labels = seq(0,100,by = 20),
     ## Rotate the labels by 35 degrees.
     xpd = NA,
     adj = 1,
     cex = 1.2)
mtext(side = 2, line = 2.75, "% of samples \nwith gap support", cex = 1.3)
#adding x axis
axis(1, at = vec*1000, tick = TRUE, pos = par("usr")[3]+2, labels = FALSE)
text(x = vec*1000,
     y = par("usr")[3]-6,
     labels = c(vec[1:11],""),
     cex = 1.2,
     xpd = NA)
mtext(side = 1, line = 1.75, "Position in reference genome (Kb)", cex = 1.3)
#plotting the real data
#with polygons
#plotting two polygons, one for the intergenic regions and one for the CDS
# (perc columns are fractions, so multiply by 100 for the % axis)
polygon(sliding_indel_data_polygon$pos,sliding_indel_data_polygon$perc_cds*100,col = cds_col, border = cds_col, lwd = 1.8)
polygon(sliding_indel_data_polygon$pos,sliding_indel_data_polygon$perc_intergenic*100,col = ig_col, border = ig_col, lwd = 1.8)
#Adding legend to the plot
legend_lines_coords_ig<-matrix(data = c(vec[1]*1000+600,94,vec[2]*1000+600,94) ,nrow = 2,ncol = 2,byrow = TRUE)
legend_lines_coords_cds<-matrix(data = c(vec[1]*1000+600,86,vec[2]*1000+600,86) ,nrow = 2,ncol = 2,byrow = TRUE)
lines(legend_lines_coords_ig, col=ig_col, lwd=2.5)
lines(legend_lines_coords_cds, col=cds_col, lwd=2.5)
legend(vec[2]*1000-2000,par("usr")[4]-1.75,legend=c("Intergenic regions","CDS"),plot=T,bty="n",cex = 0.9, y.intersp = 1.15, adj = c(0,0.5))
##moved plots a little closer together in inkscape and added pi greek symbol to axis
|
64c3f30b4e6490701ab57045693ee57e36e420f1
|
231669037e1b1d0d0967a1e7961a81ad5b43c871
|
/Analiza_Roznicowa/000_filterData.R
|
c7d072233a398b96a6bf9fa32dd6e82cc8d34bc9
|
[] |
no_license
|
MarcinKosinski/SymbiozaV
|
1ac95d1cf59a2dfd61e74bde37a50684b016fdbe
|
727c661b7686a6a8d278b778f08ef477e24aaabb
|
refs/heads/master
| 2021-01-17T12:37:13.807092
| 2016-06-14T12:03:19
| 2016-06-14T12:03:19
| 56,577,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,070
|
r
|
000_filterData.R
|
# 0. Clean the data:
#    a) remove every column (sample) whose values are all 0,
#    b) remove every row that has 0 in at least 70% of the samples,
#    c) compute the Median Absolute Deviation (MAD) for each row and
#       drop the bottom 10% (lowest-variability retroelements).
# NOTE(review): column 1 is assumed to be an identifier column and is
# excluded from all numeric computations -- confirm against the loader.

# 0a) Remove all-zero columns.  (The original computed the index set
#     but never used it, so no column was ever dropped.)
COL2remove <- which(colSums(BRCA.rnaseq[, -1]) == 0)
if (length(COL2remove) > 0) {
  # +1 offsets the identifier column excluded from colSums() above.
  BRCA.rnaseq <- BRCA.rnaseq[, -(COL2remove + 1)]
}

# 0b) Remove rows with 0 in >= 70% of the samples.
#     (The original divided a per-row 0/1 logical by nrow(), which can
#     never reach 0.7; the per-row fraction of zero entries is intended.)
zeroFraction_inROW <- rowMeans(BRCA.rnaseq[, -1] == 0)
ROW2remove <- which(zeroFraction_inROW >= 0.7)
if (length(ROW2remove) > 0) {
  # Guard: `x[-integer(0), ]` would silently drop *every* row.
  BRCA.rnaseq.2 <- BRCA.rnaseq[-ROW2remove, ]
} else {
  BRCA.rnaseq.2 <- BRCA.rnaseq
}

# 0c) MAD per row; keep rows at or above the 10th percentile.
#     (The original kept everything with MAD > 0, which does not match
#     the stated "remove 10% bottom" rule.)
mad_BRCA.rnaseq.2 <- apply(BRCA.rnaseq.2[, -1], 1, mad)
quantile(mad_BRCA.rnaseq.2, probs = seq(0, 1, 0.01))
BRCA.rnaseq.3 <- BRCA.rnaseq.2[mad_BRCA.rnaseq.2 >= quantile(mad_BRCA.rnaseq.2, probs = 0.1), ]

write.csv(BRCA.rnaseq.3,
          file = "mad_BRCA.rnaseq.2.csv",
          quote = FALSE,
          row.names = FALSE)
|
9bb0b908238fd1d4dca0fc1f2515cceff339b3cb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/photobiologyLEDs/examples/unknown.Rd.R
|
e216084d1a0f6a3430752ded40125aa31a7a972b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
unknown.Rd.R
|
# Auto-extracted example script from the photobiologyLEDs package docs:
# loads the package and prints the `unknown` dataset object.
library(photobiologyLEDs)
### Name: unknown
### Title: Spectral data for LEDs array of unknown manufacturer
### Aliases: unknown
### Keywords: datasets
### ** Examples
# Auto-printing the lazily loaded dataset object.
unknown
|
cc06fcbcfb96fdf0cbf8a9a583ce6ead355ac15c
|
e07d9677c74091fc500ccbfcbedc90681125c116
|
/man/is.tbl_lazy.Rd
|
fe3ca34f33231abe2f488df5535d5789cdcccc2c
|
[] |
no_license
|
nathaneastwood/flicker
|
8d735391e0a066a44fc49878faa874168280091b
|
fcc0ef876f8b7a2ecef3a2898e71750920ed9f63
|
refs/heads/master
| 2023-03-06T14:05:36.020803
| 2021-02-21T18:20:26
| 2021-02-21T18:20:26
| 302,949,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 323
|
rd
|
is.tbl_lazy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is.tbl_lazy}
\alias{is.tbl_lazy}
\alias{is.tbl_spark}
\title{Lazy R Object Checks}
\usage{
is.tbl_lazy(x)
is.tbl_spark(x)
}
\arguments{
\item{x}{Any R object.}
}
\value{
A \code{logical(1)}.
}
\description{
Lazy R Object Checks
}
|
dabc70044d6c95a42406be72b509e168b225ef41
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CluMP/examples/PanelPlot.Rd.R
|
63f2154924b0e48bcdf305ae789d6342bb7bbed2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
PanelPlot.Rd.R
|
# Auto-extracted example script from the CluMP package docs: simulate
# two micro-panel datasets (one per gender) and plot them together.
library(CluMP)
### Name: PanelPlot
### Title: Plot Micro-Panel (longitudinal) Data
### Aliases: PanelPlot
### Keywords: CLUMP
### ** Examples
# Simulate a 50-subject linear-trend panel with up to 10 visits each.
dataMale <- GeneratePanel(n = 50, Param = ParamLinear, NbVisit = 10)
dataMale$Gender <- "M"
dataFemale <- GeneratePanel(n = 50, Param = ParamLinear, NbVisit = 10)
# Shift female IDs by 50 so they do not collide with the male IDs.
dataFemale$ID <- dataFemale$ID + 50
dataFemale$Gender <- "F"
data <- rbind(dataMale, dataFemale)
# One line per subject (group = ID), colored by gender.
PanelPlot(data = data, formula = Y ~ Time, group = "ID", color = "Gender")
|
060415b011f6b31f229bc127d6a9fea596422798
|
39b5aec17aef454545d157e12064e443ef9d20d3
|
/man/fits.Rd
|
af63e69a316724e60ab9fb4a5a0e2ead1bd979c8
|
[] |
no_license
|
skoval/anoint
|
1a311d38133453c7f045eda3d664a2913957ac8c
|
dbf1118fe5ea34417f84d5e04b59fd0cb919ec99
|
refs/heads/master
| 2021-01-10T21:53:06.098625
| 2015-07-19T08:26:29
| 2015-07-19T08:26:29
| 38,982,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
rd
|
fits.Rd
|
\name{fits}
\docType{methods}
\alias{fits}
\alias{fits,anoint.fit-method}
\title{Extract fits from \code{anoint.fit} object}
\description{
Extract the specified \code{anoint} model from a \code{anoint.fit} object.
}
\section{Methods}{
\describe{
\item{fits}{\code{signature(object = "anoint.fit", type = c("obo","uim","pim.exact","pim.approx"))}:
Extracts the specified fitted object from a \code{anoint.fit}.
}
}
}
\author{S. Kovalchik \email{s.a.kovalchik@gmail.com}}
\keyword{methods}
|
3db4194ebe030a486098be2e97ce7ac763f6e745
|
34bc8f7b265cc33ada6ea177ad964cbd5aa486d1
|
/functions.R
|
a140694e55aa0272d33bc6a687ffde28895b4b4f
|
[] |
no_license
|
nxskok/ratings
|
7f8f31bc3853f9dc1b27b9ca722ce4d3ccba6f07
|
681f13953ac82e8434853c61f298cc8b3362eb17
|
refs/heads/master
| 2022-11-10T12:16:07.073934
| 2020-06-25T22:15:50
| 2020-06-25T22:15:50
| 274,554,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,625
|
r
|
functions.R
|
## functions.R
# functions for making prior and posterior
# Return the sorted, de-duplicated names of every league whose country
# field matches `country` (case-insensitive substring match).
country_to_leagues <- function(games, country) {
  per_comp <- games %>%
    select(comp, comp_name, time_stamp, country) %>%
    group_by(comp) %>%
    summarize(nm = min(comp_name), nt = min(time_stamp), nc = min(country))
  matching <- per_comp %>%
    filter(str_detect(tolower(nc), tolower(country)))
  matching %>%
    pull(nm) %>%
    unique() %>%
    sort()
}
# given country and league name, return all seasons of that with numbers
# nt is the *first* game date
# Returns one summary row per competition id, newest first.
# NOTE(review): `nm` is assigned twice in summarize(); the second
# `nm=n()` silently overwrites the league name with the game count --
# the count was probably meant to get its own column (e.g. `ng=n()`).
# NOTE(review): the final `filter(nc == country)` compares against
# `country`, which is neither a column at that point nor an argument of
# this function (the argument is `ctry`), so it resolves to a global if
# one exists -- confirm the intended comparison.
country_to_league_number=function(games,ctry,league_name) {
  games %>% filter(comp_name == league_name) %>%
    filter(str_detect(country, ctry)) %>%
    group_by(comp) %>%
    summarize(nm=min(comp_name),nt=min(time_stamp),nc=max(country),nm=n()) %>%
    arrange(desc(nt)) %>%
    filter(nc == country)
}
# Subset `games` to the rows whose competition id appears in
# `comp_number` (a scalar or a vector of ids).
comp_games <- function(games, comp_number) {
  filter(games, comp %in% comp_number)
}
# Build a lookup table (team name -> integer id) covering every team
# appearing in `the_games` (the output of comp_games()), sorted by name.
team_lookup_table <- function(the_games) {
  all_names <- c(the_games$t1_name, the_games$t2_name)
  distinct_names <- sort(unique(all_names))
  tibble(team = distinct_names, id = seq_along(distinct_names))
}
# Create the per-game input table for Stan: integer team ids (t1, t2)
# and numeric scores (s1, s2), one row per *completed* game.
# `lookup_table` is the output of team_lookup_table().
make_stan <- function(the_games, lookup_table) {
  the_games %>%
    # Map both team-name columns onto their integer ids.
    left_join(lookup_table, by = c("t1_name" = "team")) %>%
    left_join(lookup_table, by = c("t2_name" = "team")) %>%
    # Scores are stored as "2 - 1" style strings; split into two columns.
    # (convert = T spelled out as TRUE -- T is reassignable in R.)
    separate(score, c("s1", "s2"), convert = TRUE) %>%
    mutate(s1 = as.numeric(s1), s2 = as.numeric(s2)) %>%
    select(t1 = id.x, t2 = id.y, s1, s2) %>%
    # Games without a result yet have NA scores; drop them.
    drop_na(s2)
}
# run stan to create prior for future seasons
# Fits one complete (historical) competition with a flat N(0,1) prior
# on every team's offence (o) and defence (d) and on home advantage (h),
# then writes the posterior means/SDs to `save_name` as CSV.  That CSV
# is later read back by get_prior() and used to seed make_posterior().
make_stan_prior=function(games,comp_id,save_name,iterations=10000) {
  games %>% comp_games(comp_id) -> gg
  gg %>% team_lookup_table() -> lookup_table
  gg_stan=make_stan(gg, lookup_table)
  nt=nrow(lookup_table)
  # Flat prior: mean 0 / sd 1 for every parameter.
  with(gg_stan,list(
    nt=nt,
    ng=nrow(gg_stan),
    x=cbind(t1,t2),
    y=cbind(s1,s2),
    prior_o_mean=rep(0,nt),
    prior_o_sd=rep(1,nt),
    prior_d_mean=rep(0,nt),
    prior_d_sd=rep(1,nt),
    prior_h_mean=0,
    prior_h_sd=1
  )) ->
    stan_prior_data
  # psc.rds is a pre-compiled Stan model in the working directory.
  p.sc=readRDS("psc.rds")
  ans=sampling(p.sc,stan_prior_data,iter=iterations)
  rat=extract(ans)
  # Posterior summary: one row per team id; h/hs are scalars recycled
  # onto every row.
  tibble(id=1:nt,
         o=apply(rat$o,2,mean),
         os=apply(rat$o,2,sd),
         d=apply(rat$d,2,mean),
         ds=apply(rat$d,2,sd),
         h=mean(rat$h),
         hs=sd(rat$h)
  ) %>%
    left_join(lookup_table) %>%  # natural join on the shared "id" column
    select(team,id,everything()) %>%
    write_csv(save_name)
}
# league table
# Build a league table for the competition(s) in `lg_number`:
# one row per team with games played (P), goal difference (GD) and
# points (Pt, 3/1/0), sorted by points then goal difference.
# NOTE(review): the final left_join assumes every team appears both as
# home (t1) and away (t2) at least once; a team with only home or only
# away games would get NA columns -- confirm this cannot happen here.
table_of=function(games,lg_number) {
  games %>%
    filter(comp %in% lg_number) %>%
    # Keep only games with a recorded "h - a" score.
    filter(str_detect(score," - "))%>%
    separate(score,c("s1","s2"),sep=" - ",convert=T) %>%
    select(t1_name,t2_name,s1,s2) %>%
    # Result from the home team's point of view ("X" is unreachable
    # given the three comparisons above; kept as a safety default).
    mutate(result=case_when(
      s1>s2 ~ "W",
      s1==s2 ~ "D",
      s1<s2 ~ "L",
      TRUE ~ "X"
    )) %>%
    # Points for the home team ...
    mutate(pt1=case_when(
      result=="W" ~ 3,
      result=="D" ~ 1,
      result=="L" ~ 0
    )) %>%
    # ... and the mirror-image points for the away team.
    mutate(pt2=case_when(
      result=="W" ~ 0,
      result=="D" ~ 1,
      result=="L" ~ 3
    )) ->
    d
  # Per-team totals over home games ...
  d %>% group_by(t1_name) %>%
    summarize(P=n(),
              GD=sum(s1)-sum(s2),
              Pt=sum(pt1)
    ) ->
    tab1
  # ... and over away games.
  d %>% group_by(t2_name) %>%
    summarize(P=n(),
              GD=sum(s2)-sum(s1),
              Pt=sum(pt2)
    ) ->
    tab2
  # Combine home and away totals into the final table.
  tab1 %>% left_join(tab2,by=c("t1_name"="t2_name")) %>%
    mutate(P=P.x+P.y,GD=GD.x+GD.y,Pt=Pt.x+Pt.y) %>%
    select(t1_name,P,GD,Pt) %>%
    arrange(desc(Pt),desc(GD))
}
# Read a previously saved prior (as written by make_stan_prior) from CSV.
get_prior <- function(priorname) read_csv(priorname)
# Update `prior` (a table read by get_prior()) with the games played so
# far in `league_number` and return the rstan fit.  `multiplier` widens
# the prior standard deviations to allow for between-season drift.
# NOTE(review): reads the file-global `games` rather than taking it as
# an argument (consistent with the *_2 variants below).
make_posterior=function(prior,league_number,iterations=10000,multiplier=1.5) {
  games %>% filter(comp %in% league_number) -> current_games
  # Completed games only (a score of the form "h - a").
  current_games %>%
    filter(str_detect(score," - ")) %>%
    separate(score,c("s1","s2"),sep=" - ",convert=T) %>%
    select(t1_name,t2_name,s1,s2) -> gg
  if (nrow(gg)==0) { # no games yet
    # pull off first row from current_games and fill in 2-1 score for it
    # (gives Stan one observation so sampling does not fail outright).
    current_games %>% slice(1) %>%
      mutate(s1=2, s2=1) %>%
      select(t1_name,t2_name,s1,s2) ->
      gg
  }
  # get rid of any spaces at front or back of team names
  gg %>% mutate_at(vars(ends_with("name")), ~trimws(.)) -> gg
  # the below should squeal if I got any team names wrong in prior
  gg %>% left_join(prior,by=c("t1_name"="team")) %>%
    left_join(prior,by=c("t2_name"="team")) %>%
    select(t1=id.x,t2=id.y,s1,s2) ->
    post_stan
  # print(post_stan) # any NA in here indicate problems in prior
  # Prior means come straight from the CSV; SDs are inflated by
  # `multiplier`.  h/hs are constant columns, so take element [1].
  post_data=with(post_stan,list(
    nt=nrow(prior),
    ng=nrow(post_stan),
    x=cbind(t1,t2),
    y=cbind(s1,s2),
    prior_o_mean=prior$o,
    prior_o_sd=multiplier*prior$os,
    prior_d_mean=prior$d,
    prior_d_sd=multiplier*prior$ds,
    prior_h_mean=prior$h[1],
    prior_h_sd=multiplier*prior$hs[1]
  ))
  # Bail out early (returning the offending matrix) if any team failed
  # to match the prior -- NAs here would crash Stan with a worse error.
  X=with(post_stan, cbind(t1,t2, s1, s2))
  if (any(is.na(X))) {
    print("NAs in x")
    return(X)
  }
  # psc.rds is a pre-compiled Stan model in the working directory.
  p.sc=readRDS("psc.rds")
  post_now=sampling(p.sc,data=post_data,iter=iterations)
}
# Summarise an rstan fit plus its prior table into a sorted rating
# table: one row per team with offence (o), defence (d), their sum
# (overall quality) and difference (openness).
display <- function(post, prior) {
  draws <- extract(post)
  ratings <- tibble(id = seq_len(nrow(prior)),
                    o = apply(draws$o, 2, mean),
                    d = apply(draws$d, 2, mean),
                    h = mean(draws$h))
  ratings %>%
    left_join(prior, by = "id") %>%
    select(team, id, o = o.x, d = d.x) %>%
    mutate(sum = o + d, diff = o - d) %>%
    arrange(desc(sum))
}
# Like display(), but for prior tables whose team-name column is `name`
# (the *_2 pipeline) rather than `team`.  Returns one row per team with
# o, d, sum (quality) and diff (openness), sorted by quality.
post_df=function(prior,post_thing) { # needs fixing for RL. Does it work for soccer? Am I using it? I suspect I need to redo priors like for soccer. Or something.
  rat=extract(post_thing)
  tibble(id=1:nrow(prior),
         o=apply(rat$o,2,mean),
         d=apply(rat$d,2,mean),
         h=mean(rat$h)) %>%
    left_join(prior,by=c("id")) %>%
    # Requires `prior` to have columns name, o, d (o.x/d.x are the
    # posterior means from the tibble above after the suffixing join).
    select(name,id,o=o.x,d=d.x) %>%
    mutate(sum=o+d,diff=o-d) %>%
    arrange(desc(sum))
}
# Plot team movement between two fits: arrows run from the current
# ("now") position back to the previous one in (openness, quality)
# space, each group mean-centred on its own quality axis.
# NOTE(review): post_df() returns a `name` column, but this function
# groups and labels by `team` -- one of the two is stale; confirm which.
# NOTE(review): `namelc` (plot title) is read from the global
# environment, not passed in.
graph_it=function(prior, pre_post, post) {
  df1=post_df(prior, post)
  df0=post_df(prior, pre_post)
  bind_rows(df1, df0, .id="which") -> d # 1 is new, 2 is old
  # Mean quality per snapshot, used to centre each one.
  d %>% group_by(which) %>% summarize(mm=mean(sum)) -> means
  d %>% left_join(means) %>% # look up right mean for "which"
    mutate(sum=sum-mm) %>%
    mutate(which=ifelse(which==1, "now", "previous")) %>%
    # Only the "now" points get text labels.
    mutate(mylab=ifelse(which=="now", team, "")) -> d2
  ggplot(d2,aes(x=diff,y=sum,colour=which,label=mylab))+
    geom_point()+
    geom_path(aes(group=team),colour="darkgreen", arrow=arrow(type="closed", ends="first", length=unit(0.06, "inches")))+
    geom_text_repel(colour="black") +
    xlab("Openness") + ylab("Quality") + ggtitle(str_to_title(namelc))
}
###################################################################
# added 2019-09-02
# For one competition, map each external team id (sw_id) to its most
# frequently used display name, and assign a dense Stan id (1..n).
name_lookup_table <- function(games, lno) {
  comp_rows <- comp_games(games, lno)
  home <- select(comp_rows, sw_id = t1, name = t1_name)
  away <- select(comp_rows, sw_id = t2, name = t2_name)
  bind_rows(home, away) %>%
    group_by(sw_id) %>%
    count(name) %>%
    # Keep the most common spelling of each team's name.
    top_n(n = 1, wt = n) %>%
    ungroup() %>%
    mutate(stan_id = row_number()) %>%
    select(-n)
}
# Compare team membership between two seasons of a league (competition
# ids `lno_old` and `lno_new`).  Returns a list with the teams
# `removed` (present in the old season only) and `added` (present in
# the new season only), matched on sw_id.
team_changes <- function(games, lno_old, lno_new) {
  names_old <- name_lookup_table(games, lno_old)
  names_new <- name_lookup_table(games, lno_new)
  # (A stray bare `names_old` expression -- leftover debugging whose
  #  value was silently discarded -- has been removed.)
  removed <- anti_join(names_old, names_new, by = "sw_id")
  added <- anti_join(names_new, names_old, by = "sw_id")
  list(removed = removed, added = added)
}
# sw_id-based variant of make_stan_prior: fit one (historical)
# competition with a flat N(0,1) prior and write the per-team posterior
# means/SDs to `save_name` as CSV (later consumed by make_posterior_2).
make_stan_prior_2 <- function(games, comp_id, save_name, iterations = 10000) {
  gg <- comp_games(games, comp_id)
  lookup_table <- name_lookup_table(games, comp_id)
  # Attach the Stan team ids to each completed game.
  # BUG FIX: the original joined against a global `name_lookups`, which
  # only worked if that variable happened to exist in the caller's
  # environment; the locally built lookup_table is what was intended.
  looked_up <- gg %>%
    select(t1, t2, score) %>%
    filter(str_detect(score, " - ")) %>%
    left_join(lookup_table, by = c("t1" = "sw_id")) %>%
    left_join(lookup_table, by = c("t2" = "sw_id"))
  # Team-id pairs (stan_id.x, stan_id.y), one row per game.
  looked_up %>%
    select(starts_with("stan")) %>% as.matrix() -> stan_X
  # Scores: when separate() yields four tokens the string embeds extra
  # information, and tokens 2 and 3 are the scores; otherwise tokens 1
  # and 2 are -- NOTE(review): confirm the four-token score format.
  looked_up %>% select(score) %>%
    separate(score, into = c("z1", "z2", "z3", "z4"), fill = "right") %>%
    mutate(s1 = ifelse(is.na(z4), as.numeric(z1), as.numeric(z2)),
           s2 = ifelse(is.na(z4), as.numeric(z2), as.numeric(z3))) %>%
    select(s1, s2) %>% as.matrix() -> stan_y
  nt <- nrow(lookup_table)
  # Flat prior: mean 0 / sd 1 for every parameter.
  stan_prior_data <- list(
    nt = nt,
    ng = nrow(stan_X),
    x = stan_X,
    y = stan_y,
    prior_o_mean = rep(0, nt),
    prior_o_sd = rep(1, nt),
    prior_d_mean = rep(0, nt),
    prior_d_sd = rep(1, nt),
    prior_h_mean = 0,
    prior_h_sd = 1
  )
  # psc.rds is a pre-compiled Stan model in the working directory.
  p.sc <- readRDS("psc.rds")
  ans <- sampling(p.sc, stan_prior_data, iter = iterations)
  rat <- extract(ans)
  # Posterior summary: one row per team, joined back to sw_id and name.
  tibble(id = 1:nt,
         o = apply(rat$o, 2, mean),
         os = apply(rat$o, 2, sd),
         d = apply(rat$d, 2, mean),
         ds = apply(rat$d, 2, sd),
         h = mean(rat$h),
         hs = sd(rat$h)
  ) %>%
    left_join(lookup_table, by = c("id" = "stan_id")) %>%
    write_csv(save_name)
}
# sw_id-based variant of make_posterior: update a prior (the CSV
# written by make_stan_prior_2, read back as a data frame) with the
# games played so far in `league_number` and return the rstan fit.
# `multiplier` widens the prior SDs to allow between-season drift.
# NOTE(review): reads the file-global `games`, as make_posterior does.
make_posterior_2 <- function(prior, league_number, iterations = 10000, multiplier = 1.5) {
  current_games <- games %>% filter(comp %in% league_number)
  # Completed games only; score parsing mirrors make_stan_prior_2.
  gg <- current_games %>%
    filter(str_detect(score, " - ")) %>%
    separate(score, into = c("z1", "z2", "z3", "z4"), fill = "right") %>%
    mutate(s1 = ifelse(is.na(z4), as.numeric(z1), as.numeric(z2)),
           s2 = ifelse(is.na(z4), as.numeric(z2), as.numeric(z3))) %>%
    select(t1, t2, s1, s2)
  if (nrow(gg) == 0) { # no games yet
    # Seed with a fake 2-1 result from the first scheduled game so Stan
    # has one observation; this fails if there are no games at all.
    gg <- current_games %>% slice(1) %>%
      mutate(s1 = 2, s2 = 1) %>%
      select(t1, t2, s1, s2)
  }
  # Look up Stan team ids from sw ids; NAs indicate prior problems.
  post_stan <- gg %>%
    left_join(prior, by = c("t1" = "sw_id")) %>%
    left_join(prior, by = c("t2" = "sw_id")) %>%
    select(t1, t2, id.x, id.y, s1, s2)
  post_data <- with(post_stan, list(
    nt = nrow(prior),
    ng = nrow(post_stan),
    x = cbind(id.x, id.y),
    y = cbind(s1, s2),
    prior_o_mean = prior$o,
    prior_o_sd = multiplier * prior$os,
    prior_d_mean = prior$d,
    prior_d_sd = multiplier * prior$ds,
    prior_h_mean = prior$h[1],
    prior_h_sd = multiplier * prior$hs[1]
  ))
  # Bail out early (returning the offending matrix) if any team failed
  # to match the prior.  (Two leftover debug print() calls removed.)
  X <- with(post_stan, cbind(t1, t2, s1, s2))
  if (any(is.na(X))) {
    print("NAs in x")
    return(X)
  }
  # psc.rds is a pre-compiled Stan model in the working directory.
  p.sc <- readRDS("psc.rds")
  sampling(p.sc, data = post_data, iter = iterations)
}
# Summarise an rstan fit into a per-team rating table (name, o, d, sum,
# diff), sorted by overall quality (o + d), using the sw_id-based
# `team_names` lookup (output of name_lookup_table()).
display_2 <- function(post, team_names) {
  rat <- extract(post)
  # BUG FIX: the original sized the id column with `nrow(prior)`, a
  # variable that only existed as a global; the number of teams is the
  # number of rows in team_names.
  tibble(id = seq_len(nrow(team_names)),
         o = apply(rat$o, 2, mean),
         d = apply(rat$d, 2, mean),
         h = mean(rat$h)
  ) %>%
    left_join(team_names, by = c("id" = "stan_id")) %>%
    select(name, everything()) %>%
    mutate(sum = o + d, diff = o - d) %>%
    arrange(desc(sum))
}
|
208f20bef33e1e074da50cfb3666d653e94bb9ea
|
663763cee873e142ec8da64a9eac151f091bd2a3
|
/man/wildtype_mutant_pnpp.Rd
|
710af108e9db65246caee36f9d31e3b78f83c2c8
|
[] |
no_license
|
cran/ddpcr
|
1121c4066d93281cb003f789cf18f4663122e624
|
e0658fb695a76172c00922987568358372ad3c8e
|
refs/heads/master
| 2023-09-02T04:53:58.533329
| 2023-08-20T22:32:32
| 2023-08-20T23:31:02
| 52,090,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,829
|
rd
|
wildtype_mutant_pnpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/type-wildtype_mutant_pnpp.R
\name{wildtype_mutant_pnpp}
\alias{wildtype_mutant_pnpp}
\title{Plate type: wildtype/mutant PNPP}
\description{
A plate of type \code{wildtype_mutant_pnpp} is a subtype of
\code{\link[ddpcr]{pnpp_experiment}} that assumes the double-positive cluster
denotes wildtype and the other non-empty cluster denotes mutant droplets.
There are two plate types that are subtypes of \code{wildtype_mutant_pnpp}:
\code{\link[ddpcr]{fam_positive_pnpp}} and \code{\link[ddpcr]{hex_positive_pnpp}}.
It is not recommended to use this type directly; instead you should use one
of the subtypes.
}
\details{
Plates with this type have the following analysis steps: \code{INITIALIZE},
\code{REMOVE_FAILURES}, \code{REMOVE_OUTLIERS}, \code{REMOVE_EMPTY},
\code{CLASSIFY}, \code{RECLASSIFY}.
Plates with this type have the following droplet clusters:
\code{UNDEFINED}, \code{FAILED}, \code{OUTLIER}, \code{EMPTY} (double-negative),
\code{RAIN} (not empty but not wildtype nor negative), \code{POSITIVE} (wildtype),
\code{NEGATIVE} (mutant).
\href{https://github.com/daattali/ddpcr#advanced-topic-3-creating-new-plate-types}{See the README} for
more information on plate types.
}
\examples{
\dontrun{
plate <- new_plate(sample_data_dir(), type = plate_types$wildtype_mutant_pnpp)
type(plate)
}
}
\seealso{
\code{\link[ddpcr]{plate_types}}\cr
\code{\link[ddpcr]{fam_positive_pnpp}}\cr
\code{\link[ddpcr]{hex_positive_pnpp}}\cr
\code{\link[ddpcr]{pnpp_experiment}}\cr
\code{\link[ddpcr]{analyze}}\cr
\code{\link[ddpcr]{remove_failures}}\cr
\code{\link[ddpcr]{remove_outliers}}\cr
\code{\link[ddpcr]{remove_empty}}\cr
\code{\link[ddpcr]{classify_droplets}}\cr
\code{\link[ddpcr]{reclassify_droplets}}
}
|
14e4bd9cddc931c31b35935b8d9894dfc2aac4be
|
2249a7339774fbe08d165225a68ddb32569ee67e
|
/Machine Learning A-Z Template Folder/Part 2 - Regression/Section 5 - Multiple Linear Regression/Exercises/Multiple_Linear_Regression.R
|
47d191c19057a7db7de7c9b562c62d134bd45bf7
|
[] |
no_license
|
Timothy-Kornish/MachineLearningWithPythonAndR
|
790001ec8310ffc3ec2e6fdf9813335eda5162f3
|
096884cadd6fc372c83720b0a435ed8a2324d885
|
refs/heads/master
| 2021-09-05T15:08:06.088726
| 2018-01-29T05:25:14
| 2018-01-29T05:25:14
| 113,071,852
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,978
|
r
|
Multiple_Linear_Regression.R
|
# Multiple Linear Regression on the 50 Startups data set.
library(caTools)
#-------------------------------------------------------------------------------
# Loading in Data
#-------------------------------------------------------------------------------
dataset <- read.csv('50_Startups.csv')
print('-------------------------------------------------------------------')
print(dataset)
print('-------------------------------------------------------------------')
summary(dataset)
print('-------------------------------------------------------------------')
#-------------------------------------------------------------------------------
# Encoding categorical data
#-------------------------------------------------------------------------------
dataset$State <- factor(dataset$State,
                        levels = c('New York', 'California', 'Florida'),
                        labels = c(1, 2, 3))
summary(dataset)
print('-------------------------------------------------------------------')
#-------------------------------------------------------------------------------
# Train / test split
#-------------------------------------------------------------------------------
set.seed(123)
# BUG FIX: sample.split() expects the outcome vector, not the whole
# data frame, as its first argument.
split <- sample.split(dataset$Profit, SplitRatio = 0.8)
# BUG FIX: the original used `subset(dataset, split = TRUE)`; `split=`
# is not an argument of subset(), so it was swallowed by `...` and both
# sets silently contained ALL rows.  The comparison `split == TRUE`
# performs the intended logical row selection.
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
#-------------------------------------------------------------------------------
# Fitting Multiple Linear Regression on all independent variables
#-------------------------------------------------------------------------------
regressor <- lm(formula = Profit ~ ., data = training_set)
summary(regressor)
print('-------------------------------------------------------------------')
#-------------------------------------------------------------------------------
# Predicting the test set results
#-------------------------------------------------------------------------------
y_prediction <- predict(regressor, newdata = test_set)
summary(y_prediction)
print('-------------------------------------------------------------------')
#-------------------------------------------------------------------------------
# Building the optimal model using Backward Elimination
# (drop the least significant predictor at each step; fitted on the
#  full dataset, as in the original analysis)
#-------------------------------------------------------------------------------
back_regressor <- lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend + State,
                     data = dataset)
summary(back_regressor)
print('-------------------------------------------------------------------')
back_regressor <- lm(formula = Profit ~ R.D.Spend + Marketing.Spend,
                     data = dataset)
summary(back_regressor)
print('-------------------------------------------------------------------')
back_regressor <- lm(formula = Profit ~ R.D.Spend, data = dataset)
summary(back_regressor)
print('-------------------------------------------------------------------')
|
25f6fbe22c7d0647f57e5ea2c8ababcaad3f2bd4
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Kronegger-Pfandler-Pichler/bomb/p20-5.pddl_planlen=1/p20-5.pddl_planlen=1.R
|
37b2f29951d3b65e053ea8268d4a29d90ff23f23
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,363
|
r
|
p20-5.pddl_planlen=1.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 5771
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5591
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5591
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kronegger-Pfandler-Pichler/bomb/p20-5.pddl_planlen=1.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 305
c no.of clauses 5771
c no.of taut cls 100
c
c Output Parameters:
c remaining no.of clauses 5591
c
c QBFLIB/Kronegger-Pfandler-Pichler/bomb/p20-5.pddl_planlen=1.qdimacs 305 5771 E1 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 29 30 31 32 33 35 36 37 40 41 42 43 44 45 48 49 50 51 52 54 55 56 57 59 61 62 63 64 65 67 68 71 72 73 75 78 79 80 81 82 83 85 86 87 88 90 91 93 94 95 96 97 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305] 100 20 110 5591 RED
|
0b67324d65226182fdba8f1a1df70e4772160bb3
|
18f3e432f4c82d2a6f66789393668d31a1e9a175
|
/man/pk.tss.Rd
|
c9dd7352e2c9c05781965c96c49b088078e721a5
|
[] |
no_license
|
ksl31/pknca
|
39490c48c87a41127582706acc7d007fc8664b2a
|
5f2743cd46b33a67c2da7f78a928447e1fd0d895
|
refs/heads/master
| 2021-04-26T23:28:19.193889
| 2018-02-17T21:58:38
| 2018-02-17T21:58:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 772
|
rd
|
pk.tss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tss.R
\name{pk.tss}
\alias{pk.tss}
\title{Compute the time to steady-state (tss)}
\usage{
pk.tss(..., type = c("monoexponential", "stepwise.linear"), check = TRUE)
}
\arguments{
\item{\dots}{Passed to \code{\link{pk.tss.monoexponential}} or
\code{\link{pk.tss.stepwise.linear}}.}
\item{type}{The type of Tss to calculate, either
\code{stepwise.linear} or \code{monoexponential}}
\item{check}{See \code{\link{pk.tss.data.prep}}}
}
\value{
A data frame with columns as defined from
\code{pk.tss.monoexponential} and/or \code{pk.tss.stepwise.linear}.
}
\description{
Compute the time to steady-state (tss)
}
\seealso{
\code{\link{pk.tss.monoexponential}},
\code{\link{pk.tss.stepwise.linear}}
}
|
b33c8bb32bdb7a9ce3b4a2a90d96d1de5bb6ea3e
|
40ce3595e3a5404f7025a627e4ffbbb9d1a0e849
|
/R/signatureDistance.r
|
b2f70e76a042bd8ad21eb6dd4a3efbf326f141ca
|
[] |
no_license
|
joseTamezPena/FRESA.CAD
|
2d3e0f3571714345fe821e9fc969c890d49fdbb0
|
1de0d21c33094be3a6a3bcf6809b2e03a6d0d6a5
|
refs/heads/master
| 2023-09-01T01:20:26.403977
| 2023-08-21T22:16:25
| 2023-08-21T22:16:25
| 184,284,321
| 5
| 0
| null | 2020-12-02T00:17:31
| 2019-04-30T15:10:29
|
R
|
UTF-8
|
R
| false
| false
| 5,867
|
r
|
signatureDistance.r
|
# signatureDistance: distance from a template signature to each row of `data`.
#
# template: one of
#   - a named numeric vector (a single reference signature),
#   - a quantile matrix (rows = the quantile levels in theQuant below,
#     columns = features), or
#   - a list with fields $samples, $quant, $meanv, $sdv, $template
#     (quantile matrix), which also overrides the defaults below.
# data:   a column-named data frame; distances are computed per row,
#         using only the columns named in the template.
# method: "pearson"/"spearman"/"kendall" -> correlation-based distance,
#         "RSS" -> normalized root-sum-square, "MAN" -> normalized
#         Manhattan, "NB" -> quantile-based probabilistic distance.
# fwts:   optional per-feature weights; defaults to equal weights.
#
# Returns a named numeric vector (one distance per row of data) with
# attributes "ld","ud","qld","qud" holding the lower/upper deviation
# scales used; NAs in the result are replaced by a large sentinel.
signatureDistance <-
function (template, data=NULL, method = c("pearson","spearman","kendall","RSS","MAN","NB"),fwts=NULL)
{
  #given the template: mean,median,sample, etc....;signatureDistance it will return the distance between the template to each row of the dataframe
  #the template is a named numeric vector
  #the data is a colnamed data frame
  #methods:
  # RSS: Normalized Root Sum Square
  # MAN: Normalized Manhattan distance
  # pearson: 2*(1-Pearson correlation coefficient)
  # spearman: 2*(1-spearman correlation coefficient)
  # kendall: 2*(1-kendall correlation coefficient)

  method <- match.arg(method)
  # Default quantile levels of the template rows (2.5% .. 97.5%).
  theQuant <- c(0.025,0.100,0.159,0.250,0.500,0.750,0.841,0.900,0.975);
  samplesize <- 2.0;
  # List form: unpack sample size, quantile levels, mean/sd vectors and
  # the quantile matrix itself.
  if (class(template)[1] == "list")
  {
    samplesize <- template$samples;
    theQuant <- template$quant;
    meant <- template$meanv;
    sdt <- template$sdv;
    template <- template$template;
  }
  # Equal feature weights unless the caller supplied fwts.
  if (is.null(fwts))
  {
    fwts <- rep(1,ncol(template));
  }
  # Per-quantile scale factors from the t distribution with
  # (samplesize - 1) degrees of freedom.
  wvalues <- 1.0/abs(qt(theQuant,df=samplesize-1));
  if (class(template)[1]=="matrix")
  {
    vnames <- colnames(template);
  }
  else
  {
    vnames <- names(template);
  }
  # Restrict data to the template's features, in template order.
  datasubset <- as.matrix(data[,vnames]);
  medianv <- as.integer((length(theQuant) + 1)/2);
  ld <- NULL;
  ud <- NULL;
  qld <- NULL;
  qud <- NULL;
  if (class(template)[1] == "matrix")
  {
    # Matrix template: derive asymmetric deviation scales per feature
    # from the spread of the quantile rows around the mean.
    tem <- meant;
    # cat("median:")
    # print(tem)
    # Lower deviation (ld): weighted distance from the mean down to the
    # below-median quantile rows.
    wts <- numeric(length(tem));
    ld <- numeric(length(tem));
    for (i in 1:(medianv - 1))
    {
      tdis <- tem - template[i,];
      w <- (tdis >= 0)*theQuant[i];
      wts <- wts + w;
      tdis[tdis < 0] <- 0;
      ld <- ld + w*wvalues[i]*tdis;
    }
    wts[wts == 0] <- 1.0e-10;   # avoid division by zero
    ld <- ld/wts;
    # Quartile-based lower scale (qld) from the row two below the median.
    tdis <- tem - template[medianv - 2,];
    tdis[tdis < 0] <- 0;
    qld <- tdis*wvalues[medianv - 2];
    # Upper deviation (ud): same construction above the median.
    wts <- numeric(length(tem));
    ud <- numeric(length(tem));
    for (i in (medianv + 1):length(wvalues))
    {
      tdis <- template[i,] - tem;
      w <- (tdis >= 0)*(1.0-theQuant[i]);
      wts <- wts + w;
      tdis[tdis < 0] <- 0;
      ud <- ud + w*wvalues[i]*tdis;
    }
    wts[wts == 0] <- 1.0e-10;   # avoid division by zero
    ud <- ud/wts;
    # Quartile-based upper scale (qud) from the row two above the median.
    tdis <- template[medianv + 2,] - tem;
    tdis[tdis < 0] <- 0;
    qud <- tdis*wvalues[medianv + 2];
    # Fallback chain for zero scales: half the opposite side, then the
    # template sd, then the constant 0.25.
    ld[ld == 0] <- 0.5*ud[ld == 0];
    ld[ld == 0] <- sdt[ld == 0];
    ld[ld == 0] <- 0.25;
    qld[qld == 0] <- ld[qld == 0];
    ud[ud == 0] <- 0.5*ld[ud == 0];
    ud[ud == 0] <- sdt[ud == 0];
    ud[ud == 0] <- 0.25;
    qud[qud == 0] <- ud[qud == 0];
    # cat("ld:")
    # print(ld)
    # cat("qld:")
    # print(qld)
    # cat("ud:")
    # print(ud)
    # cat("qud:")
    # print(qud)
  }
  else
  {
    # Vector template: symmetric scales from sd and IQR.
    tem <- template;
    ld <- sd(template);
    ld[ld == 0] <- 0.25;
    ud <- ld;
    qld <- IQR(template)/abs(qnorm(0.25))/2;
    qld[qld == 0] <- ld[qld == 0];
    qud <- qld;
  }
  switch(method,
    NB =
    {
      # "Naive-Bayes"-like distance: for each feature, find the nearest
      # template quantile row, look up its (approximate, piecewise
      # linear) density, combine the per-feature values as a weighted
      # geometric mean, and map the result through the t quantile
      # function so larger values mean "further from the template".
      whichmin <- function (x)
      {
        # Index of the minimum; median() breaks ties deterministically.
        minidx <- as.integer(median(which.min(x)));
        return (minidx);
      }
      NBDistance <- function (x,template,nPDF,wts,dff,center)
      {
        # Per-feature absolute distances to every quantile row.
        md <- template;
        for (ind in 1:nrow(template))
        {
          md[ind,] <- abs(x-template[ind,]);
        }
        minidx <- apply(md,2,whichmin);
        # Base "probability" from the nearest quantile row's density.
        pval = nPDF[minidx];
        # Piecewise-linear interpolation/extrapolation of the density
        # between adjacent quantile rows.
        for (ds in 1:length(x))
        {
          dis <- x[ds] - template[minidx[ds],ds];
          if (dis != 0)
          {
            dis2 <- template[minidx[ds],ds] - template[center,ds];
            if (dis2 == 0)
            {
              dis2 <- 0.501*dis;
            }
            if ((minidx[ds]==1) && (dis < 0))
            {
              # Below the lowest quantile: linear tail extrapolation.
              pval[ds] = pval[ds]*(1.0 - dis/(2.0*dis2));
            }
            else
            {
              if ((minidx[ds]==length(nPDF)) && (dis > 0))
              {
                # Above the highest quantile: linear tail extrapolation.
                pval[ds] = pval[ds]*(1.0 - dis/(2.0*dis2));
              }
              else
              {
                if (dis < 0)
                {
                  # Interpolate toward the next lower quantile row.
                  dis2 <- (template[minidx[ds]-1,ds]-template[minidx[ds],ds]);
                  if (dis2 < 0)
                  {
                    pval[ds] = pval[ds] + dis*(nPDF[minidx[ds]-1]-pval[ds])/dis2;
                  }
                }
                else
                {
                  if (dis > 0)
                  {
                    # Interpolate toward the next higher quantile row.
                    dis2 <- (template[minidx[ds]+1,ds]-template[minidx[ds],ds])
                    if (dis2 > 0)
                    {
                      pval[ds] = pval[ds] + dis*(nPDF[minidx[ds]+1]-pval[ds])/dis2;
                    }
                  }
                }
              }
            }
          }
        }
        # Floor to keep log() finite.
        pval[pval < 1.0e-16] <- 1.0e-16;
        logpvals = log(pval);
        tsum = sum(wts);
        # Weighted geometric mean of the per-feature probabilities ...
        md <- exp(sum(wts*logpvals,na.rm=TRUE)/tsum);
        # ... mapped to a t deviate (upper tail), so closer => smaller.
        md <- qt(md,df=dff,lower.tail = FALSE);
        return (md);
      }
      center = as.integer((length(theQuant)+1)/2);
      # Normal density at each quantile level, scaled to 0.5 at the median.
      nPDF <- 0.5*dnorm(qnorm(theQuant))/dnorm(qnorm(0.5));
      metric <- apply(datasubset,1,NBDistance,template,nPDF,fwts,samplesize-1,center);
    },
    RSS =
    {
      # Weighted root-mean-square of the asymmetrically scaled deviations:
      # positive deviations scaled by ud, negative by ld.
      RSSDistance <- function (x,template,ld,ud,wts)
      {
        md <- (x-template);
        tsum = sum(wts);
        md <- sqrt(sum(wts*(pmax(md/ud,-md/ld)^2),na.rm=TRUE)/tsum);
        return (md)
      }
      metric <- apply(datasubset,1,RSSDistance,tem,ld,ud,fwts);
    },
    MAN =
    {
      # Weighted mean of the asymmetrically scaled absolute deviations,
      # using the quartile-based scales qld/qud.
      manDistance <- function (x,template,ld,ud,wts)
      {
        md <- (x-template)*wts;
        tsum = sum(wts);
        md <- sum(pmax(md/ud,-md/ld),na.rm=TRUE)/tsum;
        return (md)
      }
      metric <- apply(datasubset,1,manDistance,tem,qld,qud,fwts);
    },
    {
      # Default branch: correlation-based distance for "pearson",
      # "spearman" and "kendall".
      # NOTE(review): the scale factor here is 3.0, not the 2.0 stated
      # in the header comment above -- confirm which is intended.
      corDistance <- function (x,template,method) {md <- 3.0*(1.0-cor(x,template,method=method,use="pairwise.complete.obs")); return (md)}
      if (class(template)[1]=="matrix")
      {
        # Matrix template: weighted average of the correlation distance
        # to every quantile row, with tail rows weighted by their
        # distance from the median quantile.
        metric <- numeric(nrow(datasubset));
        swts <- 0;
        for (i in 1:length(theQuant))
        {
          tem <- template[i,];
          wts <- theQuant[i];
          if (wts > 0.5)
          {
            wts <- 1.0-wts;
          }
          metric <- metric + wts*(apply(datasubset,1,corDistance,template=tem,method=method));
          swts <- swts + wts;
        }
        metric <- metric/swts;
      }
      else
      {
        tem <- template;
        metric <- apply(datasubset,1,corDistance,template=tem,method=method);
      }
    }
  )
  names(metric) <- rownames(data);
  # Expose the deviation scales for diagnostics.
  attr(metric,"ld") <- ld;
  attr(metric,"ud") <- ud;
  attr(metric,"qld") <- qld;
  attr(metric,"qud") <- qud;
  # Rows that could not be scored get a large sentinel distance.
  metric[is.na(metric)] <- 1.0e10;
  result <- metric
  return (result);
}
|
48925429e79aae2f4d8f30a2600c248b0cf68dc2
|
1859f328ad9ff15d7ebc5491ef99879346d7e707
|
/R/utils-tests.R
|
b94fe25e388cdb99e4f1635de40ed604e6cb6f6c
|
[] |
no_license
|
grattan/covid19.model.sa2
|
c5671e92de0fa3caeaf32da51a62cc45856146b7
|
980f73e7a14ead9caa921ec703928d5c6ed4f028
|
refs/heads/master
| 2022-11-27T06:37:19.072469
| 2020-08-09T16:52:25
| 2020-08-09T16:52:25
| 255,219,008
| 4
| 0
| null | 2020-06-25T10:43:55
| 2020-04-13T03:07:13
|
TeX
|
UTF-8
|
R
| false
| false
| 804
|
r
|
utils-tests.R
|
# Useful for tests to have a 'typically' generated file
read_typical <- function() {
# S$Statuses[, .SD, .SDcols = c(names(S$Statuses)[seq_len(which_first(names(S$Statuses) == "V1"))])] %>% drop_cols(c("pid", "Resistance", "school_id", "work_dzn", "seqN", "HouseholdSize", "V1")) %>% {lapply(X = 1:9, FUN = function(s) invisible(write_fst(.[state == s][, state := NULL], provide.file(paste0("inst/extdata/examples/", "typical-aus-state-", s, ".fst")), compress = 100)))}
out <-
rbindlist(lapply(1:9, function(s) {
read_sys(paste0("examples/typical-aus-state-", s, ".fst"))
}),
idcol = "state",
use.names = TRUE,
fill = TRUE)
out[, "pid" := seq_len(.N)]
setkeyv(out, c("state", "sa2", "hid", "pid"))
out[, c("seqN", "HouseholdSize") := do_seqN_N(hid, pid)]
out
}
|
4ec91e67ed310a7baf572c5c6eb0c6a223f317fb
|
969d4316ad794a0eef0213b01a7b06ddfdf8d90d
|
/09_functionals/06_family/exercise2.r
|
ce3d196ce602fa1e653e2f93c4f59cdfd7fa219b
|
[] |
no_license
|
Bohdan-Khomtchouk/adv-r-book-solutions
|
adaa5b5f178999d130aff1359a23e978e39e86ae
|
e1b3a63c0539de871728b522604110c0aa18c7d1
|
refs/heads/master
| 2021-01-22T00:36:17.450660
| 2015-12-06T02:54:02
| 2015-12-06T02:54:02
| 47,481,353
| 1
| 1
| null | 2015-12-06T02:49:46
| 2015-12-06T02:49:46
| null |
UTF-8
|
R
| false
| false
| 474
|
r
|
exercise2.r
|
### Create a table that has and, or, add, multiply, smaller, and larger in the columns and binary operator, reducing variant, vectorised variant, and array variants in the rows.
### Fill in the cells with the names of base R functions that perform each of the roles.
#TODO:
### Compare the names and arguments of the existing R functions. How consistent are they? How could you improve them?
#TODO:
### Complete the matrix by implementing any missing functions.
#TODO:
|
501a25571514ccc65c09aed30c9aac8bd75f3e41
|
44cf65e7ab4c487535d8ba91086b66b0b9523af6
|
/data/Newspapers/2002.07.09.editorial.90423.0930.r
|
ad8f67b685988ffb1aca8c7ed5074c91380689f4
|
[] |
no_license
|
narcis96/decrypting-alpha
|
f14a746ca47088ec3182d610bfb68d0d4d3b504e
|
5c665107017922d0f74106c13d097bfca0516e66
|
refs/heads/master
| 2021-08-22T07:27:31.764027
| 2017-11-29T12:00:20
| 2017-11-29T12:00:20
| 111,142,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,567
|
r
|
2002.07.09.editorial.90423.0930.r
|
Joita Tanase , procurorul general , colegul din tinerete al lui Adrian Nastase si un fel de cal breaz al Justitiei de astazi , a inaintat recurs in anulare pentru a ingropa decizia de faliment in cazul Bancii Internationale a Religiilor .
domnul Joita Tanase ( sau viceversa ) poate inainta orice fel de recurs .
daca are chef , poate cere Justitiei sa constate ca Mihai Viteazul nu e mort sau ca Banca Romana de Scont a lui Ovidiu Vintu e mai infloritoare ca niciodata . !
exact cind vrea muschii lui ( conform unei exprimari dragi poporului roman ) .
in fond , ceea ce a constatat o comisie parlamentara condusa de Radu Ciuceanu ( de la PRM ) reprezinta un serios temei pentru o asemenea actiune !
sint cel putin trei - patru personaje interesate sa se faca respiratie artificiala Bancii Internationale a Religiilor .
unul trebuie sa fie printre fratii Paunescu .
ei scriu de mai multa vreme in ziarul familiei ca BIR nu e moarta si ca BNR a comis un abuz .
din cite stim , clanul Paunescu ( cel care vrea si de la " Evenimentul zilei " o despagubire de peste 50 de milioane de dolari pentru a - si acoperi gaurile si datoriile ) nu este actionar la respectiva banca .
atunci , de ce sustine mortis resuscitarea unei banci din care n - a mai ramas decit gaoacea ?
raspunsul nu e greu de dat .
daca tine povestea cu " mortul " de la BIR , dupa acelasi principiu se poate trece la anularea falimentului Bankcoop .
nu erau acolo " fratii " nostri niste protagonisti ?
daca ne amintim bine , parca da .
si n - ar mai fi decit o mustata de soricel pina la incercarea " istorica " de a resuscita si Bancorex .
stiti care , stiti datoria , stiti povestea cu activele de la AVAB !
si ne - am trezi peste noapte prin 1994 , cind se puteau lua bani cu un telefon dat din Piata Victoriei sau de la SRI , sau de prin alte parti .
care va sa zica , procurorul general vrea o rejudecare in cazul Bancii Internationale a Religiilor !
cum el nu are habar de contabilitate , e firesc sa nu tina cont de un raport de audit ( adica de expertiza ) in care se spunea ca banca era indatorata pina - n git .
nu are cum sa priceapa ca o banca nu poate fi declarata super - atlet financiar , cita vreme ea nu mai are bani si nici oamenii nu o mai cred vie .
poate sa vina domnul Joita si cu decizie de la Tribunalul International de la Haga , cetateanul roman nu va depozita in BIR nici un bilet de tramvai .
mai mult , povestea halucinanta a acestei banci ( cea care a dat milioane de dolari pe niste gablonzuri si pe un contract de fotbalist ) aduce acum in prim - plan comportamentul celor care au condus - o .
intr - o reactie ca de zarzavagii veniti cu verzituri in Piata Matache , ei au cerut ori un imprumut de la Ministerul Finantelor , ori ca banca sa fie preluata la Trezoreria Statului .
acest demers seamana cu cel al parlamentarului aflat la putere care s - a dus cu un prieten la Banca Nationala a Romaniei si le - a zis oamenilor de acolo , ia sa - i dati astuia un credit de citeva milioane de dolari ca vrea sa exporte niste porci in China !
toate somitatile financiare , inclusiv Asociatia Romana a Bancilor , atrag atentia asupra stupiditatii operatiunii declansate de procurorul general al Romaniei .
Paunestii si Radu Ciuceanu se ciucesc indaratul recursului , iar noi sintem chemati sa impartim un proiect de aventura financiara .
miine - poimiine , Radu Ciuceanu va cere resuscitarea Elenei Ceausescu .
Joita Tanase ( sau viceversa ) va introduce recurs in anulare .
iar restul bancilor inmormintate vor astepta si ele cuminti la coada !
|
544257faf74e34b01d83f642a27e200e7125fbe2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spdep/examples/plot.nb.Rd.R
|
e527906b26095e02474cffe339f2d8d433e472f6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
plot.nb.Rd.R
|
library(spdep)
### Name: plot.nb
### Title: Plot a neighbours list
### Aliases: plot.nb plot.listw
### Keywords: spatial
### ** Examples
if (require(rgdal, quietly=TRUE)) {
example(columbus, package="spData")
coords <- coordinates(columbus)
plot(col.gal.nb, coords)
title(main="GAL order 1 links with first nearest neighbours in red")
col.knn <- knearneigh(coords, k=1)
plot(knn2nb(col.knn), coords, add=TRUE, col="red", length=0.08)
}
|
3de30c6a497679ce95390d0cc6c2656aa5cc4cf6
|
c459dd32d88158cb064c3af2bc2ea8c7ab77c667
|
/findmarkers/findmarkers_by_celltype/annotate_markers/annotate_cellgroup4_degs_to_cellgroups.R
|
da2efcc30b7e5e45667809196530ecc04dbccc46
|
[] |
no_license
|
ding-lab/ccRCC_snRNA_analysis
|
d06b8af60717779671debe3632cad744467a9668
|
ac852b3209d2479a199aa96eed3096db0b5c66f4
|
refs/heads/master
| 2023-06-21T15:57:54.088257
| 2023-06-09T20:41:56
| 2023-06-09T20:41:56
| 203,657,413
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,019
|
r
|
annotate_cellgroup4_degs_to_cellgroups.R
|
# Yige Wu @WashU Aug 2020
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
source("./ccRCC_snRNA_analysis/plotting.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input the DEG list
genes_df <- fread(input = "./Resources/Analysis_Results/findmarkers/findmarkers_by_celltype/filter_markers/filter_markers_wilcox_bygroup/20200916.v1/findallmarkers_wilcox_bycellgroup.pos.logfcthreshold0.1.minpct0.1.mindiffpct0.1.Top50avg_logFC.tsv", data.table = F)
# write output ------------------------------------------------------------
|
bb06333591dcc773fa36913ce378202eaab089ba
|
b9015fad42a295b16fc041cba41b0475612948b3
|
/preprocessing/interpolation_epa_subset_others.R
|
822ba7897c0bc7e3d5d2f59012a14146877d80b7
|
[
"MIT"
] |
permissive
|
rgualan/soton-data-science-thesis
|
894b228c842c480a2da22aaa9ada4c6ea6204472
|
dc50e7bc73e6b63bc5b53c10a1ebdfdefafb117b
|
refs/heads/master
| 2021-06-25T09:38:23.210947
| 2017-09-07T11:31:17
| 2017-09-07T11:31:17
| 94,209,694
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,691
|
r
|
interpolation_epa_subset_others.R
|
## Clean environment
rm(list=ls())
## Load required packages ###
library(fields)
library(raster)
library(spatial.tools)
library(gdalUtils)
library(rgdal)
library(gstat)
library(automap)
library(imputation)
library(spTimer)
library(kknn)
source("util/my_helper.R")
## Global variables
paper <- setupPaper()
## Read data ##############################################################################
epa <- readRDS("data/epa/epa_daily/2016/california_ozone_plus_cov.RDS")
## Add date covariates
epa <- addDoyField(epa)
epa <- addDowField(epa)
## Sites
sites <- getSites(epa)
#str(epa)
## Scale target variable
epa$sOzone <- scale(epa$Ozone)[,1] ## scale returns a matrix
## Use stations with data in the chosen covariates
covByStation <- aggregate(cbind(Ozone,Temperature)~Station.Code,epa,length)
covByStation <- covByStation[covByStation$Ozone>350 & covByStation$Temperature>350,]
epa <- epa[epa$Station.Code %in% covByStation$Station.Code,]
#apply(epa,2,function(x){sum(is.na(x))})
## NOTE
## WARNING: Count the amount of data of the variables
## Wind speed and RH, in case they are used as covariates
## Plot "semi-complete" stations and test station
## Chose a test station for injecting missing data
## For the slightly isolated by central position: 107-0009
# sites <- getSites(epa2)
# sites[sites$Longitude> -119 & sites$Longitude< -118.5 & sites$Latitude> 36 & sites$Latitude<37.5, ]
testStation <- "107-0009"
plotStations(paper, covByStation$Station.Code,
"img/preprocessing/idw/complete_stations.jpeg",6,6, testStation)
## Inject a random block of missing data and sparse missing data ###############
## and some random missing data
## sOzone : original (intact)
## sOzone2 : original with injected missing data (predicted)
epa$sOzone2 <- epa$sOzone
set.seed(1345)
randomDay <- sample(getStudyDays(), 1)
epa$sOzone2[epa$Station.Code == testStation &
epa$Date>=randomDay & epa$Date<=(randomDay+15*24*60*60)] <- NA
rds <- sample(getStudyDays(), 10)
for(i in 1:length(rds)){
epa$sOzone2[epa$Station.Code == testStation &
epa$Date>=rds[i] & epa$Date<=(rds[i]+2*24*60*60)] <- NA
}
#plot(sOzone2~Date,epa[epa$Station.Code==testStation,],type="l")
## GBM ####################################################################################
## Design Matrix
#apply(epa[epa$Station.Code==testStation,],2,function(x){sum(is.na(x))})
#apply(epa[epa$Station.Code!=testStation,],2,function(x){sum(is.na(x))})
epaGbm <- epa[!is.na(epa$Temperature),]
X <- epaGbm[,c("sOzone2","Temperature","UTM.X","UTM.Y","Elevation","Location.Setting","Doy","Dow.name")]
#apply(X,2,function(x){sum(is.na(x))})
## Generalized Boosted Regression
ticToc(
gbm.fit <- gbmImpute(X, max.iters = 2, cv.fold = 5, verbose=T)
)
epaGbm$sOzone3 <- gbm.fit$x$sOzone2[,1] ## The output is a matrix
# dim(epaGbm[epaGbm$Station.Code==testStation,])
# plot(sOzone~Date,epaGbm[epaGbm$Station.Code==testStation,],type="l")
# lines(sOzone3~Date,epaGbm[epaGbm$Station.Code==testStation,],col=2)
## Gaussian Process ######################################################################
## First, it is necessary to fill the missing values in Temperature for training
epaGp <- epa
## Simple test
if(F){
epaGp$Temperature[epaGp$Station.Code==testStation &
epaGp$Date>="2016-05-01" & epaGp$Date<="2016-05-15"] <- NA
a <- imputeTS::na.kalman(epaGp$Temperature[epaGp$Station.Code==testStation]) ## Simple imputation
a <- imputeTS::na.ma(epaGp$Temperature[epaGp$Station.Code==testStation]) ## Simple imputation
plot(epaGp[epaGp$Station.Code==testStation,]$Date,a,col=2,type="l")
lines(Temperature~Date,epaGp[epaGp$Station.Code==testStation,],type="l",lwd=2)
}
epaGp$Temperature <- imputeTS::na.ma(epaGp$Temperature)
ticToc(
simpleGp <- spT.Gibbs(
#formula = sOzone2~Temperature+Elevation+Location.Setting+Doy+Dow.number,
formula = sOzone2~Temperature+Elevation+Doy+Dow.number,
model = "GP",
data = epaGp[epaGp$Station.Code!=testStation,],
coords = ~Longitude + Latitude, #scale.transform = "SQRT",
#newdata = epaGp[is.na(epaGp$sOzone2),],
#newcoords = ~Longitude + Latitude,
#time.data = spT.time(366),
spatial.decay = spT.decay(distribution = Gamm(2, 1), tuning = 0.1))
)
simpleGp.pred <- predict(simpleGp, newdata=epaGp[epaGp$Station.Code==testStation,],
newcoords = ~Longitude + Latitude)
## Kernel K-nearest neighbours ##################################################################
# Option a) incomplete
# When using an algorithm where the output depends on distance calculation (as is the case in
# k-nearest-neighbors) it is recommended to first scale the data
# Option b) functional
# This nearest neighbor method expands knn in several directions. First it can be used not only for
# classification, but also for regression and ordinal classification. Second it uses kernel functions
# to weight the neighbors according to their distances. In fact, not only kernel functions but every
# monotonic decreasing will work fine.
# Is used in rattle
epaKnn <- epa[,c("Station.Code","sOzone2","Temperature","Elevation","Doy","Dow.number")]
#epaKnn[,-1] <- scale(epaKnn[,-1])
fit.kknn <- kknn(sOzone2 ~ Temperature+Elevation+Doy+Dow.number,
epaKnn[epaKnn$Station.Code!=testStation & !is.na(epaKnn$sOzone),],
epaKnn[epaKnn$Station.Code==testStation,])
epaKnn$sOzone5[epaKnn$Station.Code==testStation] <- fit.kknn$fitted.values
# Generated with rattle
# Analyze
# randomForest(formula = sOzone2 ~ .,
# data = crs$dataset[, c(crs$input, crs$target)],
# ntree = 2000, mtry = 3, importance = TRUE, replace = FALSE, na.action = randomForest::na.roughfix)
## Test other methods
# fit2 <- SVDImpute(X2, k = 10, num.iters = 10, verbose=F)
# fit2 <- SVTImpute(X2, lambda = 0.1, verbose=F)
# fit2 <- kNNImpute(X, k = 3, verbose = F)
## These methods did not work
## The librayr is not being mantained any more
## Check that the missing data was replaced
# sum(is.na(fit$x)); head(epaGp); head(fit$x)
## Assemble dataframe again ############################################################
test <- epaGbm[epaGbm$Station.Code==testStation,]
test$sOzone4 <- simpleGp.pred$Mean
test$sOzone5 <- epaKnn$sOzone5[epaKnn$Station.Code==testStation]
test2 <- rbind(data.frame(Date=test$Date,Ozone=test$sOzone,Type="Original", Flag=is.na(test$sOzone2)),
data.frame(Date=test$Date,Ozone=test$sOzone3,Type="GBM", Flag=F),
data.frame(Date=test$Date,Ozone=test$sOzone4,Type="GP", Flag=F),
data.frame(Date=test$Date,Ozone=test$sOzone5,Type="KKNN", Flag=F))
printPlot(paper, "img/preprocessing/idw/ts_ozone_others.jpeg",7,4, FUN=function(){
p<-ggplot(test2, aes(x=Date, y=Ozone, colour=Type)) +
annotate("rect",
xmin=test2$Date[test2$Flag]-1*24*60*60,
xmax=test2$Date[test2$Flag]+1*24*60*60,
ymin=-Inf, ymax=Inf, alpha=0.75, fill="lightyellow") +
geom_line() +
theme(legend.position = "top") +
labs(y="Scaled(Ozone)")
print(p)
})
# Calculate
mGbm <- evaluatePredictions(test$sOzone[is.na(test$sOzone2)], test$sOzone3[is.na(test$sOzone2)])
mGp <- evaluatePredictions(test$sOzone[is.na(test$sOzone2)], test$sOzone4[is.na(test$sOzone2)])
mKknn <- evaluatePredictions(test$sOzone[is.na(test$sOzone2)], test$sOzone5[is.na(test$sOzone2)])
metrics <- rbind(mGbm,mGp,mKknn)
rownames(metrics) <- c("gbm","gp","kknn")
round(metrics, digits = 3)
## Notes:
## Imputation is not Interpolation
## So, it shall be tested in a different way
## Not trying to model a whole time series of an station, but only missing portions
|
77d7ad83fb0dac54a08e71b58371538855364a34
|
ff9eb712be2af2fa24b28ecc75341b741d5e0b01
|
/man/EPA.89b.sulfate.df.Rd
|
01dcda4a903c7bdf3c2bc18fe239f9e85a3d7d80
|
[] |
no_license
|
alexkowa/EnvStats
|
715c35c196832480ee304af1034ce286e40e46c2
|
166e5445d252aa77e50b2b0316f79dee6d070d14
|
refs/heads/master
| 2023-06-26T19:27:24.446592
| 2023-06-14T05:48:07
| 2023-06-14T05:48:07
| 140,378,542
| 21
| 6
| null | 2023-05-10T10:27:08
| 2018-07-10T04:49:22
|
R
|
UTF-8
|
R
| false
| false
| 922
|
rd
|
EPA.89b.sulfate.df.Rd
|
\name{EPA.89b.sulfate.df}
\alias{EPA.89b.sulfate.df}
\docType{data}
\title{
Sulfate Concentrations from 1989 USEPA Guidance Document
}
\description{
Sulfate concentrations (mg/L). Nondetects reported as \code{<1450}.
}
\usage{data(EPA.89b.sulfate.df)}
\format{
A data frame with 24 observations on the following 3 variables.
\describe{
\item{\code{Sulfate.orig}}{a character vector of original sulfate concentration (mg/L)}
\item{\code{Sulfate}}{a numeric vector of sulfate concentations with \code{<1450} coded as \code{1450}}
\item{\code{Censored}}{a logical vector indicating which observations are censored}
}
}
\source{
USEPA. (1989b). \emph{Statistical Analysis of Ground-Water Monitoring Data at RCRA Facilities, Interim Final Guidance}.
EPA/530-SW-89-026. Office of Solid Waste, U.S. Environmental Protection Agency, Washington, D.C. p.8-9.
}
\keyword{datasets}
|
b55cd4e5fd3d67fced4117ca132e9e1d29303588
|
ed8db4d0856d644f1182b09033a0b72c819d461f
|
/R/classMethod_plotStoichiometry.r
|
da1c3a0176554421fb9c2630e9c47ed75de1630b
|
[] |
no_license
|
dkneis/rodeo
|
f53e2157f0142574e2b3ead14e4e3fffe9d5ac4d
|
d976d5dd2a439f1df4b4e6b1446b4a62cc82fec7
|
refs/heads/master
| 2022-02-18T14:29:42.324322
| 2022-01-31T16:49:14
| 2022-01-31T16:49:14
| 30,355,625
| 7
| 4
| null | 2015-07-28T15:59:28
| 2015-02-05T13:01:50
|
R
|
UTF-8
|
R
| false
| false
| 4,008
|
r
|
classMethod_plotStoichiometry.r
|
#' Plot Qualitative Stoichiometry Matrix
#'
#' Visualizes the stoichiometry matrix using standard plot methods. The sign
#' of stoichiometric factors is displayed as upward and downward pointing
#' triangles. Also visualized are dependencies of process rates on variables.
#'
#' @name plotStoichiometry
#'
#' @param box A positive integer pointing to a spatial sub-unit of the model.
#' @param time Time. The value is ignored in the case of autonomous models.
#' @param cex Character expansion factor.
#' @param colPositive Color for positive stoichiometric factors.
#' @param colNegative Color for negative stoichiometric factors.
#' @param colInteract Color used to highlight dependencies.
#' @param colBack Color of background.
#' @param colGrid Color of a grid.
#' @param lwdGrid Grid line width.
#' @param translateVars Optional function to recode variable labels.
#' Must take the original vector as argument and return the altered version.
#' @param translatePros Optional function to recode process labels.
#' Must take the original vector as argument and return the altered version.
#'
#' @return NULL
#'
#' @note The values of state variables and parameters must have been set using
#' the \code{\link{setVars}} and \code{\link{setPars}} methods. If the
#' stoichiometric factors are mathematical expressions involving
#' function references, these functions must be defined in R (even if the
#' numerical computations are based on generated Fortran code).
#'
#' @author \email{david.kneis@@tu-dresden.de}
#'
#' @seealso See other methods of the \code{\link{rodeo-class}} or
#' \code{\link{stoichiometry}} for computing the stoichiometric factors only.
#' Alternative options for displaying stoichiometry information are described
#' in the package vignette.
#'
#' @examples
#' data(vars, pars, funs, pros, stoi)
#' model <- rodeo$new(vars, pars, funs, pros, stoi, dim=c(1))
#' model$setVars(c(bac=0.1, sub=0.5))
#' model$setPars(c(mu=0.8, half=0.1, yield= 0.1, vol=1000, flow=50, sub_in=1))
#' monod <- function(c,h) {c/(c+h)}
#' model$plotStoichiometry(box=c(1))
rodeo$set("public", "plotStoichiometry", function(box, time=0, cex=1,
shade=TRUE, colPositive="tomato3", colNegative="steelblue4",
colInteract="grey", colBack="lightgrey", colGrid="white", lwdGrid=1,
translateVars=NULL, translatePros=NULL
) {
if (is.null(translateVars)) translateVars <- function(x) {x}
if (is.null(translatePros)) translatePros <- function(x) {x}
m <- self$stoichiometry(box=box, time=time)
if (!all(is.finite(m))) {
stop("non-finite elements in stoichiometry matrix")
}
s <- replace(m, 1:length(m), colBack)
if (shade) {
for (pro in rownames(s)) {
expr <- private$prosTbl$expression[private$prosTbl$name==pro]
ident <- extractIdentifiers(expr)
v <- colnames(s)[colnames(s) %in% ident]
if (length(v) > 0) {
s[pro, v] <- colInteract
}
}
}
dx <- 0.2
dy <- sqrt((dx**2)/2)
mar <- 0.5
xmin <- 1-mar
xmax <- ncol(m)+mar
ymin <- 1-mar
ymax <- nrow(m)+mar
plot(0, 0, xlim=c(xmin, xmax), ylim=c(ymin, ymax), type="n",
bty="n", xaxt="n", yaxt="n", xlab="", ylab="")
rect(xleft=xmin, xright=xmax, ybottom=ymin, ytop=ymax, col=colBack, border=NA)
mtext(side=3, at=1:ncol(m), translateVars(colnames(m)), line=0.5, las=2, cex=cex)
mtext(side=2, at=nrow(m):1, translatePros(rownames(m)), line=0.5, las=2, cex=cex)
for (ic in 1:ncol(m)) {
for (ir in 1:nrow(m)) {
rect(xleft=ic-0.5, xright=ic+0.5, ybottom=nrow(m)+1-(ir-0.5), ytop=nrow(m)+1-(ir+0.5),
col=s[ir,ic], border=NA)
if (m[ir,ic] > 0) polygon(x=c(ic-dx,ic+dx,ic,ic-dx),
y=nrow(m)+1-c(ir+dy,ir+dy,ir-dy,ir+dy), col=colPositive, border=NA)
if (m[ir,ic] < 0) polygon(x=c(ic-dx,ic+dx,ic,ic-dx),
y=nrow(m)+1-c(ir-dy,ir-dy,ir+dy,ir-dy), col=colNegative, border=NA)
}
}
abline(h=c((1:nrow(m))-0.5,nrow(m)+0.5),
v=c((1:ncol(m))-0.5,ncol(m)+0.5), lwd=lwdGrid, col=colGrid)
return(invisible(NULL))
})
|
42498c31922bcd4f31befbf36f739ef905956c7f
|
215ca85a9ff709d1e221b3165e7e10eb783cf29c
|
/man/dots_as_list.Rd
|
45230365ab513301fc7d3bc90518b53ea1ed608e
|
[] |
no_license
|
dpcarballo/coloR
|
2b845eb3493038d5458db4c76ac25d449c92429c
|
2e1b2b3808d784cbf27a5d464fa8189e5faf4197
|
refs/heads/master
| 2020-05-04T17:06:06.094781
| 2019-04-25T11:38:31
| 2019-04-25T11:38:31
| 179,273,901
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 730
|
rd
|
dots_as_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dots_as_list.R
\name{dots_as_list}
\alias{dots_as_list}
\title{Standardizes the format of ... arguments. If a list is given, no
modification is made. Otherwise, a list is created with all arguments
in ...}
\usage{
dots_as_list(...)
}
\arguments{
\item{...}{Input to be converted into a list.}
}
\value{
A list containing the ... arguments regardless of whether they originally were a list or not.
}
\description{
Standardizes the format of ... arguments. If a list is given, no
modification is made. Otherwise, a list is created with all arguments
in ...
}
\examples{
identical(dots_as_list(3,4,5), dots_as_list(list(3,4,5)))
}
|
1e6dba013416226b442329b029d73041476f3b37
|
545b068c23bb88048d59d3aa7d5235fa6c33ff5f
|
/R/csppFull.R
|
1e65aea7f2077260b1d0a3f59b47426f900d94ad
|
[] |
permissive
|
colbrydi/cspp
|
fc0b3e749c1e902c7e370a10c30f2cc109266398
|
80a7acceb5bc215b68411bc9755116bc5a74a458
|
refs/heads/master
| 2021-05-22T19:07:09.110745
| 2020-04-18T06:02:26
| 2020-04-18T06:02:26
| 253,053,208
| 0
| 0
|
MIT
| 2020-04-04T17:02:28
| 2020-04-04T17:02:28
| null |
UTF-8
|
R
| false
| false
| 1,260
|
r
|
csppFull.R
|
#' Full data from IPPSR Correlates of State Policy Project v2.2
#'
#' The Correlates of State Policy Project includes more than 2000 variables,
#' with observations across the 50 U.S. states and across time (1900-2016, approximately).
#' These variables represent policy outputs or political, social, or economic factors
#' that may influence policy differences. The codebook includes the variable name,
#' a short description of the variable, the variable time frame,
#' a longer description of the variable, and the variable source(s) and notes.
#'
#' This aggregated dataset is only possible because many scholars and students have spent
#' countless hours creating, collecting, cleaning, and making data publicly available.
#' Thus, if you use the dataset, please cite the original data sources.
#'
#' @docType data
#'
#' @usage data(csppFull)
#'
#' @format A data frame of 6120 rows and 2091 variables. Each observation is year-state.
#'
#' @keywords datasets
#'
#' @references
#' Jordan, Marty P. and Matt Grossmann. 2020. The Correlates of State Policy Project v.2.2.
#' East Lansing, MI: Institute for Public Policy and Social Research (IPPSR).
#'
#' @source \href{http://ippsr.msu.edu/public-policy/correlates-state-policy}
"csppFull"
|
9fc16d07987a66e0bd7bf1d01c88db785b328b0d
|
ef499c17b1a1c7aca1a8c52c03ad29e2d24ae80d
|
/Extra/envir.R
|
0306feb1849490741778ce79b885fe10a6e78188
|
[
"MIT"
] |
permissive
|
Protonk/debugcast
|
a554cb495ebf77c507dd0c60460bf11245ca07d1
|
39372bbb4280afc1c7cc53c26f73e8783f6bbf15
|
refs/heads/master
| 2016-09-06T05:47:43.064075
| 2013-07-01T15:50:28
| 2013-07-01T15:50:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,029
|
r
|
envir.R
|
## Reasoning about Scope in R
# R is lexically scoped and uses function closures to build scopes
# see http://darrenjw.wordpress.com/2011/11/23/lexical-scope-and-function-closures-in-r/
# for some more on that.
## Borrowing examples from that post...
a <- 1
b <- 2
f<-function( x ) {
a * x + b
}
g<-function( x ) {
a <- 2
b <- 1
f( x ) # remember, a * x + b
}
g( 2 )
## In JS it's similar (whew!)
var a = 1;
var b = 2;
function f( x ) {
return a * x + b;
}
function g( x ) {
var a = 2;
var b = 1;
return f ( x );
}
g( 2 )
## following along in that post, we can exploit function closures
## to give us what may be the expected outcome
a <- 1
b <- 2
f <- function( a, b ) {
return( function( x ) {
a * x + b
})
}
g <- f( 2, 1 )
g( 2 )
## We can also use the debugging tools to help us reason about scope
## in R.
stuff <- function( x ) {
len <- length( x )
{
y <- 1:10
z <- sys.status()
# if we wrap sys.status with print, we'll see another frame
print(z)
}
return(len * y)
}
# we can also see
as.list(body(stuff))
# to see how blocks are useful.
# Like JS there is no block scope in R
# If there were, we could enter in to a frame created by the block
## So this is all well and good, but what does that mean for debugging?
# Under the hood, scope is managed as environments.
# in 99% of cases, this is merely an implementation detail
# but since r uses it for variable lookup we want to pay close attention
# and environments in R are treated as first class citizens, which can be weird.
# http://cran.r-project.org/doc/manuals/R-lang.html#Environment-objects
# in the chrome dev tools and firebug we can get access to the scope chain
# and the local variables in it
# with R, there's no distinction. Environments contain variable references
# and a reference to the enclosing scope.
# because they're first class citizens we can manipulate them,
# assign objects in them arbitrarily and
# explicitly reference them or objects inside them from any point.
|
31f793b9a8b6a9a215b986567e25899cb3451c74
|
01590bb4a4f5e2a3ec115b84e885cd565d3eef87
|
/man/wilkinson.Rd
|
66898af67af8bf2762421a4ac78576e7041af236
|
[] |
no_license
|
cran/labeling
|
09b1faa60e804dfce3a0e82077bf7c8c730e3b81
|
f0cca6d69a746b8bffd9f8fd170776c1ae2775d9
|
refs/heads/master
| 2021-05-15T01:55:59.170010
| 2020-10-20T06:00:09
| 2020-10-20T06:00:09
| 17,696,963
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,040
|
rd
|
wilkinson.Rd
|
\name{wilkinson}
\alias{wilkinson}
\title{Wilkinson's labeling algorithm}
\usage{
wilkinson(dmin, dmax, m,
Q = c(1, 5, 2, 2.5, 3, 4, 1.5, 7, 6, 8, 9),
mincoverage = 0.8,
mrange = max(floor(m/2), 2):ceiling(6 * m))
}
\arguments{
\item{dmin}{minimum of the data range}
\item{dmax}{maximum of the data range}
\item{m}{number of axis labels}
\item{Q}{set of nice numbers}
\item{mincoverage}{minimum ratio between the the data
range and the labeling range, controlling the whitespace
around the labeling (default = 0.8)}
\item{mrange}{range of \code{m}, the number of tick
marks, that should be considered in the optimization
search}
}
\value{
vector of axis label locations
}
\description{
Wilkinson's labeling algorithm
}
\note{
Ported from Wilkinson's Java implementation with some
changes. Changes: 1) m (the target number of ticks) is
hard coded in Wilkinson's implementation as 5. Here we
allow it to vary as a parameter. Since m is fixed,
Wilkinson only searches over a fixed range 4-13 of
possible resulting ticks. We broadened the search range
to max(floor(m/2),2) to ceiling(6*m), which is a larger
range than Wilkinson considers for 5 and allows us to
vary m, including using non-integer values of m. 2)
Wilkinson's implementation assumes that the scores are
non-negative. But, his revised granularity function can
be extremely negative. We tweaked the code to allow
negative scores. We found that this produced better
labelings. 3) We added 10 to Q. This seemed to be
necessary to get steps of size 1. It is possible for
this algorithm to find no solution. In Wilkinson's
implementation, instead of failing, he returns the
non-nice labels spaced evenly from min to max. We want
to detect this case, so we return NULL. If this happens,
the search range, mrange, needs to be increased.
}
\author{
Justin Talbot \email{justintalbot@gmail.com}
}
\references{
Wilkinson, L. (2005) The Grammar of Graphics,
Springer-Verlag New York, Inc.
}
|
ca709b848c83c13e11263599468898beb1783256
|
1e7e4b09aa76962fbb0469de67f37fed5dc826ad
|
/tests/testthat.R
|
815989f552f0ebfb69684fb5eeb6827b801b8545
|
[] |
no_license
|
davesteps/projectNickel
|
ecc67ad804e0f3f6119913709a49374ae053cd1f
|
7fc7a647fb4de461911160620cf1929dc4925e2f
|
refs/heads/master
| 2020-03-31T03:36:26.173380
| 2019-11-30T18:21:13
| 2019-11-30T18:21:13
| 151,871,290
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
testthat.R
|
library(testthat)
library(projectNickel)
test_check("projectNickel")
|
25b3f622dd65538d791fb33365b1697f21b046c5
|
0e6d8c50bd6c0ef5e3c97b17626bb42c9e3d8eff
|
/R/design_nb.R
|
b760f457b5504507d60b82bbc54ea5b3f3973d08
|
[] |
no_license
|
tobiasmuetze/gscounts
|
e04903db1993df538065cc427c45f01d2904796f
|
1c614a3fd36be86a5608b83df91df040fbf0d98d
|
refs/heads/master
| 2021-11-23T23:36:17.203662
| 2021-11-01T16:14:35
| 2021-11-01T16:14:35
| 92,069,741
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,010
|
r
|
design_nb.R
|
#' @name design_nb
#' @title Clinical trials with negative binomial outcomes
#' @description Design a clinical trial with negative binomial outcomes
#' @param rate1 numeric; assumed rate of treatment group 1 in the alternative
#' @param rate2 numeric; assumed rate of treatment group 2 in the alternative
#' @param dispersion numeric; dispersion (shape) parameter of negative binomial distribution
#' @param power numeric; target power
#' @param ratio_H0 numeric; positive number denoting the rate ratio rate_1/rate_2
#' under the null hypothesis, i.e. the non-inferiority or superiority margin
#' @param sig_level numeric; Type I error / significance level
#' @param random_ratio numeric; randomization ratio n1/n2
#' @param t_recruit1 numeric vector; recruit (i.e. study entry) times in group 1
#' @param t_recruit2 numeric vector; recruit (i.e. study entry) times in group 2
#' @param study_period numeric; study duration
#' @param accrual_period numeric; accrual period
#' @param accrual_speed numeric; determines accrual speed; values larger than 1
#' result in accrual slower than linear; values between 0 and 1 result in accrual
#' faster than linear.
#' @param followup_max numeric; maximum exposure time of a patient
#' @return A list containing the following components:
#' \item{rate1}{as input}
#' \item{rate2}{as input}
#' \item{dispersion}{as input}
#' \item{power}{as input}
#' \item{ratio_H0}{as input}
#' \item{ratio_H1}{ratio \code{rate1}/\code{rate2}}
#' \item{sig_level}{as input}
#' \item{random_ratio}{as input}
#' \item{t_recruit1}{as input}
#' \item{t_recruit2}{as input}
#' \item{study_period}{as input}
#' \item{followup_max}{as input}
#' \item{max_info}{maximum information}
#' @examples
#' # Calculate sample size for given accrual period and study duration assuming uniformal accrual
#' out <- design_nb(rate1 = 0.0875, rate2 = 0.125, dispersion = 5, power = 0.8,
#' ratio_H0 = 1, sig_level = 0.025,
#' study_period = 4, accrual_period = 1, random_ratio = 2)
#' out
#'
#' # Calculate sample size for a fixed exposure time of 0.5 years
#' out <- design_nb(rate1 = 4.2, rate2 = 8.4, dispersion = 3, power = 0.8,
#' ratio_H0 = 1, sig_level = 0.025,
#' followup_max = 0.5, random_ratio = 2)
#' out
#'
#' # Calculate study period for given recruitment time
#' t_recruit1 <- seq(0, 1.25, length.out = 1200)
#' t_recruit2 <- seq(0, 1.25, length.out = 800)
#' out <- design_nb(rate1 = 0.0875, rate2 = 0.125, dispersion = 5, power = 0.8,
#' ratio_H0 = 1, sig_level = 0.025,
#' t_recruit1 = t_recruit1, t_recruit2 = t_recruit2)
#' @import stats
#' @export
design_nb <- function(rate1, rate2, dispersion, power, ratio_H0 = 1, sig_level,
                      random_ratio = 1, t_recruit1 = NULL,
                      t_recruit2 = NULL, study_period = NULL, accrual_period = NULL,
                      followup_max = NULL,
                      accrual_speed = 1) {
  # Snapshot of the inputs so they can be returned alongside the results
  arguments <- as.list(environment())
  # Error check for accrual speed
  if (accrual_speed <= 0) stop("accrual_speed must be positive")
  # Maximum information required to obtain the target power for the
  # one-sided test of H0: rate1/rate2 = ratio_H0 at level sig_level
  max_info <- (qnorm(1-sig_level) + qnorm(power))^2 / log(rate1 / rate2 / ratio_H0)^2
  isNull_fm <- is.null(followup_max)
  isNull_sp <- is.null(study_period)
  isNull_ap <- is.null(accrual_period)
  isNull_t1 <- is.null(t_recruit1)
  isNull_t2 <- is.null(t_recruit2)
  # Calculate the missing design value from whichever inputs were supplied.
  # BUGFIX: the recruitment-time conditions previously tested isNull_t1
  # twice and never looked at isNull_t2, so a missing t_recruit2 was not
  # caught and fell through to the wrong branch or a misleading error.
  if (all(!isNull_fm, isNull_sp, isNull_ap)) {
    # Calculate the sample size for a fixed individual follow-up
    out <- samplesize_from_followup(max_info = max_info, random_ratio = random_ratio,
                                    rate1 = rate1, rate2 = rate2, shape = dispersion,
                                    followup_max = followup_max)
  } else if (all(isNull_sp, isNull_fm, isNull_ap, !isNull_t1, !isNull_t2)) {
    # Calculate study period for given recruitment times
    out <- studyperiod_from_recruit(max_info = max_info, random_ratio = random_ratio,
                                    rate1 = rate1, rate2 = rate2, shape = dispersion,
                                    t_recruit1 = t_recruit1, t_recruit2 = t_recruit2)
  } else if (all(!isNull_sp, !isNull_ap, isNull_fm, isNull_t1, isNull_t2)) {
    # Calculate sample size for fixed accrual and study period (using uniform recruitment)
    out <- samplesize_from_periods(max_info = max_info, accrual_period = accrual_period,
                                   study_period = study_period,
                                   random_ratio = random_ratio,
                                   rate1 = rate1, rate2 = rate2, shape = dispersion,
                                   accrual_speed = accrual_speed)
  } else {
    stop("No appropriate combination of input arguments is defined")
  }
  # Prepend the key inputs to the helper's output and tag the class for
  # the print method.
  out <- c(arguments[c("rate1", "rate2", "dispersion", "power", "ratio_H0", "sig_level")], out)
  class(out) <- "nb"
  out
}
|
6219a6b5e2d15fb8537642c1da059a3a37180840
|
4c3c7bedd64dae0d8726739393f38d6863b32d8f
|
/R/modelSelection-methods.R
|
b8496df5da1d25a0c7429bff8cd13b47ecb5a36e
|
[] |
no_license
|
ste-depo/INSPEcT
|
11425638964219b0b619cc75b13f17dc1e4682e6
|
e56818c3166c95809c1b05ead6b36b7395559932
|
refs/heads/master
| 2022-12-20T01:06:23.322215
| 2020-09-29T15:22:08
| 2020-09-29T15:22:08
| 113,035,680
| 5
| 2
| null | 2020-08-09T08:13:42
| 2017-12-04T11:45:43
|
R
|
UTF-8
|
R
| false
| false
| 1,393
|
r
|
modelSelection-methods.R
|
#' @rdname modelSelection
#'
#' @description
#' Method to visualize the criteria used to assess variability of rates.
#'
#' @param object An object of class INSPEcT or INSPEcT_model
#'
#' @return
#' \itemize{
#' \item modelSelection 'aic' compares nested models closest to the one with lowest AIC, 'llr' compares all nested models,
#' 'hib' is a mix between the previous two. (default 'aic')
#' \item preferPValue a logical, if TRUE (default) limit the search for best models to the ones that succeeded the goodness-of-fit test.
#' \item padj a logical, if TRUE (default) correct the p-values for multiple testing
#' \item goodness_of_fit a numeric, the threshold for the goodness-of-fit test (default = .1)
#' \item variability a numeric, a vector with the thresholds for the variability test (one threshold for each rate, default = c('s'=.05, 'p'=.05, 'd'=.05))
#' \item limitModelComplexity a logical that limits the complexity of the function used to describe dynamics to the length of the time-course (default = FALSE)
#'
#' }
#' @examples
#' nascentInspObj10 <- readRDS(system.file(package='INSPEcT', 'nascentInspObj10.rds'))
#' modelSelection(nascentInspObj10)
setMethod('modelSelection', 'INSPEcT', function(object) {
	# Delegate to the INSPEcT_model method on the embedded model slot.
	modelSelection(object@model)
})
#' @rdname modelSelection
setMethod('modelSelection', 'INSPEcT_model', function(object) {
	# The model-selection parameters are stored directly in the params slot.
	object@params
})
|
2fcad8e03210dcc869f1ccca105045cbf91e0f03
|
2fd6208ee163b1f959d9960c7fad4c64da96d649
|
/R/utils_assert.R
|
bf3d236a552dcd5955834ca01c63556c8de9b001
|
[
"MIT"
] |
permissive
|
ropensci/bold
|
c1d661c27e8a44e5ec5f8b123e9d8b8ab3b25fd0
|
dc46f3be5f0dc5404e9288a9836739d12d127207
|
refs/heads/master
| 2023-07-22T16:04:23.257812
| 2023-06-13T16:19:44
| 2023-06-13T16:19:44
| 1,950,836
| 17
| 16
|
NOASSERTION
| 2023-09-08T19:49:36
| 2011-06-25T03:23:46
|
R
|
UTF-8
|
R
| false
| false
| 4,529
|
r
|
utils_assert.R
|
# Validate that `x` has an expected length and/or class, aborting with a
# single combined error message when either check fails.
#
# x            object to validate.
# what         class name(s) accepted by inherits().
# name         label used in error messages; defaults to the caller's
#              expression for `x` (via substitute(), which must run here,
#              not in a helper, to capture the right frame).
# check.length expected length (see b_assert_length), or NULL/FALSE to
#              skip the length check.
b_assert <- function(x,
                     what,
                     name = NULL,
                     check.length = NULL) {
  if (!length(name)) {
    name <- substitute(x)
  }
  # Length check: stops immediately when `x` is non-empty; for empty `x`
  # the message text is collected so it can be combined with the class
  # message below.
  msgLen <- if (length(check.length) && !isFALSE(check.length)) {
    b_assert_length(x = x, len = check.length, name = name,
                    stopOnFail = length(x) > 0)
  } else {
    NULL
  }
  # Class check only for non-empty input; returns a message fragment
  # (phrased as a continuation when a length message already exists)
  # instead of stopping, so both failures are reported together.
  msgClass <- if (length(x)) {
    b_assert_class(x = x, what = what, name = name,
                   is2nd = length(msgLen), stopOnFail = FALSE)
  } else {
    NULL
  }
  msg <- c(msgLen, msgClass)
  if (length(msg)) {
    stop(msg, call. = FALSE)
  }
}
# Check that `x` inherits from one of the classes in `what`.
# On failure either stop (stopOnFail = TRUE) or return the message text;
# when `is2nd` is truthy the message is phrased as a continuation
# (" and of class ...") of a preceding length message.
# Returns NULL when the check passes.
b_assert_class <- function(x, what, name, is2nd = FALSE, stopOnFail = TRUE) {
  if (inherits(x, what)) {
    return(NULL)
  }
  emit <- if (stopOnFail) stop else paste0
  if (is2nd) {
    emit(" and of class ", b_ennum(what, "or"))
  } else {
    emit("'", name, "' must be of class ", b_ennum(what, "or"))
  }
}
# Check the length of `x` against `len`:
#   len == 0  means "must be non-empty" (any positive length is fine)
#   len  > 0  means "must be exactly this length"
# A non-integer or negative `len` disables the check. On failure either
# stops (stopOnFail = TRUE) or returns the message text; otherwise the
# result is NULL.
b_assert_length <- function(x, len, name, stopOnFail = TRUE) {
  len <- as.integer(len)
  if (is.na(len) || len < 0) {
    return(invisible(NULL))
  }
  emit <- if (stopOnFail) stop else paste0
  n <- length(x)
  if (len == 0 && n == 0) {
    emit("'", name, "' can't be empty")
  } else if (len > 0 && n != len) {
    emit("'", name, "' must be length ", len)
  }
}
# Coerce a scalar flag-like value to TRUE or FALSE.
# Accepts "true"/"1" (-> TRUE) and "false"/"0"/"na" (-> FALSE),
# case-insensitively; anything else aborts. Note that "na" is mapped to
# FALSE by design here, not to NA.
#
# x    scalar to interpret as a logical.
# name label for error messages; defaults to the caller's expression.
b_assert_logical <- function(x, name = NULL) {
  # BUGFIX: resolve the display name *before* it is used; previously a
  # NULL name was forwarded to b_assert_length(), so a length failure
  # produced a message like "'' must be length 1" with no variable name.
  if (!length(name)) name <- substitute(x)
  b_assert_length(x, len = 1L, name = name)
  x <- tolower(x)
  if (x == "true" || x == "1")
    TRUE
  else if (x == "false" || x == "0" || x == "na")
    FALSE
  else
    stop("'", name, "' should be one of TRUE or FALSE")
}
# Ensure every element of `x` is one of `choices`; abort (without the
# call in the message) listing the offending values and the valid
# choices otherwise. Returns NULL invisibly on success.
b_validate <- function(x, choices, name){
  bad <- !x %in% choices
  if (!any(bad)) {
    return(invisible(NULL))
  }
  stop(
    b_ennum(x[bad], quote = TRUE),
    if (sum(bad) > 1) " are not valid " else " is not a valid ",
    name,
    "\nChoices are ",
    b_ennum(choices, join_word = "or", quote = TRUE),
    call. = FALSE
  )
}
# Map common shorthand spellings of the BOLD database names onto their
# canonical identifiers; values that match none of the patterns are
# returned unchanged. Matching is case-insensitive and assumes a scalar
# input (as in the original if/else chain).
b_get_db <- function(x){
  opts <- list(case_insensitive = TRUE)
  patterns <- c(
    "COX1"                = '^COX[1I]$',
    "COX1_SPECIES_PUBLIC" = '^pub(lic)?$|_public$',
    "COX1_SPECIES"        = '^spe(cies)?$|_species$',
    "COX1_L640bp"         = '^(cox[1I]_)?(l640)?bp$'
  )
  for (db in names(patterns)) {
    if (b_detect(x, patterns[[db]], opts_regex = opts)) {
      return(db)
    }
  }
  x
}
# Validate and canonicalise the `db` argument: must be a single string,
# is expanded via b_get_db() and checked against the known databases.
b_assert_db <- function(x){
  b_assert(x, "character", name = "db", check.length = 1L)
  db <- b_get_db(x)
  b_validate(db, choices = b_db, name = "db")
  db
}
# Expand common abbreviations ("animal...", "prot...", ...) to the
# canonical taxonomic division names; unmatched values pass through.
b_get_tax_division <- function(x){
  x <- tolower(x)
  divisions <- c(Animalia = '^animal', Protista = '^prot',
                 Fungi = '^fun', Plantae = '^plant')
  for (division in names(divisions)) {
    x[b_detect(x, divisions[[division]])] <- division
  }
  x
}
# Validate and canonicalise `tax_division`; NULL/empty input is allowed
# and returned as-is (the argument is optional).
b_assert_tax_division <- function(x){
  if (!length(x)) {
    return(x)
  }
  b_assert(x, what = "character", name = "tax_division")
  x <- b_get_tax_division(x)
  b_validate(x, choices = b_tax_division, name = "tax_division")
  x
}
# Expand common abbreviations to the canonical taxonomic rank names;
# the pattern order matters ('^fam' before '^subfam' etc. is safe since
# the prefixes do not overlap) and unmatched values pass through.
b_get_tax_rank <- function(x){
  x <- tolower(x)
  ranks <- c(kingdom = '^king', phylum = '^phy', class = '^cla',
             order = '^ord', family = '^fam', subfamily = '^subfam',
             tribe = '^tribe', genus = '^gen', species = '^spe',
             subspecies = '^subspe')
  for (rank in names(ranks)) {
    x[b_detect(x, ranks[[rank]])] <- rank
  }
  x
}
# Validate and canonicalise `tax_rank`; NULL/empty input is allowed and
# returned as-is (the argument is optional).
b_assert_tax_rank <- function(x){
  if (!length(x)) {
    return(x)
  }
  b_assert(x, what = "character", name = "tax_rank")
  x <- b_get_tax_rank(x)
  b_validate(x, choices = b_tax_rank, name = "tax_rank")
  x
}
# Validate the `format` argument: a single string, lower-cased, that
# must be exactly "xml" or "tsv".
b_assert_format <- function(x){
  b_assert(x, what = "character", check.length = 1L, name = "format")
  x <- tolower(x)
  # Guard clause instead of if/else; the valid value falls through.
  if (x != "xml" && x != "tsv") {
    stop("'format' should be one of 'xml' or 'tsv'")
  }
  x
}
# Normalise the user-supplied dataTypes: lower-case everything, collapse
# the whole request to "all" if it appears anywhere, and correct the
# historic json typo plus common short forms (e.g. when the option was
# taken from a previous query).
b_get_dataTypes <- function(x){
  x <- tolower(x)
  if (any(x == "all")) {
    return("all")
  }
  aliases <- c(basics = "basic",
               depo = "depository", depositories = "depository",
               labs = "sequencinglabs", sequencinglab = "sequencinglabs",
               stat = "stats",
               img = "images",
               wiki = "thirdparty")
  known <- x %in% names(aliases)
  x[known] <- unname(aliases[x[known]])
  x
}
# Validate and normalise `dataTypes`: must be a non-empty character
# vector (check.length = 0L means "any positive length"), then each
# entry is normalised and checked against the known data types.
b_assert_dataTypes <- function(x){
  b_assert(x, what = "character", name = "dataTypes", check.length = 0L)
  types <- b_get_dataTypes(x)
  b_validate(types, choices = b_dataTypes, name = "dataTypes")
  types
}
|
1ce1c5ce58d5d573b3943f39bddbd31d28451e34
|
98b87f6e7e180948960cacd5dab7080914c16984
|
/man/metals.Rd
|
fd520ac301767cb42b1c7e635d65afe475d61eee
|
[] |
no_license
|
alexpkeil1/qgcomp
|
ef1543e24d64ce3afcf5c6385904e3875d14ab5f
|
b09d7082bd1a3ea7508a0a2954d7351dfb2243f8
|
refs/heads/main
| 2023-08-17T21:12:52.820929
| 2023-08-10T12:27:31
| 2023-08-10T12:27:31
| 154,714,135
| 16
| 6
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,467
|
rd
|
metals.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{metals}
\alias{metals}
\title{Well water data}
\format{
A data frame with 253 rows and 24 variables:
\describe{
\item{y}{continuous birth outcome}
\item{disease_state}{binary outcome}
\item{disease_time}{time-to-disease_state: survival outcome censored at approximately the median}
\item{arsenic}{metal}
\item{barium}{metal}
\item{cadmium}{metal}
\item{calcium}{metal}
\item{chloride}{metal}
\item{chromium}{metal}
\item{copper}{metal}
\item{iron}{metal}
\item{lead}{metal}
\item{magnesium}{metal}
\item{manganese}{metal}
\item{mercury}{metal}
\item{selenium}{metal}
\item{silver}{metal}
\item{sodium}{metal}
\item{zinc}{metal}
\item{mage35}{Binary covariate: maternal age > 35}
\item{nitrate}{water chemistry measure}
\item{nitrite}{water chemistry measure}
\item{sulfate}{water chemistry measure}
\item{ph}{water chemistry measure}
\item{total_alkalinity}{water chemistry measure}
\item{total_hardness}{water chemistry measure}
}
}
\usage{
data(metals)
}
\description{
Simulated well water measurements in North Carolina: 16 metals, 6 water chemistry
measures, and 2 health outcomes (y = continuous; disease_state = binary/time-to-event
in combination with disease_time)
A dataset containing well water measurements and health outcomes for 253 individuals.
All continuous variables are standardized to have mean 0, standard deviation 1.
}
\keyword{datasets}
|
1075b107a2d09bce20925e72b627560e7dd60d3d
|
00eccc7f397dc1c28a13b82cacd2d1fab4181d71
|
/R/hpp.event.times.R
|
0f18d6f69bbb78119f602f58edf679d88862fb32
|
[] |
no_license
|
parsifal9/PPspectra
|
57177f9bbc639f2579f69c7c1a5d5c52c91a193b
|
917e9ff977c3bf14fd4685ae561454a0622643f4
|
refs/heads/master
| 2023-01-18T23:36:34.220339
| 2020-11-24T23:56:37
| 2020-11-24T23:56:37
| 268,259,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 399
|
r
|
hpp.event.times.R
|
#' Simulate event times from a homogeneous Poisson process
#'
#' Draws \code{num.events} exponential inter-event times with the given
#' \code{rate}, accumulates them starting at \code{t0}, rounds the event
#' times to \code{sig} decimals and keeps the unique times falling
#' strictly before \code{tT}.
#'
#' @param rate numeric; rate (intensity) of the Poisson process
#' @param num.events integer; number of inter-event times to draw
#' @param t0 numeric; start of the observation window (default 0)
#' @param tT numeric; end of the observation window (default 1); only
#'   event times strictly smaller than \code{tT} are returned
#' @param sig integer; number of decimals to round the event times to
#'
#' @return a numeric vector of unique, increasing event times
#'
#' @export
hpp.event.times <- function(rate, num.events, t0 = 0, tT = 1, sig = 4)
{
  # Cumulative sums of i.i.d. exponential gaps are exactly the event
  # times of a homogeneous Poisson process; the previous one-column
  # matrix + apply(, 2, cumsum) detour is unnecessary and draws the same
  # RNG stream, so results are unchanged for a given seed.
  # NOTE(review): if the cumulative time never reaches tT the window is
  # under-sampled -- callers must pass a large enough num.events.
  times <- t0 + cumsum(rexp(n = num.events, rate = rate))
  times <- round(times, sig)
  # Rounding can create duplicated time stamps; keep one copy of each
  # in-window event time.
  unique(times[times < tT])
}
|
416a572ccccfe00e942197a67855b058f77cd40d
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/methyl_methyl_((4-am.R
|
aaf909220c242d32e050c9de594e49916d2863ec
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
methyl_methyl_((4-am.R
|
# Render the "methyl_methyl_((4-am" Rmd report to HTML with knitr.
# rgl is loaded because the Rmd presumably contains 3-D plots -- TODO
# confirm against the Rmd source.
library("knitr")
library("rgl")
# Alternative pipeline kept for reference (md -> HTML -> PDF via pandoc).
# NOTE(review): the "((" in the file name would need shell quoting for
# the system() call below to work.
#knit("methyl_methyl_((4-am.Rmd")
#markdownToHTML('methyl_methyl_((4-am.md', 'methyl_methyl_((4-am.html', options=c("use_xhml"))
#system("pandoc -s methyl_methyl_((4-am.html -o methyl_methyl_((4-am.pdf")
knit2html('methyl_methyl_((4-am.Rmd')
|
292af9817ab28cba2cec6f00718e26c5041b80b3
|
78471d997b690d876881cd4f1aaba90317a83b37
|
/plot3.R
|
2f2d0501634e524ed4e10c22ca8ef025c4714ae0
|
[] |
no_license
|
DataMX/ExData_Plotting1
|
d18f39cf6a39f6f2f0166cb3da0e822671442876
|
2068bddf0c2e06fe9898e40f87a315944d446645
|
refs/heads/master
| 2021-01-14T12:31:01.906896
| 2014-11-13T20:46:56
| 2014-11-13T20:46:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,298
|
r
|
plot3.R
|
# Plot 3: energy sub-metering over two days, written to plot3.png.
# Assumes the household power consumption file has been downloaded and
# unzipped, and that lubridate and dplyr are installed.
library (lubridate)
library (dplyr)
# Read the data in. "?" marks missing values in this dataset.
# NOTE(review): the absolute home-directory path makes this script
# non-portable -- consider reading from the working directory instead.
household_power_consumption <- read.csv("~/Documents/Coursera_Data/ExploratoryDataAnalysis/Project1/household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE)
data <- tbl_df (household_power_consumption)
rm (household_power_consumption)
# Combine Date and Time into one POSIXct column using lubridate + dplyr.
data <- mutate(data, datetime = dmy_hms(paste(Date, Time, sep = " ")))
# Keep only the two target days (1 and 2 Feb 2007).
two_days <- filter (data, floor_date(datetime, unit = "day") == mdy("02/01/2007") | floor_date(datetime, unit = "day") == mdy("02/02/2007") )
# Open the png device, then draw the three sub-metering series and a legend.
png(filename = "plot3.png", width = 480, height = 480, bg = "transparent")
plot( two_days$datetime, two_days$Sub_metering_1, ylab = "Energy sub metering", xlab = " ", type = "l")
lines( two_days$datetime, two_days$Sub_metering_2, type = "l", col = "red")
lines( two_days$datetime, two_days$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1)
dev.off()
|
187c1d5151406370e0057023f4398f42c12cb374
|
31c59aac8c659fbf4b5dbdbbbb349e9d3420c276
|
/man/ReduceAnomalies.Rd
|
e21eb7051016381c017e6ea67f4da7b3e3ee1215
|
[] |
no_license
|
cran/otsad
|
75b92a8b1c6309f38950c8a6ac47e32a24b474e0
|
9a05ce0cfd3efe4cb6bc21d046b793b607cf3869
|
refs/heads/master
| 2020-12-22T01:53:10.335201
| 2019-09-06T08:50:02
| 2019-09-06T08:50:02
| 236,634,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,178
|
rd
|
ReduceAnomalies.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReduceAnomalies.R
\name{ReduceAnomalies}
\alias{ReduceAnomalies}
\title{Reduce Anomalies}
\usage{
ReduceAnomalies(data, windowLength, incremental = FALSE,
last.res = NULL)
}
\arguments{
\item{data}{Numerical vector with anomaly labels.}
\item{windowLength}{Window length.}
\item{incremental}{TRUE for incremental processing and FALSE for classic processing}
\item{last.res}{Last result returned by the algorithm.}
}
\value{
If \code{incremental} = FALSE, new Numerical vector with reduced anomaly labels. Else,
a list of the following items.
\item{result}{New Numerical vector with reduced anomaly labels.}
\item{last.res}{Last result returned by the algorithm. It is a list with \code{pointer},
the index of the last anomaly and \code{index}, the index number of the last point in the data}
}
\description{
\code{ReduceAnomalies} reduces the number of detected anomalies. This function is
designed to reduce the number of false positives by keeping only the first detection
of all those that are close to each other. The proximity distance is defined by a window.
}
\examples{
## EXAMPLE 1: Classic Processing ----------------------
## Generate data
set.seed(100)
n <- 350
x <- sample(1:100, n, replace = TRUE)
x[70:90] <- sample(110:115, 21, replace = TRUE)
x[25] <- 200
x[320] <- 170
df <- data.frame(timestamp = 1:n, value = x)
## Calculate anomalies
result <- IpSdEwma(
data = df$value,
n.train = 5,
threshold = 0.01,
l = 2
)
res <- cbind(df, result$result)
## Plot results
PlotDetections(res, title = "SD-EWMA ANOMALY DETECTOR")
## Reduce anomalies
res$is.anomaly <- ReduceAnomalies(res$is.anomaly, windowLength = 5)
## Plot results
PlotDetections(res, title = "SD-EWMA ANOMALY DETECTOR")
## EXAMPLE 2: Incremental Processing ----------------------
\donttest{
# install.packages("stream")
library("stream")
# Generate data
set.seed(100)
n <- 350
x <- sample(1:100, n, replace = TRUE)
x[70:90] <- sample(110:115, 21, replace = TRUE)
x[25] <- 200
x[320] <- 170
df <- data.frame(timestamp = 1:n, value = x)
dsd_df <- DSD_Memory(df)
# Initialize parameters for the loop
last.res <- NULL
red.res <- NULL
res <- NULL
nread <- 100
numIter <- ceiling(n/nread)
# Calculate anomalies
for(i in 1:numIter) {
# read new data
newRow <- get_points(dsd_df, n = nread, outofpoints = "ignore")
# calculate if it's an anomaly
last.res <- IpSdEwma(
data = newRow$value,
n.train = 5,
threshold = 0.01,
l = 2,
last.res = last.res$last.res
)
if(!is.null(last.res$result)){
# reduce anomalies
red.res <- ReduceAnomalies(last.res$result$is.anomaly,
windowLength = 5, incremental = TRUE, last.res = red.res$last.res)
last.res$result$is.anomaly <- red.res$result
# prepare the result
res <- rbind(res, cbind(newRow, last.res$result))
}
}
# Plot results
PlotDetections(res, title = "SD-EWMA ANOMALY DETECTOR")
}
}
|
d7e0c0e493684d12303df5182eff4cf0cd505317
|
bcf2082e53630c6f415b373c6c6256a20d3bbe8a
|
/MW_CLEANINGDATA.R
|
feaa19eaec83abe6253e92314ca56f75d45b0cf8
|
[] |
no_license
|
jjanezhang/mighty_well_sp20
|
ec1663fccfac2a267094433eaef14e799b698921
|
37c41f5298d8bef707b1e6de4d9d9d1eca3d7154
|
refs/heads/master
| 2021-03-13T08:53:34.750657
| 2020-03-25T01:47:13
| 2020-03-25T01:47:13
| 246,661,550
| 0
| 0
| null | 2020-03-11T19:25:53
| 2020-03-11T19:25:52
| null |
UTF-8
|
R
| false
| false
| 1,725
|
r
|
MW_CLEANINGDATA.R
|
# Aggregate customer spend and order counts by zip code and by city,
# then compute average spend per order and export the city-level table.
# NOTE(review): loading plyr after dplyr masks several dplyr verbs
# (summarise, mutate, ...) -- harmless here since only base aggregate()
# is used, but worth confirming.
library(RMySQL)
library(dplyr)
library(plyr)
library(ggplot2)
# NOTE(review): hard-coded personal download directory -- non-portable.
setwd('C:\\Users\\Jayesh\\Downloads')
customers_export <- read.csv("customers_export_MW.csv", header = T)
orders_export <- read.csv("orders_export_1 MW.csv", header = T)
# Keep only customers who actually spent something.
customers_export_spent <-customers_export[( customers_export$Total.Spent > 0),]
# Total spend and total orders, aggregated by zip code and by city.
customers_export_spent_zipcode <- aggregate(customers_export_spent$Total.Spent,by=list(customers_export_spent$Zip),sum)
customers_export_spent_city <- aggregate(customers_export_spent$Total.Spent, by=list(customers_export_spent$City), sum)
customers_export_orders_zipcode <- aggregate(customers_export_spent$Total.Orders, by = list(customers_export_spent$Zip), sum)
customers_export_orders_city <- aggregate(customers_export_spent$Total.Orders, by=list(customers_export_spent$City), sum)
# Sort all four tables by their grouping key so rows line up for the
# element-wise division below.
customers_export_orders_city <- customers_export_orders_city[order(customers_export_orders_city$Group.1),]
customers_export_orders_zipcode <- customers_export_orders_zipcode[order(customers_export_orders_zipcode$Group.1),]
customers_export_spent_city <- customers_export_spent_city[order(customers_export_spent_city$Group.1),]
customers_export_spent_zipcode <- customers_export_spent_zipcode[order(customers_export_spent_zipcode$Group.1),]
# Average spend per order = total spend / total orders, per group.
customers_export_spentorder_city <- customers_export_orders_city
customers_export_spentorder_zipcode <- customers_export_orders_zipcode
customers_export_spentorder_zipcode$x <- customers_export_spent_zipcode$x / customers_export_orders_zipcode$x
customers_export_spentorder_city$x <- customers_export_spent_city$x / customers_export_orders_city$x
#write.csv(customers_export_spentorder_zipcode, "zipcodeanalysis.csv")
write.csv(customers_export_spentorder_city, "citycodeanalysis.csv")
|
9b84d5e9b8ac6c37daa4bed7e6d6bd6ed41dbb86
|
229be3eec8eda763405e9147de5279d0c783b4ac
|
/xmlR/ex1.R
|
268b221934750a1bb52d1379259220e0e77b53f9
|
[] |
no_license
|
gvravi/healapp
|
98ed06127651361048d7f5e43add08f24c9d01bb
|
5ca7d0774f313f137bf4308706dc4d8fbf8d7976
|
refs/heads/master
| 2020-12-24T06:08:17.933512
| 2016-11-08T11:35:20
| 2016-11-08T11:35:20
| 49,939,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,956
|
r
|
ex1.R
|
# Tutorial script: parse a Kiva loans XML document and turn the <loan>
# nodes into a data frame.
library(XML)
setwd("~/R/xmlR")
# API the data was collected from:
# http://api.kivaws.org/v1/lenders/jeremy/loans.xml
# Parse the XML document into an internal DOM tree.
doc = xmlParse("loans.xml")
# xmlToList() maps the document to nested R lists: one element per
# top-level child node, recursively. Here that yields two lists:
# "paging" (4 elements) and "loans" (20 elements). Print kivaList to
# inspect them.
kivaList = xmlToList(doc, addAttributes = FALSE)
kivaList
# The <loan> nodes sit two levels below the root, so we first grab the
# root and then its <loans> child before calling xmlToDataFrame().
# Child nodes can be selected by name, as done here:
loansNode = xmlRoot(doc)[["loans"]]
loansNode
# xmlRoot() returns the top-level node of the document, i.e. <response>.
# Indexing the root like a list with [["loans"]] extracts the first
# child named <loans>. Indexing by position works too -- <paging> is the
# first child and <loans> the second:
xmlChildren(loansNode)
# xmlChildren() lists all child nodes of a node, i.e. the individual
# <loan> nodes under <loans>. Passing that list to xmlToDataFrame()
# builds a data frame with one row per loan:
loans = xmlToDataFrame(xmlChildren(loansNode))
names(loans)
# Note: this approach collapses the image column to just the value of
# the first child node in <image>.
# of the first child node in <image>
|
6e5f99e8eeadfa9906a21aad473b08a04c069be9
|
0b671e13616015dc4c98c5b8dc8284ea68e5a6df
|
/Guía 2/2.3.R
|
0239d6c2b74d54a0c1c3092ff129ab6230949dad
|
[] |
no_license
|
Juan-pri/IO-I
|
fcccd5e4a7dbc6c7d770938049f154b3083d3ba1
|
5cd4ab041c9ff33f33a575bf2d2e54d5d2283f5d
|
refs/heads/main
| 2023-05-07T12:39:32.115361
| 2021-05-23T02:47:13
| 2021-05-23T02:47:13
| 369,927,103
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,596
|
r
|
2.3.R
|
# Variable definitions
# Pi: number of pills of type i (i = 1,...,7)
# Vj: amount of vitamin j (j = 1,2,3)
# Vji: amount of vitamin j in pill i
# Objective: MIN  Z = P1*c1 + P2*c2 + P3*c3 + P4*c4 + P5*c5 + P6*c6 + P7*c7
# subject to:
# REQ_V1     5*P1 + 2*P3 + 3*P5 +   P6 + 2*P7  = 100
# REQ_V2     3*P1 +   P2 + 5*P3 + 2*P5 + 6*P7  = 80
# REQMIN_V3    P1 + 3*P3 +   P4 + 2*P5 + 6*P7 >= 120
# REQMAX_V3    P1 + 3*P3 +   P4 + 2*P5 + 6*P7 <= 160
# Unit cost of each pill type
c1 <- 4
c2 <- 1
c3 <- 5
c4 <- 0.6
c5 <- 3.5
c6 <- 0.7
c7 <- 4
# Use the lpSolve library
library(lpSolve)
# Vector of objective-function coefficients
Z <- c(c1,c2,c3,c4,c5,c6,c7)
# Constraint coefficient matrix (one row per constraint above)
A <- matrix(c(5,0,2,0,3,1,2,
              3,1,5,0,2,0,1,
              1,0,3,1,2,0,6,
              1,0,3,1,2,0,6), ncol = 7, byrow = T)
# Right-hand-side values of the constraints
RHS <- c(100,80,120,160)
# Constraint directions
RESTR <- c('=','=','>=','<=')
# Solve the LP
solucion <- lp('min', Z, A, RESTR, RHS)
# Report results. Note: Z is reused here -- it changes from the cost
# vector to the optimal objective value.
Z <- solucion$objval
P1 <- solucion$solution[1]
P2 <- solucion$solution[2]
P3 <- solucion$solution[3]
P4 <- solucion$solution[4]
P5 <- solucion$solution[5]
P6 <- solucion$solution[6]
P7 <- solucion$solution[7]
paste0("El costo total es =", Z)
paste0("La cantidad de pildoras 1 es =", P1)
paste0("La cantidad de pildoras 2 es =", P2)
paste0("La cantidad de pildoras 3 es =", P3)
paste0("La cantidad de pildoras 4 es =", P4)
paste0("La cantidad de pildoras 5 es =", P5)
paste0("La cantidad de pildoras 6 es =", P6)
paste0("La cantidad de pildoras 7 es =", P7)
|
59799101dff37424ea5e03685092ad3649053911
|
818dd3954e873a4dcb8251d8f5f896591942ead7
|
/Mouse/RNASequencing/snpanalysis.R
|
3830bcc0109d756a9a893928c50bde68f7d54df1
|
[] |
no_license
|
DannyArends/HU-Berlin
|
92cefa16dcaa1fe16e58620b92e41805ebef11b5
|
16394f34583e3ef13a460d339c9543cd0e7223b1
|
refs/heads/master
| 2023-04-28T07:19:38.039132
| 2023-04-27T15:29:29
| 2023-04-27T15:29:29
| 20,514,898
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,415
|
r
|
snpanalysis.R
|
# snpanalysis.R - Analyze the SNPs and indels called by the GenomeAnalysisToolKit
#
# copyright (c) 2014-2020 - Brockmann group - HU Berlin, Danny Arends
# last modified Aug, 2014
# first written Sep, 2014
# Filer: B6N == B6N && BFMI == BFMI (not on X, Y)
# Filter first: B6N != BFMI
# Then: Filter 3-alleles (perhaps keep them in as 2 alleles when 1 is very low < 2 %)
# Then: Find the Frequencies of the remaining SNPs in F1 (by group)
createNames <- function(x){ paste0(x[,1],":", x[,2],"_", x[,5]) }
# Load chromosome annotation plus the parental (B6N, BFMI) and F1
# (maternal-B6N, maternal-BFMI) SNP calls, key every table by
# "CHROM:POS_ALT", and keep only F1 SNPs seen in all three replicates.
# NOTE(review): hard-coded drive paths make this script machine-specific.
setwd("E:/Mouse/DNA/DiversityArray/")
chrInfo <- read.table("Annotation/mouseChrInfo.txt", header=TRUE)
mlength <- max(chrInfo[,"Length"])
chromosomes <- as.character(c(1:19, "X", "Y", "MT"))
setwd("E:/Mouse/RNA/Sequencing/Reciprocal Cross B6 BFMI by MPI/")
# Parental B6N male/female SNP calls (bcftools VCFs, read as character).
B6Nm <- read.table("Analysis/5068_GAGTGG_L004_.snps.bcftools.vcf", colClasses="character")
B6Nf <- read.table("Analysis/5069_AGTCAA_L004_.snps.bcftools.vcf", colClasses="character")
colnames(B6Nm) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(B6Nf) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
# Parental BFMI male/female SNP calls.
BFMIm <- read.table("Analysis/4868_GCCAAT_L001_.snps.bcftools.vcf", colClasses="character")
BFMIf <- read.table("Analysis/5067_ATCACG_L004_.snps.bcftools.vcf", colClasses="character")
colnames(BFMIm) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(BFMIf) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
# Three F1 replicates with a B6N mother; matB6 keeps SNPs called in all
# three replicates (intersection on the CHROM:POS_ALT key).
matB6_1 <- read.table("Analysis/5070_CGATGT_L005_.snps.bcftools.vcf", colClasses="character")                      # maternal B6N
matB6_2 <- read.table("Analysis/5071_CCGTCC_L005_.snps.bcftools.vcf", colClasses="character")                      # maternal B6N
matB6_3 <- read.table("Analysis/5072_TAGCTT_L005_.snps.bcftools.vcf", colClasses="character")                      # maternal B6N
rownames(matB6_1) <- createNames(matB6_1) ; rownames(matB6_2) <- createNames(matB6_2) ; rownames(matB6_3) <- createNames(matB6_3)
matB6 <- matB6_1[which(rownames(matB6_1) %in% rownames(matB6_2) & rownames(matB6_1) %in% rownames(matB6_3)), ]
colnames(matB6) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(matB6_1) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(matB6_2) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(matB6_3) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
# Three F1 replicates with a BFMI mother; same intersection logic.
matBFMI_1 <- read.table("Analysis/5073_TTAGGC_L006_.snps.bcftools.vcf", colClasses="character")                    # maternal BFMI
matBFMI_2 <- read.table("Analysis/5074_GATCAG_L006_.snps.bcftools.vcf", colClasses="character")                    # maternal BFMI
matBFMI_3 <- read.table("Analysis/5075_ATGTCA_L006_.snps.bcftools.vcf", colClasses="character")                    # maternal BFMI
rownames(matBFMI_1) <- createNames(matBFMI_1) ; rownames(matBFMI_2) <- createNames(matBFMI_2) ; rownames(matBFMI_3) <- createNames(matBFMI_3)
matBFMI <- matBFMI_1[which(rownames(matBFMI_1) %in% rownames(matBFMI_2) & rownames(matBFMI_1) %in% rownames(matBFMI_3)), ]
colnames(matBFMI) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(matBFMI_1) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(matBFMI_2) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
colnames(matBFMI_3) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","SAMPLE")
# Key the parental and intersected F1 tables the same way.
rownames(B6Nm) <- createNames(B6Nm) ; rownames(B6Nf) <- createNames(B6Nf)
rownames(BFMIm) <- createNames(BFMIm) ; rownames(BFMIf) <- createNames(BFMIf)
rownames(matB6) <- createNames(matB6) ; rownames(matBFMI) <- createNames(matBFMI)
cat("SNPs matBFMI",nrow(matBFMI_1),nrow(matBFMI_2),nrow(matBFMI_3), nrow(matBFMI),"\n")
cat("SNPs matBFMI on X/Y/MT:", length(c(which(matBFMI[,"CHROM"] == "X"), which(matBFMI[,"CHROM"] == "MT"), which(matBFMI[,"CHROM"] == "Y"))),"\n")
# NOTE(review): the label below says "matBFMI" but the counts printed
# are for the matB6 replicates -- looks like a copy-paste slip in the
# message text; verify before relying on the log output.
cat("SNPs matBFMI",nrow(matB6_1),nrow(matB6_2),nrow(matB6_3), nrow(matB6),"\n")
cat("SNPs matB6N on X/Y/MT:", length(c(which(matB6[,"CHROM"] == "X"), which(matB6[,"CHROM"] == "MT"), which(matB6[,"CHROM"] == "Y"))),"\n")
# For every F1 SNP (seen in all three replicates), compute the fraction
# of alternative-allele reads per replicate and classify the SNP as a
# BFMI- or B6N-private variant using the parental calls (read from the
# globals B6Nm/B6Nf/BFMIm/BFMIf). Returns a character matrix with one
# row per retained SNP.
#
# maternal    intersected F1 SNP table (rownames = "CHROM:POS_ALT")
# m1, m2, m3  the three replicate SNP tables, keyed the same way
doAnalysis <- function(maternal, m1, m2, m3){
  mmatrix <- NULL
  for(snp in rownames(maternal)){
    # Only plain genotype calls; other FORMAT strings are skipped.
    if(maternal[snp,"FORMAT"] == "GT:PL"){
      # Pull the DP4 field (ref-fwd, ref-rev, alt-fwd, alt-rev read
      # counts) out of each replicate's INFO string.
      v1 <- strsplit(m1[snp,"INFO"], ";") ; v2 <- strsplit(m2[snp,"INFO"], ";") ; v3 <- strsplit(m3[snp,"INFO"], ";")
      v1Reads <- as.numeric(unlist(strsplit(gsub("DP4=","",unlist(v1)[which(grepl("DP4", unlist(v1)))]),",")))
      v2Reads <- as.numeric(unlist(strsplit(gsub("DP4=","",unlist(v2)[which(grepl("DP4", unlist(v2)))]),",")))
      v3Reads <- as.numeric(unlist(strsplit(gsub("DP4=","",unlist(v3)[which(grepl("DP4", unlist(v3)))]),",")))
      if(sum(v1Reads) >= 10 && sum(v2Reads) >= 10 && sum(v3Reads) >= 10){                                         # Minimum of 10 reads (combined for the alleles)
        # Alt reads = positions 3:4 of DP4; total = all four counts.
        # Note v*Reads is overwritten from the 4-vector to its total.
        v1ReadsA <- sum(v1Reads[3:4]) ; v1Reads <- sum(v1Reads[1:4])
        v2ReadsA <- sum(v2Reads[3:4]) ; v2Reads <- sum(v2Reads[1:4])
        v3ReadsA <- sum(v3Reads[3:4]) ; v3Reads <- sum(v3Reads[1:4])
   #     if(length(v1ReadsA) == 2 && length(v2ReadsA) == 2 && length(v3ReadsA) == 2){                              # Limit to bi-allelic SNPs
          inBFMI <- c(which(rownames(BFMIm) == snp), which(rownames(BFMIf) == snp))                               # SNP in BFMI males/females
          inB6N <- c(which(rownames(B6Nm) == snp), which(rownames(B6Nf) == snp))                                  # SNP in B6N males/females
          if(length(inBFMI) == 2 && length(inB6N) == 0){                                                          # SNP found in BFMI, not B6N
            #cat("SNP in BFMI\n")
            # Alt-allele ratio per replicate; impScore measures average
            # deviation from the 0.5 expected under balanced expression.
            r1 <- v1ReadsA/v1Reads; r2 <- v2ReadsA/v2Reads; r3 <- v3ReadsA/v3Reads
            impScore <- (abs(r1 - 0.5) + abs(r2 - 0.5) + abs(r3 - 0.5)) / 3
            cat(snp,": ", v1Reads, v2Reads, v3Reads,"->", r1, r2, r3, ":", impScore, "\n")
            origin <- c("BFMI", "BFMI", "BFMI")
            if(r1 < 0.5) origin[1] <- "B6N"
            if(r2 < 0.5) origin[2] <- "B6N"
            if(r3 < 0.5) origin[3] <- "B6N"
            # NOTE(review): columns 8-9 are named OriginPaternal/
            # OriginMaternal below but actually hold the inBFMI row
            # indices -- the names and contents look out of sync; verify.
            mmatrix <- rbind(mmatrix, c(snp, maternal[snp,"CHROM"], maternal[snp,"POS"], maternal[snp,"ID"], origin, inBFMI, v1ReadsA/v1Reads, v1Reads, v2ReadsA/v2Reads, v2Reads, v3ReadsA/v3Reads, v3Reads, impScore, "BFMIsnp"))
          }
          if(length(inBFMI) == 0 && length(inB6N) == 2){                                                          # SNP found in B6N, not BFMI
            #cat("SNP in B6N\n")
            r1 <- v1ReadsA/v1Reads; r2 <- v2ReadsA/v2Reads; r3 <- v3ReadsA/v3Reads
            impScore <- (abs(r1 - 0.5) + abs(r2 - 0.5) + abs(r3 - 0.5)) / 3
            cat(snp,": ", v1Reads, v2Reads, v3Reads,"->", r1, r2, r3, ":", impScore, "\n")
            origin <- c("B6N", "B6N", "B6N")
            if(r1 < 0.5) origin[1] <- "BFMI"
            if(r2 < 0.5) origin[2] <- "BFMI"
            if(r3 < 0.5) origin[3] <- "BFMI"
            mmatrix <- rbind(mmatrix, c(snp, maternal[snp,"CHROM"], maternal[snp,"POS"], maternal[snp,"ID"], origin, inB6N, v1ReadsA/v1Reads, v1Reads, v2ReadsA/v2Reads, v2Reads, v3ReadsA/v3Reads, v3Reads, impScore, "B6Nsnp"))
          }
    #    }
      }else{
        cat("reads FAILED",v1Reads,v2Reads,v3Reads,"\n")
      }
    }
  }
  colnames(mmatrix) <- c("ID", "Chr", "Loc", "dbSNP", "Origin1", "Origin2", "Origin3", "OriginPaternal", "OriginMaternal", "R1", "N1", "R2", "N2", "R3", "N3", "ImprintingScore", "Detected")
  return(mmatrix)
}
# Run the origin analysis for both reciprocal crosses and persist the results.
matB6Nsnps <- doAnalysis(matB6, matB6_1, matB6_2, matB6_3)
matBFMIsnps <- doAnalysis(matBFMI, matBFMI_1, matBFMI_2, matBFMI_3)
write.table(matB6Nsnps, file="maternalB6snps_10reads.txt", sep="\t", row.names=FALSE) # Also available for 10 reads per individual
write.table(matBFMIsnps, file="maternalBFMIsnps_10reads.txt", sep="\t", row.names=FALSE) # Also available for 10 reads per individual
### PLOT
setwd("E:/Mouse/RNA/Sequencing/Reciprocal Cross B6 BFMI by MPI/")
# Reload the 5-read-threshold result files for plotting.
matB6Nsnps <- read.table("maternalB6snps_5reads.txt", sep="\t", header=TRUE)
matBFMIsnps <- read.table("maternalBFMIsnps_5reads.txt", sep="\t", header=TRUE)
# Genome-wide dot plot: one horizontal line per chromosome, SNPs colored by
# inferred origin. `mlength`, `chrInfo` and `chromosomes` come from earlier in
# the file (chromosome lengths/names) — not visible here.
plot(c(0, mlength), c(1,nrow(chrInfo)), t='n', main="SNP origin", yaxt="n", ylab="Chromosome", xlab="Length (Mb)", xaxt="n")
cnt <- 1
aa <- apply(matB6Nsnps, 1,function(x){
yloc <- match(as.character(x["Chr"]), chromosomes); xloc <- as.numeric(x["Loc"])
col <- "white"
# NOTE(review): this indexes an "Origin" column, but doAnalysis writes
# Origin1/Origin2/Origin3 — presumably the 5-read files use a single
# "Origin" column; confirm against their headers.
if(as.numeric(x["ImprintingScore"]) > 0.3 && x["Origin"] == "BFMI") col <- "orange"
if(as.numeric(x["ImprintingScore"]) > 0.3 && x["Origin"] == "B6N") col <- "gray"
if(col != "white") points(x=xloc, y=yloc - 0.1, pch=15,cex=0.5, col=col)
})
aa <- apply(matBFMIsnps, 1,function(x){
yloc <- match(as.character(x["Chr"]), chromosomes); xloc <- as.numeric(x["Loc"])
col <- "white"
if(as.numeric(x["ImprintingScore"]) > 0.3 && x["Origin"] == "BFMI") col <- "orange"
if(as.numeric(x["ImprintingScore"]) > 0.3 && x["Origin"] == "B6N") col <- "gray"
if(col != "white") points(x=xloc, y=yloc + 0.1, pch=15,cex=0.5, col=col)
})
# Draw one baseline per chromosome; cnt advances via <<- inside apply.
aa <- apply(chrInfo,1,function(x){
lines(c(0,x["Length"]), c(cnt, cnt), type="l", col="black", lty=1,lwd=2)
cnt <<- cnt + 1
})
axis(2,chrInfo[,1], at=c(1:nrow(chrInfo)), las=1)
axis(1, seq(0, mlength, 10000000)/1000000, at=seq(0, mlength, 10000000), cex.axis=0.7)
legend("topright", c("> 90% BFMI", "> 90% B6N"), fill=c("orange","gray"))
# Centered moving average of `x` over a window of `n` points.
# Uses stats::filter with equal weights; the (n-1)/2 values at each
# edge come back as NA.
ma <- function(x, n = 5) {
  window.weights <- rep(1 / n, n)
  filter(x, window.weights, sides = 2)
}
# Second genome-wide plot: SNPs drawn at chromosome-line + ImprintingScore
# offset, so more strongly imprinted SNPs sit further above the line.
plot(c(0, mlength), c(1,nrow(chrInfo)), t='n', main="SNP origin", yaxt="n", ylab="Chromosome", xlab="Length (Mb)", xaxt="n")
cnt <- 1
aa <- apply(matBFMIsnps, 1,function(x){
yloc <- match(as.character(x["Chr"]), chromosomes); xloc <- as.numeric(x["Loc"])
col <- "black"
# Lower threshold (0.1) than the previous plot; black = below threshold.
if(as.numeric(x["ImprintingScore"]) > 0.1 && x["Origin"] == "BFMI") col <- "orange"
if(as.numeric(x["ImprintingScore"]) > 0.1 && x["Origin"] == "B6N") col <- "gray"
points(x=xloc, y=yloc+as.numeric(x["ImprintingScore"]), pch=19, cex=0.3, col=col)
})
aa <- apply(chrInfo,1,function(x){
lines(c(0,x["Length"]), c(cnt, cnt), type="l", col="black", lty=1,lwd=2)
cnt <<- cnt + 1
})
# Per-chromosome score profile with a 25-point moving average overlay.
# NOTE(review): `chr` is not defined in this fragment — presumably set
# interactively or earlier in the file; confirm.
mB6N <- matB6Nsnps[matB6Nsnps[,"Chr"]==chr,"ImprintingScore"]
plot(mB6N, col=as.numeric(as.factor(matB6Nsnps[,"Origin"])), pch=19,cex=1)
points(ma(mB6N, 25), t='l',lwd=2)
|
4a4280030e901108644530ff8b4865545108f883
|
f1d92e4a0a155ea83a46798e2130286b9492b5df
|
/man/LDAPGroupWrite.Rd
|
fa9c80e1b168fee6a8ec2fa30e1633223e5862c4
|
[
"MIT"
] |
permissive
|
tynesjo/lookr
|
f7a1f7d5d3e19ea7f6358518972a37179fe59a45
|
aeb899fb33465641ebaac8779ec3348fb9f72900
|
refs/heads/master
| 2020-08-06T13:29:12.761916
| 2019-10-05T12:09:42
| 2019-10-05T12:09:42
| 212,991,893
| 0
| 1
|
MIT
| 2019-10-05T12:01:01
| 2019-10-05T12:01:01
| null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
LDAPGroupWrite.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LDAPGroupWrite.r
\docType{data}
\name{LDAPGroupWrite}
\alias{LDAPGroupWrite}
\title{LDAPGroupWrite Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
LDAPGroupWrite
}
\description{
LDAPGroupWrite Class
}
\section{Fields}{
\describe{
\item{\code{name}}{}
\item{\code{role_ids}}{}
\item{\code{url}}{}
\item{\code{can}}{}
}}
\keyword{datasets}
|
e5e7f0635532f6acc286c9cf0650ef8b5cb85826
|
6e08794831e77d737ba6d3b15823c2020da8aa85
|
/man/copm_prep_names.Rd
|
c8e246b0c4699270ffda60b4fa5526baa871cef9
|
[] |
no_license
|
M2UCT/RFtex
|
f88a69d7adae138ca0700599bab857da159edcee
|
223d61f25e42c6a9f76ee44574cdaa180fcb5ff4
|
refs/heads/master
| 2020-04-17T18:08:34.562347
| 2019-03-14T10:09:45
| 2019-03-14T10:09:45
| 166,813,695
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 922
|
rd
|
copm_prep_names.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/company_name_functions.R
\name{copm_prep_names}
\alias{copm_prep_names}
\title{Name Preparation for Company Matching Procedure}
\usage{
copm_prep_names(names,
adj.regex = "\\\\s?holdings?\\\\s?|\\\\s?groups?\\\\s?|\\\\s?units?$")
}
\arguments{
\item{names}{A character string of company names}
\item{adj.regex}{Adjustment Regex}
}
\value{
A dataframe with 3 columns containing different name varieties.\cr
Col 1 ('n0'): Original Name of the company.\cr
Col 2 ('n1'): Adjusted Name of the company.\cr
Col 3 ('ntype'): Adjustment type.
}
\description{
This function takes a character string and prepares a deduplicated list with maximum 24
different variations of the name. The different name varieties are stored in the 'ntype' column.
}
\examples{
library(tpfuns)
copm_prep_names(names = c("BASF GMBH", "BASF AG", "BASF SE (GER)"))
}
|
0b75273d9ef51e91edf28684568baf1c8c6bb4a2
|
4f52f0e41b62996f528e224117af2adde499225f
|
/Classification Models/classification_knn.R
|
19071fd0294756d8a22d6d3af71bf283dd1beb3d
|
[] |
no_license
|
Harguna/Projects
|
8bf0d6bf844233df71b3847a022f1bea9c5e0bc6
|
802eca32883cd8dcdee744472456dcd9af15c793
|
refs/heads/master
| 2020-04-04T01:57:19.807708
| 2019-09-13T21:40:19
| 2019-09-13T21:40:19
| 155,683,657
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
classification_knn.R
|
# Repeated k-nearest-neighbours classification on random 21-feature subsets.
# Each run reshuffles the predictors, splits 70/30 into train/test, fits
# knn (k = 50) and prints a confusion cross-table.
library(kernlab)
library(caret)
library(class)
library(gmodels)

iterations <- 10
# Result accumulators.
# NOTE(review): these are never filled in the loop below, so the CSV written
# at the end is empty — confirm whether per-run metrics were meant to be saved.
arr_r <- list()
arr_mae <- list()
arr_R <- list()
arr_acc <- list()

modelName <- "knn"
InputDataFileName <- "E:/101510028/classificationDataSet.csv"
training <- 70   # percent of rows used for training

dataset <- read.csv(InputDataFileName)
dataset <- dataset[sample(nrow(dataset)), ]   # shuffle rows once up front
totalDataset <- nrow(dataset)

# FIX: was `for(i in 0:iterations)`, which ran iterations + 1 times.
for (i in seq_len(iterations)) {
  target <- names(dataset)[1]            # first column holds the class label
  inputs <- setdiff(names(dataset), target)
  n <- 21
  inputs <- sample(inputs, n)            # random subset of 21 predictors

  # 70/30 split; fractional indices are truncated by R, and the boundary row
  # appears in both sets (as in the original).
  trainDataset <- dataset[1:(totalDataset * training/100), c(inputs)]
  testDataset <- dataset[(totalDataset * training/100):totalDataset, c(inputs)]
  train_labels <- dataset[1:(totalDataset * training/100), c(target)]
  test_labels <- dataset[(totalDataset * training/100):totalDataset, c(target)]

  model <- knn(train = trainDataset, test = testDataset, cl = train_labels, k = 50)
  CrossTable(x = test_labels, y = model, prop.chisq = FALSE)
}

data <- cbind(arr_r, arr_R, arr_acc, arr_mae)
write.csv(data, file = paste0("E:/101510028/", modelName, "-regression.csv"), row.names = TRUE)
|
17bddd528042cecccc6a5106b4f32d5e96dcfc49
|
9319b8e1f429fb15378db035df482c5d1b30233e
|
/tests/testthat/test_torr_mgl.R
|
ad9af5d6c208de15661daf26f01e3333e0836d1e
|
[] |
no_license
|
JoeyBernhardt/oxygen
|
3cdde87b9984c7921439617dcde7a965bf2e01d5
|
b75c882152d4224c9bb09077e4cccfac0e820e3b
|
refs/heads/master
| 2021-06-24T00:11:29.437815
| 2017-09-09T17:52:02
| 2017-09-09T17:52:02
| 102,791,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 150
|
r
|
test_torr_mgl.R
|
context("converting torr to mg per litre")
test_that("torr_mgl converts torr to ml per litre", {
expect_identical(torr_mgl(16, 18), 1.135008)
})
|
ca28ce77044a1f0bcb0dcc4f473ba5d1c7c5e7b8
|
9fbd34dd260879468ee3710dc80f1a96478d39f9
|
/R/deprecated/Density_uncertainty.R
|
ba6a295da6f1fd7d399651a64d61b4b12e78c76a
|
[] |
no_license
|
Kah5/bimodality
|
2fa26842ba50cdceff22a2f9eb335fc73bcec496
|
2b53dd59777292f84666ac2fcbd7031eda8ddb71
|
refs/heads/master
| 2023-02-01T05:48:49.361038
| 2023-01-21T01:25:00
| 2023-01-21T01:25:00
| 49,456,870
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,578
|
r
|
Density_uncertainty.R
|
# uncertainty around grid cell estimates of tree density:
# want to sample randomly from point level estimates within each grid cell:
# get all points in the grid cell:
# sample randomly from the distribution:
# get the sd and the mean from that random sampling?
version <- "1.7-5"
setwd( "/Users/kah/Documents/bimodality")
library(data.table)
library(reshape2)
library(ggplot2)
library(hexbin)
library(grid)
library(gridExtra)
library(sp)
library(raster)
library(rgdal)
#-----------------------------Load PLS data--------------------------------------
# read in point level data (IN/IL from csv, upper Midwest from RDS)
pls.inil <- read.csv(paste0('outputs/biomass_no_na_pointwise.ests_inilmi_v',version, '.csv'))
pls.umw <- readRDS(paste0("data/outputs/UMW_pointwise.ests_v1.7-5UMDW.RDS"))
# combine upper and lower MW:
pls.full <- rbind(pls.inil[,c("x", "y", "Pointx","Pointy", "cell", "spec", "count", "point", "density", "basal", "diams")], pls.umw[!is.na(pls.umw$density),c("x", "y","Pointx","Pointy", "cell", "spec", "count", "point", "density", "basal", "diams")])
# get the mean value for each species at each PLS point:
pls.spec <- dcast(pls.full, Pointx + Pointy + x + y + cell ~ spec, mean, na.rm = TRUE, value.var = 'density')
# get estimate of total tree density at each point:
pls.spec$density <- rowSums(pls.spec[,!names(pls.spec)%in% c("x", "y","Pointx", "Pointy", "cell", "Water", "wet")], na.rm=TRUE) # sum species density in the grid cell
pls <- pls.spec
pls <- pls[!is.na(pls$density),] # remove all NA values for density
# Option 1: finding mean and sd of each grid cell
pls.mean <- dcast(pls, x + y + cell ~., mean, na.rm = TRUE, value.var = 'density') # we want to sum the densities of all the species in each cells, then divide by the # of pls points within the cell, so take the avg
pls.sd <- dcast(pls, x + y + cell ~., sd, na.rm = TRUE, value.var = 'density') # we want to sum the densities of all the species in each cells, then divide by the # of pls points within the cell, so take the avg
colnames(pls.mean) <- c('x', 'y', 'cell','PLSdensity')
colnames(pls.sd) <- c('x', 'y', 'cell','density_sd')
hist(pls.mean$PLSdensity, xlim = c(0, 600),breaks = 1000)
pls.basic <- merge(pls.mean, pls.sd, by = c("x", "y", "cell"))
# Build human-readable bin labels of the form "a - b" for consecutive
# intervals of width `splitby`, starting at `beg` and covering through `end`.
# Returns a character vector, one label per interval.
label.breaks <- function(beg, end, splitby){
  lower <- seq(beg, end, by = splitby)
  upper <- seq(beg + splitby, end + splitby, by = splitby)
  paste(lower, "-", upper)
}
# Bin the per-cell density SD into 20-unit classes for mapping/coloring.
pls.basic$sd_bins <- cut(pls.basic$density_sd, breaks = seq(-1,600, by = 20), labels = label.breaks(0,580, 20))
# Quick-look maps of SD bins and mean density, plus a density histogram.
ggplot(pls.basic, aes(x,y, fill = sd_bins))+geom_raster()
ggplot(pls.basic, aes(x,y, fill = PLSdensity))+geom_raster()
ggplot(pls.basic, aes(x = PLSdensity))+geom_histogram()
png("outputs/full_MW_dens_histogram_by_sd.png")
ggplot(pls.basic, aes(PLSdensity, fill = sd_bins))+geom_histogram(position = "stack")+theme_bw()+xlim(0,600)+ylim(0,2000)
dev.off()
# Option 2: Bootstrapping mean and 95% CI of the data in each grid cell:
library(boot)
# Bootstrap statistic for a data.frame sample: mean of its density column.
func.mean <- function(d, i){
d2 <- d[i,]
return(mean(d2$density, na.rm=TRUE))
}
# Ad-hoc sanity check on a single grid cell (40599).
bootcorr <- boot(pls[pls$cell %in% 40599,], func.mean, R=500)
bootcorr
# compare to regular mean:
mean.dens <- mean(pls[pls$cell %in% 40599,]$density, na.rm = TRUE)
boot.ci(bootcorr, type = "bca")
# compare to regular mean +/- SD
sd.dens <- sd(pls.inil[pls.inil$cell %in% 40599,]$density, na.rm = TRUE)
mean.dens + sd.dens
mean.dens - sd.dens
pls.inil2 <- pls.inil[1:100,c("x", "y", "cell", "density")]
density.samples <- list()
# create a function that does the bootstrapped CI intervals
# Bootstrap the mean of a numeric vector and return a percentile 95% CI.
#
# x : numeric vector (point-level density estimates for one grid cell)
# R : number of bootstrap replicates (default 1000, unchanged from before)
#
# Returns a one-row data.frame with columns mean, ci.low, ci.high. The CI
# columns are NA when boot.ci cannot compute an interval (e.g. too few
# observations or all resampled means identical).
boot.calcs <- function(x, R = 1000){
  func.mean <- function(d, indices){
    mean(d[indices], na.rm = TRUE)
  }
  # FIX: spell out `statistic =` instead of relying on partial matching of
  # `stat =` — fragile if boot() ever gains another argument starting "stat".
  bootcorr <- boot(x, statistic = func.mean, R = R)
  bootci <- boot.ci(bootcorr, type = "perc")
  # boot.ci returns NULL (with a warning) when it cannot build an interval;
  # NULL$percent[4] is then NULL, so is.null() covers that case too.
  if(is.null(bootci$percent[4])){
    out <- data.frame(mean = bootcorr$t0,
                      ci.low = NA,
                      ci.high = NA)
  }else{
    # percent[4]/percent[5] are the lower/upper percentile bounds.
    out <- data.frame(mean = bootcorr$t0,
                      ci.low = bootci$percent[4],
                      ci.high = bootci$percent[5])
  }
  out
}
# Return the 1000 bootstrap-resampled means of `x`, one value per replicate
# (the raw bootstrap distribution rather than a summary interval).
boot.dens <- function(x){
  resampled.mean <- function(d, idx) mean(d[idx], na.rm = TRUE)
  boot(x, statistic = resampled.mean, R = 1000)$t
}
# create a list of densities by each cell:
dens.by.cells <- split(pls$density, pls$cell)
# apply the "boot.calcs" function over all cells (slow: 1000 replicates/cell):
dens.ci.mean <- lapply( dens.by.cells, FUN = boot.calcs)
dens.ci.mean.df <- do.call(rbind, dens.ci.mean)
# split() named the list by cell id, so the row names carry the cell ids.
dens.ci.mean.df$cell<- row.names(dens.ci.mean.df)
dens.ci.df <- merge(dens.ci.mean.df, pls.mean[,c("x", "y", "cell", "PLSdensity")], by = "cell")
#dens.boot.samples <- lapply( dens.by.cells, FUN = boot.dens)
# how does the above method compare to getting estimates "by hand" --i.e. not using default CI and bootstrap function:
# following Brett Larget's example: http://www.stat.wisc.edu/~larget/stat302/chap3.pdf
# A quick bootstrap function for a confidence interval for the mean.
# x        : a single quantitative sample
# B        : number of bootstrap samples to take
#            (FIX: was documented as an input but hard-coded to 1000 inside;
#             now a parameter with the same default, so callers are unaffected)
# binwidth : passed on to geom_histogram(); derived from the data if NULL
# Side effects: plots a histogram of the bootstrap means (needs ggplot2) and
# prints the interval. Returns a list: boot.statistics, interval (mean +/- 2 SE),
# se, and the ggplot object.
boot.mean <- function(x, binwidth = NULL, B = 1000) {
  n <- length(x)
  # B x n matrix: each row is one with-replacement resample of x.
  boot.samples <- matrix(sample(x, size = n * B, replace = TRUE), B, n)
  boot.statistics <- apply(boot.samples, 1, mean)
  se <- sd(boot.statistics)
  require(ggplot2)
  if (is.null(binwidth))
    binwidth <- diff(range(boot.statistics)) / 30
  p <- ggplot(data.frame(x = boot.statistics), aes(x = x)) +
    geom_histogram(aes(y = ..density..), binwidth = binwidth) + geom_density(color = "red")
  plot(p)
  # Normal-approximation interval: sample mean +/- 2 bootstrap SEs.
  interval <- mean(x) + c(-1, 1) * 2 * se
  print(interval)
  return(list(boot.statistics = boot.statistics, interval = interval, se = se, plot = p))
}
# Compare the hand-rolled and boot-package intervals on the first cell.
boot.mean(dens.by.cells[[1]]) #, B = 500)
boot.calcs(dens.by.cells[[1]])
# comparing both of these methods yields very similar CI estimates
#-------------------- get the density uncertainty from the FIA data:
FIA <- read.csv('data/FIA_species_plot_parameters_paleongrid.csv') # read in FIA data
speciesconversion <- read.csv('data/FIA_conversion-SGD_remove_dups.csv') # conversion table for converting FIA nomeclature to Paleon taxa
FIA.pal <- merge(FIA, speciesconversion, by = 'spcd' )
# how we would normally calculate density:
FIA.by.paleon <- dcast(FIA.pal, x + y+ cell + plt_cn ~ PalEON, sum, na.rm=TRUE, value.var = 'density') #sum all species in common taxa in FIA grid cells
FIA.by.paleon$FIAdensity <- rowSums(FIA.by.paleon[,6:25], na.rm = TRUE) # sum the total density in each plot--This is what we will use to get bootstrapped average total density +ci
fia.melt <- melt(FIA.by.paleon, id.vars = c('x', 'y', 'cell', 'plt_cn', 'Var.5')) # melt the dataframe
fia.by.plot <- dcast(fia.melt, x + y +cell+ plt_cn ~ variable, sum, na.rm=TRUE, value.var = 'value') # average species densities and total density within each grid cell
melted.fia <- melt(fia.by.plot[,c('x', "y", "cell", "plt_cn", "FIAdensity")], id.vars = c('x', "y", "cell", "plt_cn"))
# FIX: ddply comes from plyr, which was never loaded in this script.
library(plyr)
fia.by.cell <- ddply(melted.fia,~ cell,summarise,mean=mean(value),total = sum(value),sd=sd(value), x = mean(x), y = mean(y))
# create a list of per-plot densities for each FIA grid cell
fdens.by.cells <- split(FIA.by.paleon$FIAdensity, FIA.by.paleon$cell)
# apply the "boot.calcs" function over all FIA cells:
fdens.ci.mean <- lapply( fdens.by.cells, FUN = boot.calcs )
fdens.ci.mean.df <- do.call( rbind, fdens.ci.mean )
fdens.ci.mean.df$cell <- row.names(fdens.ci.mean.df)
# FIX: removed the dead line `fdens.ci.df <- merge(fdens.ci.mean.df, test, ...)`
# — `test` was never defined (runtime error) and the result was immediately
# overwritten by the merge below anyway.
fdens.ci.df <- merge(fdens.ci.mean.df, fia.by.cell, by = "cell")
colnames(fdens.ci.df)<- c("cell", "mean.fia", "ci.low.fia", "ci.high.fia", "FIAdensity","FIAdenssd","x", "y")
# merge FIA and PLS together
alldens <- merge(dens.ci.df, fdens.ci.df, by = c("x", "y", "cell"))
summary(alldens)
alldens.m <- melt(alldens[,c("x", "y", "cell","ci.low", "ci.high", "PLSdensity", "ci.low.fia", "ci.high.fia", "FIAdensity")], id.vars = c('x', "y", "cell"))
dens.with.ci <- alldens.m[complete.cases(alldens.m),]
# -----------------plot histograms of density and histograms of the low and high confidence intervals:------------
# PLS density histogram with its CI bounds overlaid as density curves.
png(height = 6, width = 7, units = "in", res = 300, "outputs/density_unc/PLSdensity_hist_MW_with_ci.png")
ggplot()+geom_histogram(data = alldens.m[alldens.m$variable %in% c("PLSdensity"),], aes(value, fill = variable,position = "identity", binwidth = 18))+
geom_density(data =alldens.m[alldens.m$variable %in% c("PLSdensity", "ci.low", "ci.high"),] ,aes(value, color = variable, 20 *..count.., linetype = variable), size = 1.2)+scale_color_manual(values = c("grey", "grey", "red"))+
scale_linetype_manual(values=c("dashed", "dotted", "solid"))+theme_bw(base_size = 20)+xlim(0,600)
dev.off()
# Faceted histograms: PLS density and each CI bound side by side.
png(height = 4, width = 7, units = "in", res = 300, "outputs/density_unc/PLS_MW_histograms.png")
ggplot(dens.with.ci[dens.with.ci$variable %in% c("PLSdensity", "ci.low", "ci.high"),], aes(value, color = variable))+geom_histogram(position = "identity",alpha = 0.5)+theme_bw()+xlim(0,400)+facet_wrap(~variable)
dev.off()
# plotting the fia data makes less sense because we can only get ci from grid cells with more than 1 fia plot--which are sparse in in & IL
png(height = 6, width = 7, units = "in", res = 300, "outputs/density_unc/FIAdensity_hist_MW_with_ci.png")
ggplot() + geom_histogram(data = dens.with.ci[dens.with.ci$variable %in% c("FIAdensity"),], aes(value, fill = variable, position = "identity", binwidth = 18))+
geom_density(data = dens.with.ci[dens.with.ci$variable %in% c("FIAdensity", "ci.low.fia", "ci.high.fia"),] ,aes(value, color = variable, 25 *..count.., linetype = variable), size = 1.2)+xlim(0,600)+scale_color_manual(values = c("grey", "grey", "red"))+
scale_linetype_manual(values=c("dashed", "dotted", "solid")) + theme_bw(base_size = 20)
dev.off()
# histogram of FIA + 95% ci
png(height = 4, width = 7, units = "in", res = 300, "outputs/density_unc/FIA_MW_histograms.png")
ggplot(dens.with.ci[dens.with.ci$variable %in% c("FIAdensity", "ci.low.fia", "ci.high.fia"),], aes(value, color = variable))+geom_histogram(position = "identity",alpha = 0.5)+theme_bw()+xlim(0,600)+facet_wrap(~variable)
dev.off()
# make a histogram with both FIA and PLS density + 95% CI (this looks really messy)
clean.dens.with.ci <- dens.with.ci[dens.with.ci$value <= 600, ]
density.hists <- ggplot() + geom_histogram(data = clean.dens.with.ci[clean.dens.with.ci$variable %in% c("FIAdensity", "PLSdensity"),], aes(value, fill = variable, alpha = 0.5 ), position = "identity", binwidth = 25)+
geom_density(data = clean.dens.with.ci[clean.dens.with.ci$variable %in% c("FIAdensity", "ci.low.fia", "ci.high.fia"),] ,aes(value, color = variable, 30 *..count.., linetype = variable), size = 1.2)+xlim(0,600)#scale_color_manual(values = c("light.blue", "light.blue", "blue"))+xlim(0,600)+
png(height = 4, width = 7, units = "in", res = 300, "outputs/density_unc/FIA_PLS_MW_histograms.png")
density.hists + geom_density(data = clean.dens.with.ci[clean.dens.with.ci$variable %in% c("PLSdensity", "ci.low", "ci.high"),] ,aes(value, color = variable, 30 *..count.., linetype = variable), size = 1.2)+scale_color_manual(values = c("salmon", "dodgerblue", "salmon","dodgerblue", "blue", "red"))+xlim(0,600)+
scale_linetype_manual(values=c("dashed", "dashed", "dotted","dotted", "solid", "solid"))+ theme_bw(base_size = 20)
dev.off()
# -----------------------mapping out density estimates, CI intervals, and classification:---------------------
# Half-width of the percentile CI as a simple per-cell uncertainty measure.
dens.ci.df$uncertainty <- (dens.ci.df$ci.high - dens.ci.df$ci.low)/2
dens.ci.df.m <- melt(dens.ci.df[,c("x", "y", "cell", "PLSdensity", "ci.low", "ci.high")], id.vars = c("x", "y", "cell"))
# Classify each value: >= 47 trees/ha = Forest, 1-47 = Savanna, < 1 = Prairie.
dens.ci.df.m$ecoclass <- ifelse(dens.ci.df.m$value >= 47, "Forest", ifelse(dens.ci.df.m$value >= 1, "Savanna", "Prairie" ))
alldens$uncertainty <- (alldens$ci.high.fia - alldens$ci.low.fia)/2
fdens.ci.df.m <- melt(alldens[,c("x", "y", "cell", "FIAdensity", "ci.low.fia", "ci.high.fia")], id.vars = c("x", "y", "cell"))
fdens.ci.df.m$ecoclass <- ifelse(fdens.ci.df.m$value >= 47, "Forest", ifelse(fdens.ci.df.m$value >= 1, "Savanna", "Prairie" ))
# map out the density and CI estimates for PLS
# need to set up state outlines (projected to the PalEON Albers grid, epsg:3175):
all_states <- map_data("state")
states <- subset(all_states, region %in% c( "wisconsin","minnesota" ,"michigan", "illinois", 'indiana') )
coordinates(states)<-~long+lat
class(states)
proj4string(states) <-CRS("+proj=longlat +datum=NAD83")
mapdata<-spTransform(states, CRS('+init=epsg:3175'))
mapdata <- data.frame(mapdata)
# Shared color scales: `sc` appears unused below; cbpalette = greens for
# density, cbpal_unc = yellow-red for uncertainty.
sc <- scale_colour_gradientn(colours = rev(terrain.colors(8)), limits=c(0, 16))
cbpalette <- c("#ffffcc", "#c2e699", "#78c679", "#31a354", "#006837")
cbpal_unc <- c('white', '#fecc5c', '#fd8d3c','#f03b20', '#bd0026')
dens.pr <- read.csv("data/PLS_full_dens_pr_with_bins.csv")
# Reference map of PLS density from the saved full dataset.
ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=dens.pr, aes(x=x, y=y, fill = PLSdensity))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+ #+
scale_fill_gradientn(colours = cbpalette, limits = c(0,600), name ="Tree \n Density \n (trees/hectare)", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())
# Map of just the bootstrapped mean density.
ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=dens.ci.df.m[dens.ci.df.m$variable %in% "PLSdensity",], aes(x=x, y=y, fill = value))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+ #+
scale_fill_gradientn(colours = cbpalette, limits = c(0,650), name ="Tree \n Density \n (trees/hectare)", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())
# Faceted maps: mean density plus low/high CI, saved to file.
pls.dens.ci.maps <- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=dens.ci.df.m, aes(x=x, y=y, fill = value))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+ #+
scale_fill_gradientn(colours = cbpalette, limits = c(0,650), name ="Tree \n Density \n (trees/hectare)", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())+facet_wrap(~variable)#+ annotate("text", x=-90000, y=1486000,label= "A", size = 5)+ggtitle("")
png(height = 5, width = 9, units = "in", res = 300, "outputs/density_unc/map_density_ci_MW_pls.png")
pls.dens.ci.maps
dev.off()
# make a map of the assigned ecoclass based on using low and high CI values as density
pls.class.ci.maps<- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=dens.ci.df.m, aes(x=x, y=y, fill = ecoclass))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+ #+
scale_fill_manual(values = c("#ffffcc", "#78c679", "#006837"), limits=c("Prairie", "Savanna", "Forest"), name ="Ecocode", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())+facet_wrap(~variable)#+ annotate("text", x=-90000, y=1486000,label= "A", size = 5)+ggtitle("")
png(height = 5, width = 9, units = "in", res = 300, "outputs/density_unc/map_class_ci_MW_pls.png")
pls.class.ci.maps
dev.off()
# Map out density and CI intervals for FIA
fia.dens.ci.maps<- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=fdens.ci.df.m, aes(x=x, y=y, fill = value))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+ #+
scale_fill_gradientn(colours = cbpalette, limits = c(0,650), name ="Tree \n Density \n (trees/hectare)", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())+facet_wrap(~variable)#+ annotate("text", x=-90000, y=1486000,label= "A", size = 5)+ggtitle("")
png(height = 5, width = 9, units = "in", res = 300, "outputs/density_unc/map_density_ci_MW_fia.png")
fia.dens.ci.maps
dev.off()
# map out ecoclassificaiton based on density and CI intervals for FIA
fia.class.ci.maps<- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=fdens.ci.df.m, aes(x=x, y=y, fill = ecoclass))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+ #+
scale_fill_manual(values = c("#ffffcc", "#78c679", "#006837"), limits=c("Prairie", "Savanna", "Forest"), name ="Ecocode", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())+facet_wrap(~variable)#+ annotate("text", x=-90000, y=1486000,label= "A", size = 5)+ggtitle("")
png(height = 5, width = 9, units = "in", res = 300, "outputs/density_unc/map_class_ci_inil_fia.png")
fia.class.ci.maps
dev.off()
# map out density and uncertainty for the FIA era:
f.density.map <- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=alldens, aes(x=x, y=y, fill = FIAdensity))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+scale_fill_gradientn(colours = cbpalette, limits = c(0,650), name ="Tree Density", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())
f.uncertainty.map <- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=alldens, aes(x=x, y=y, fill = uncertainty))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+scale_fill_gradientn(colours = cbpal_unc, limits = c(0,500), name =" Uncertainty", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())
# Side-by-side FIA density and uncertainty panels.
png(height = 5, width = 9, units = "in", res = 300, "outputs/density_unc/map_density_uncertainty_MW_fia.png")
grid.arrange(f.density.map, f.uncertainty.map, ncol = 2)
dev.off()
# Map out the Density & Uncertainty for PLS era
density.map <- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=dens.ci.df, aes(x=x, y=y, fill = PLSdensity))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+scale_fill_gradientn(colours = cbpalette, limits = c(0,650), name ="Tree Density", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())
uncertainty.map <- ggplot()+ geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), fill = 'darkgrey')+
geom_raster(data=dens.ci.df, aes(x=x, y=y, fill = uncertainty))+
geom_polygon(data = mapdata, aes(group = group,x=long, y =lat),colour="black", fill = NA)+
labs(x="easting", y="northing")+scale_fill_gradientn(colours = cbpal_unc, limits = c(0,250), name =" Uncertainty", na.value = 'darkgrey') +
coord_equal()+theme_bw(base_size = 10)+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())
png(height = 5, width = 9, units = "in", res = 300, "outputs/density_unc/map_density_uncertainty_MW_pls.png")
grid.arrange(density.map, uncertainty.map, ncol = 2)
dev.off()
#-------------------------------------------------------------------------------------------------------
# option # 2part b: sample 1 value from grid cell distribution, do this for all grid cells-> is it bimodal? Do this several times
library(boot)
# this function samples 100 pls points from each grid cell & takes the mean of those samples
# Mean of 100 with-replacement draws from `x` (one resampled density estimate).
samp.dens <- function(x) {
  draws <- sample(x, 100, replace = TRUE)
  mean(draws, na.rm = TRUE)
}
# For each element of `df` (a list of per-cell density vectors), draw 100
# points with replacement and take the mean. Returns a one-column matrix of
# per-cell resampled means, with row names taken from the list names.
sample100 <- function(df){
  cell.means <- lapply(df, function(v) mean(sample(v, 100, replace = TRUE), na.rm = TRUE))
  do.call("rbind", cell.means)
}
#next we do the sample100 function 100 times, so we generate 100 histograms:
# Replicate the per-cell density lists into 100 columns, then resample each.
fpoint.dens.mat <- matrix(fdens.by.cells, nrow = length(fdens.by.cells), ncol = 100 ) # make 251 by 20 matrix
fcell.dens.mat <- apply(X = fpoint.dens.mat, FUN = sample100, MARGIN = 2)
ggplot(data.frame(fcell.dens.mat), aes(x = fcell.dens.mat[,10]))+geom_histogram()
#hist(cell.dens.mat[,10], breaks = 100)$breaks
#hist(cell.dens.mat[,3], breaks = 100)$count
# if we get the CI on histogram without removing the prairie ecoclass:
fbreaks <- apply(fcell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 100)$breaks[2:101]}, MARGIN = 2)
fcounts <- apply(fcell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 100)$count}, MARGIN = 2)
fcounts.df <- do.call("rbind", fcounts)
fcounts.df <- t(fcounts.df)
# Per-bin summary statistics across the 100 resampled histograms.
fcount.sd <- apply(fcounts.df, FUN = sd, MARGIN=1)
fcount.95 <- apply(fcounts.df, FUN = function(x){quantile(x,.95)}, MARGIN = 1)
fcount.5 <- apply(fcounts.df, FUN = function(x){quantile(x,.05)}, MARGIN = 1)
fcount.mean <- apply(fcounts.df, FUN = mean, MARGIN = 1)
#plot( breaks[1:100,1], count.mean[1:100])
# NOTE(review): this uses count.mean/breaks/count.sd/count.95/count.5 WITHOUT
# the "f" prefix — those are only defined further down (for the PLS section),
# so run in order this errors; presumably fcount.* / fbreaks were intended.
count.sds <- data.frame(counts = count.mean[1:100], breaks = breaks[1:100,1], sd = count.sd[1:100], min.sd = count.mean[1:100] - count.sd[1:100], max.sd = count.mean[1:100] + count.sd[1:100],
ci.95 = count.95[1:100], ci.5 = count.5[1:100])
# plot histogram bar plot with +/- SD
ggplot(count.sds, aes(breaks, counts))+geom_bar(stat = "identity")+geom_errorbar(data = count.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
ggplot(count.sds, aes(breaks, ci.95))+geom_bar(stat = "identity")#+geom_errorbar(data = count.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
#---------------------do this for cells with prairie cells removed---------------------
#next we do the sample100 function 100 times, so we generate 100 histograms:
point.dens.mat <- matrix(dens.by.cells, nrow = length(dens.by.cells),ncol = 100 ) # 100 replicate columns of the per-cell density list
cell.dens.mat <- apply(X = point.dens.mat, FUN = sample100, MARGIN = 2)
cell.dens <- as.data.frame(cell.dens.mat)
cell.dens$cell <- names(dens.by.cells)
# persist the 100 resampled per-cell means for reuse downstream
write.csv(cell.dens, "outputs/density_100samples_by_cell.csv")
cell.dens.mat[cell.dens.mat < 1 ] <- NA # get rid of prairie cells
cell.dens.mat[cell.dens.mat > 1000 ] <- NA # get rid of high density cells
# get the CI on histogram after removing the prairie ecoclass:
# note, we are generatinge CI on counts for 40 bins here
# NOTE(review): with breaks = 40 the index [2:101] exceeds the number of
# break points, so the trailing entries are NA -- confirm this padding
# is intended (count.sds below indexes [1:100]).
breaks <- apply(cell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 40)$breaks[2:101]}, MARGIN = 2)
counts <- apply(cell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 40)$count}, MARGIN = 2)
counts.df <- do.call("rbind", counts)
counts.df <- t(counts.df)
# per-bin spread across the 100 replicate histograms
count.sd <- apply(counts.df, FUN = sd, na.rm=TRUE, MARGIN=1)
count.95 <- apply(counts.df, FUN = function(x){quantile(x,.95,na.rm=TRUE)}, MARGIN = 1)
count.5 <- apply(counts.df, FUN = function(x){quantile(x,.05,na.rm=TRUE)}, MARGIN = 1)
count.mean <- apply(counts.df, FUN = mean,na.rm=TRUE, MARGIN = 1)
#plot( breaks[1:100,1], count.mean[1:100])
count.sds <- data.frame(counts = count.mean[1:100], breaks = breaks[1:100,1], sd = count.sd[1:100], min.sd = count.mean[1:100] - count.sd[1:100], max.sd = count.mean[1:100] + count.sd[1:100],
ci.95 = count.95[1:100], ci.5 = count.5[1:100])
# plot histogram bar plot with +/- SD
ggplot(count.sds, aes(breaks, counts))+geom_bar(stat = "identity")+geom_errorbar(data = count.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
ggplot(count.sds, aes(breaks, ci.95))+geom_bar(stat = "identity")#+geom_errorbar(data = count.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
# write the PLS histogram-with-uncertainty figures to disk
png(width = 6, height = 4, units = "in", res = 300, "outputs/density_unc/PLS_counts_unc_barplot.png")
ggplot(count.sds, aes(breaks, counts) )+geom_bar(stat = "identity",fill = "blue")+geom_ribbon(aes(ymin=ci.5, ymax=ci.95),fill="darkgrey", alpha=0.9)+xlim(0,600)+theme_bw()+xlab("Tree Density")
dev.off()
png(width = 6, height = 4, units = "in", res = 300, "outputs/density_unc/PLS_counts_unc_barplot_errorbars.png")
ggplot(count.sds, aes(breaks, counts) )+geom_bar(stat = "identity",fill = "blue")+geom_errorbar(aes(ymin=ci.5, ymax=ci.95),width = 1)+xlim(0,600)+theme_bw()+xlab("Tree Density")
dev.off()
png(width = 6, height = 4, units = "in", res = 300, "outputs/density_unc/PLS_counts_unc_lineplot.png")
ggplot(count.sds, aes(breaks, counts) )+geom_line(color = "blue", width = 1)+geom_ribbon(aes(ymin=ci.5, ymax=ci.95),alpha = 0.5)+xlim(0,600)+theme_bw()+xlab("Tree Density")
dev.off()
#-------------------------CI estimation for FIA histogram data---------------------------------
library(boot)
# NOTE(review): samp.dens() and sample100() below are byte-identical
# redefinitions of the helpers at the top of this script; harmless, but
# consider removing the duplicates.
# this function samples 100 pls points from each grid cell & takes the mean of those samples
samp.dens <- function(x){ mean(sample(x,100,replace = TRUE),na.rm=TRUE)}
sample100 <- function(df){
test <- lapply(df, function(x){ mean(sample(x,100,replace = TRUE),na.rm=TRUE)})
test2 <- do.call("rbind", test)
test2
}
#next we do the sample100 function 100 times, so we generate 100 histograms:
fpoint.dens.mat <- matrix(fdens.by.cells, nrow = length(fdens.by.cells), ncol = 100 ) # 100 replicate columns of the FIA per-cell density list
fcell.dens.mat <- apply(X = fpoint.dens.mat, FUN = sample100, MARGIN = 2)
# eyeball one replicate's distribution of cell means
ggplot(data.frame(fcell.dens.mat), aes(x = fcell.dens.mat[,10]))+geom_histogram()
# if we get the CI on histogram without removing the prairie ecoclass:
fbreaks <- apply(fcell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 100)$breaks[2:101]}, MARGIN = 2)
fcounts <- apply(fcell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 100)$count}, MARGIN = 2)
fcounts.df <- do.call("rbind", fcounts)
fcounts.df <- t(fcounts.df)
# per-bin spread across the 100 replicate FIA histograms
fcount.sd <- apply(fcounts.df, FUN = sd, MARGIN=1)
fcount.95 <- apply(fcounts.df, FUN = function(x){quantile(x,.95)}, MARGIN = 1)
fcount.5 <- apply(fcounts.df, FUN = function(x){quantile(x,.05)}, MARGIN = 1)
fcount.mean <- apply(fcounts.df, FUN = mean, MARGIN = 1)
#plot( breaks[1:100,1], count.mean[1:100])
fcount.sds <- data.frame(counts = fcount.mean[1:100], breaks = fbreaks[1:100,1], sd = fcount.sd[1:100], min.sd = fcount.mean[1:100] - fcount.sd[1:100], max.sd = fcount.mean[1:100] + fcount.sd[1:100],
ci.95 = fcount.95[1:100], ci.5 = fcount.5[1:100])
# plot histogram bar plot with +/- SD
ggplot(fcount.sds, aes(breaks, counts))+geom_bar(stat = "identity")+geom_errorbar(data = fcount.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
ggplot(fcount.sds, aes(breaks, ci.95))+geom_bar(stat = "identity")#+geom_errorbar(data = count.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
#---------------------do this for all FIA cells with corresponding PLS cells---------------------
#next we do the sample100 function 100 times, so we generate 100 histograms:
# restrict FIA cells to those that also appear in the PLS density map
inPLS<- names(dens.by.cells)
UMW.by.cells <- fdens.by.cells[names(fdens.by.cells) %in% inPLS]
fpoint.dens.mat <- matrix(UMW.by.cells , nrow = length(UMW.by.cells ),ncol = 100 ) # 100 replicate columns
fcell.dens.mat <- apply(X = fpoint.dens.mat, FUN = sample100, MARGIN = 2)
fcell.dens.mat[fcell.dens.mat > 1000 ] <- NA # get rid of implausibly high density cells (not prairie; no lower cutoff applied here)
# if we get the CI on histogram without removing the prairie ecoclass:
fbreaks <- apply(fcell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 40)$breaks[2:101]}, MARGIN = 2)
fcounts <- apply(fcell.dens.mat,FUN = function(x) {hist(x, xlim = c(0,600), breaks = 40)$count}, MARGIN = 2)
fcounts.df <- do.call("rbind", fcounts)
fcounts.df <- t(fcounts.df)
# per-bin spread across the 100 replicate histograms
fcount.sd <- apply(fcounts.df, FUN = sd, na.rm=TRUE, MARGIN=1)
fcount.95 <- apply(fcounts.df, FUN = function(x){quantile(x,.95,na.rm=TRUE)}, MARGIN = 1)
fcount.5 <- apply(fcounts.df, FUN = function(x){quantile(x,.05,na.rm=TRUE)}, MARGIN = 1)
fcount.mean <- apply(fcounts.df, FUN = mean,na.rm=TRUE, MARGIN = 1)
#plot( breaks[1:100,1], count.mean[1:100])
fcount.sds <- data.frame(counts = fcount.mean[1:100], breaks = fbreaks[1:100,1], sd = fcount.sd[1:100], min.sd = fcount.mean[1:100] - fcount.sd[1:100], max.sd = fcount.mean[1:100] + fcount.sd[1:100],
ci.95 = fcount.95[1:100], ci.5 = fcount.5[1:100])
# plot histogram bar plot with +/- SD
ggplot(fcount.sds, aes(breaks, counts))+geom_bar(stat = "identity")+geom_errorbar(data = fcount.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
ggplot(fcount.sds, aes(breaks, ci.95))+geom_bar(stat = "identity")#+geom_errorbar(data = count.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
ggplot(fcount.sds, aes(breaks, ci.95))+geom_bar(stat = "identity")#+geom_errorbar(data = count.sds, aes(ymin=min.sd, ymax=max.sd),width=1)
# write the FIA histogram-with-uncertainty figures to disk
png(width = 6, height = 4, units = "in", res = 300, "outputs/density_unc/FIA_counts_unc_barplot.png")
ggplot(fcount.sds, aes(breaks, counts) )+geom_bar(stat = "identity",fill = "blue")+geom_ribbon(aes(ymin=ci.5, ymax=ci.95),fill="darkgrey", alpha=0.9)+xlim(0,600)+theme_bw()+xlab("Tree Density")
dev.off()
png(width = 6, height = 4, units = "in", res = 300, "outputs/density_unc/FIA_counts_unc_barplot_errorbars.png")
ggplot(fcount.sds, aes(breaks, counts) )+geom_bar(stat = "identity",fill = "blue")+geom_errorbar(aes(ymin=ci.5, ymax=ci.95),width = 1)+xlim(0,600)+theme_bw()+xlab("Tree Density")
dev.off()
png(width = 6, height = 4, units = "in", res = 300, "outputs/density_unc/FIA_counts_unc_lineplot.png")
ggplot(fcount.sds, aes(breaks, counts) )+geom_line(color = "blue", width = 1)+geom_ribbon(aes(ymin=ci.5, ymax=ci.95),alpha = 0.5)+xlim(0,600)+theme_bw()+xlab("Tree Density")
dev.off()
# ---------------lets plot the PLS and FIA datasets together:
# overlay PLS (blue) and FIA (red) bin counts with their 5-95% ribbons
png(width = 6, height = 4, units = "in", res = 300, "outputs/density_unc/PLS_FIA_counts_unc_barplot_40bins.png")
ggplot(count.sds, aes(breaks, counts, fill ="PLS") )+geom_bar(stat = "identity",fill = 'blue', alpha = 0.3)+geom_ribbon(aes(ymin=ci.5, ymax=ci.95),fill="lightblue", alpha=0.9)+
geom_bar(data = fcount.sds, aes(breaks, counts) ,stat = "identity",fill = "red", alpha = 0.3)+geom_ribbon(data = fcount.sds,aes(ymin=ci.5, ymax=ci.95),fill="pink", alpha=0.9)+xlim(0,600)+theme_bw()+xlab("Tree Density")
dev.off()
#-----------------Incorporating density uncertainty into evaluation of bimodality overall:--------------------
full <- read.csv("outputs/cluster/full_comp_dens_df.csv")
|
a032ebcd53ecef153fb13540a6e074529f9688a0
|
d3277755db582f0bf977b736fa7d464738dad9fe
|
/R/calculate_reporting_delays.R
|
93b06f9af1c3bfca2c9fc570c4ad864f22ff4319
|
[] |
no_license
|
njtierney/freerange-covid
|
54af1b47cf154baae913444e636de0cace2bc81d
|
1c167beda65ba6df4378168bf93023d8ac88c5cf
|
refs/heads/main
| 2022-12-30T11:05:22.538242
| 2020-10-25T05:46:28
| 2020-10-25T05:46:28
| 305,871,744
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 636
|
r
|
calculate_reporting_delays.R
|
##' Build a bootstrapped reporting-delay distribution for EpiNow2
##'
##' Fits a lognormal delay distribution by bootstrapping 100 draws from
##' rlnorm(100, log(6), 1) (all parameters except the maximum are on the
##' log scale), then truncates the maximum allowed delay to 30 days to
##' limit downstream computation.
##'
##' @title Calculate reporting delays
##' @return A delay-distribution object as returned by
##'   \code{EpiNow2::bootstrapped_dist_fit}, with \code{max} set to 30.
##' @author Nicholas Tierney
##' @export
calculate_reporting_delays <- function() {
  #------------Reporting delays---------------------
  # From the documentation re delays: " (assuming a lognormal distribution with all
  # parameters excepting the max allowed value on the log scale).
  # NOTE(review): set.seed() here mutates the caller's global RNG state;
  # confirm that is acceptable (it does make the bootstrap reproducible).
  set.seed(123)
  reporting_delay <- EpiNow2::bootstrapped_dist_fit(rlnorm(100, log(6), 1))
  ## Set max allowed delay to 30 days to truncate computation
  reporting_delay$max <- 30
  reporting_delay
}
|
215bfa087b8aab7b2a82e6092873db2aeb545a1b
|
616e8ba5e7356a3b493062cd8095fa98455d12f1
|
/Archive/get.per.audit.gamblers.fallacy.R
|
b393f90735af86de2082dff8553e3f7dfad9e3e7
|
[] |
no_license
|
Breakend/RIBSS_tax_evasion_ABM
|
0813ecc2ac964d0d68a12fb5b4559f26d25b502d
|
f64c4b8ab1e02a95fa3e032fbcb3b37647eeb017
|
refs/heads/master
| 2022-02-21T14:40:09.271146
| 2019-09-03T20:39:18
| 2019-09-03T20:39:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
get.per.audit.gamblers.fallacy.R
|
#' Per-audit gambler's-fallacy adjustment to the perceived audit rate.
#'
#' In the model the gamblers.fallacy effect is called for year n but modifies
#' and acts on the perceived audit rate for year n+1; thus when called, n+1
#' represents the number of years since beginning to under-report.
#'
#' @param x per-audit probability (numeric in [0, 1])
#' @param n vector of years since beginning to under-report
#' @param l lag (years) at which the multiplier peaks before decaying
#' @return numeric vector, same length as n: 1 - (1 - x)^m, where the
#'   exponent m grows with n up to l + 1 and then declines, never below 1
get.per.audit.gamblers.fallacy <- function(x, n, l = 2) {
  # vapply (not sapply) guarantees a length-1 numeric per element,
  # so the result type is stable even when n is empty
  m <- vapply(n, FUN = function(z) {
    if (z > l) max(1, 2 * l - z + 1) else z + 1
  }, FUN.VALUE = numeric(1))
  1 - (1 - x)^m
}
|
48788f6937b614ee56960b49adeb4be42e1368bf
|
f2aa5c24567724bd3d2d20c21ae4cba4d7e268be
|
/R-Data-Visualization-Recipes-master/Chapter08/chap8_bundle/8_7_publication_contour_HOW_TO_DO_IT.R
|
c2a10b468386e036316c4fdc69fa7f59d9ca9f2b
|
[
"MIT"
] |
permissive
|
cyrsis/RSandBox
|
ab3a2548cbd2a0abaf6fcf2e8f2d42951d3ab80f
|
b647cda2e30702de408b1e223cb898d2bb05c01a
|
refs/heads/master
| 2020-03-21T19:50:21.523273
| 2018-06-28T09:31:36
| 2018-06-28T09:31:36
| 138,972,521
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,411
|
r
|
8_7_publication_contour_HOW_TO_DO_IT.R
|
## STEP 1: DRAW BASE CONTOUR PLOT
# 2D kernel-density contours of stopping distance vs speed (datasets::cars).
# NOTE(review): ..level.. is the legacy stat-variable syntax; newer ggplot2
# prefers after_stat(level) -- kept as-is for compatibility with older versions.
library(ggplot2)
h1 <- ggplot(cars, aes(x = speed, y = dist)) +
stat_density_2d(aes(colour = ..level..), size = 1.2) +
theme_minimal()
## STEP 2: PICK A BETTER COLOR SCALE
# fixed breaks with non-scientific labels so the legend is readable
h2 <- h1 + scale_colour_distiller(direction = 1, name = 'density',
breaks = seq(0.0002,0.0014,0.0002),
labels = format(seq(0.0002,0.0014,0.0002),
scientific = F))
## STEP 3: REWRITE AXES LABELS
h3 <- h2 + xlab('speed (mph)') + ylab('distance (ft)')
## STEP 4: GROW AXES BIGGER
h4 <- h3 + scale_y_continuous(breaks = seq(0,130,10),
labels = seq(0,130,10),
minor_breaks = 0) +
scale_x_continuous(breaks = seq(4,26,2),
labels = seq(4,26,2),
minor_breaks = 0)
## STEP 5: MOVE LEGENDS AND RESIZE TEXTS IN GENERAL
# legend.position is in relative panel coordinates (0-1)
h5 <- h4 + theme(legend.position = c(.2,.75),
legend.background = element_rect(color = "black",
size = .2,
linetype = "solid"),
legend.text = element_text(size = 13),
legend.title = element_text(size = 14),
axis.text = element_text(size = 15),
axis.title = element_text(size = 18))
## STEP 6: AVOID CONFUSING LEGENDS
h5 + guides(colour = guide_legend(title.vjust = .1))
|
cc3cc451e7bdf4b6045b459e8aef04d7f8f11f04
|
506a2b1bee7c97655ca5bf94a2d6edfd11e698e2
|
/man/setPosTime.Rd
|
f12f6b3d1e5ddabc7b83bba36bd0dc3707de3ae5
|
[] |
no_license
|
emanuelhuber/GauProMod
|
3662d5c2cf1136865f6d45210c8ce938634c03ed
|
f2d970e3de95da56c6fb029a5f44c5ceac06d98e
|
refs/heads/master
| 2021-07-18T19:55:08.050846
| 2021-07-13T08:54:32
| 2021-07-13T08:54:32
| 56,522,805
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 218
|
rd
|
setPosTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{setPosTime}
\alias{setPosTime}
\title{Reshape target}
\usage{
setPosTime(xy, tt, val, xystar)
}
\description{
Reshape target
}
|
6700c3d470581fecc27ad8fe8c7a16f32055435e
|
8c205505fc8e1b72e5f9c637e7faea6e64e4fd9a
|
/man/SNormParameter-class.Rd
|
04c9ef771de3c4bfe9262e82207cee729f4dcd61
|
[] |
no_license
|
cran/distrRmetrics
|
273685a9a442bb08fd7e300a9226b665ffa4cdfa
|
008251f66276fc1092cde486091437a54c01d65e
|
refs/heads/master
| 2022-11-21T00:32:21.206044
| 2022-11-12T21:30:08
| 2022-11-12T21:30:08
| 17,695,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,689
|
rd
|
SNormParameter-class.Rd
|
\name{SNormParameter-class}
\docType{class}
\alias{SNormParameter-class}
\alias{mean,SNormParameter-method}
\alias{sd,SNormParameter-method}
\alias{xi,SNormParameter-method}
\title{Parameter of an SNorm distributions}
\description{The class of the parameter of an SNorm distribution.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("SNormParameter", ...)}.
}
\section{Slots}{
\describe{
\item{\code{mean}}{ real number: location parameter of
a SNorm distribution. }
\item{\code{sd}}{ real number: scale parameter of
a SNorm distribution. }
\item{\code{name}}{ default name is
\dQuote{parameter of a SNorm distribution}. }
\item{\code{xi}}{ real number: shape parameter of
a SNorm distribution. }
}
}
\section{Extends}{
Class \code{"Parameter"}, directly.\cr
Class \code{"OptionalParameter"}, by class \code{"Parameter"}.
}
\section{Methods}{
\describe{
\item{mean}{\code{signature(object = "SNormParameter")}: access method for
slot \code{mean}. }
\item{sd}{\code{signature(object = "SNormParameter")}: access method for
slot \code{sd}. }
\item{xi}{\code{signature(object = "SNormParameter")}: access method for
slot \code{xi}. }
}
}
%\references{}
\author{Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de}}
%\note{}
\seealso{\code{\link{SNorm-class}}, \code{\link[distr]{Parameter-class}}}
\examples{
P <- new("SNormParameter")
mean(P)
sd(P)
xi(P)
P
}
\concept{SNorm distribution}
\keyword{distribution}
\concept{parameter}
\concept{S4 parameter class}
\keyword{models}
\concept{generating function}
|
784e46079c0606792773809393b4d911a3242407
|
bbf5f9cb8ef89c221e4b14518ec0e20114a73161
|
/VAR.R
|
fd19941a16d2a832b15ab75db16c0f7745edeaff
|
[] |
no_license
|
ahirwani/MacroClub
|
7d32755b704a04cee0ff89417c51fceb5c5ace8f
|
f54fb9956a5e3c4b743339df8f6ecbc4ec8858ef
|
refs/heads/master
| 2021-08-19T19:38:41.333251
| 2017-11-27T08:24:09
| 2017-11-27T08:24:09
| 109,444,864
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 813
|
r
|
VAR.R
|
# Vector-autoregression demo: lag-order selection, estimation, residual
# diagnostics and forecasting on the Canada (vars) and usconsumption (fpp)
# datasets.
library(vars)
#returns information criteria and final prediction error for sequential increasing lag order up to VAR p process
#seasona = inclusion of seasonal dummy variables
#type = deterministic regressors to include - constant, trend, both, none
data(Canada)
library(fpp)
data(usconsumption)
VARselect(Canada, lag.max = 5, type = "const")
VARselect(usconsumption, lag.max=8, type= "both")$selection
# fit VARs at the chosen lag orders
var.us <- VAR(usconsumption, p=3, type = "both")
var.can <- VAR(Canada, p=2, type = "const")
##Portmanteau test is a test that the residuals are uncorrelated.
#If p-value < 0.05 then you have serial correlation of errors
serial.test(var.us,lags.pt=10,type = "PT.asymptotic")
serial.test(var.can, lags.pt=10, type = "PT.asymptotic")
summary(var.us)
# forecast the US-consumption VAR and plot it
fcast.us <- forecast(var.us)
plot(fcast.us, xlab="Year")
|
769e456a055446a295c179a1a9152526aeebba91
|
2be4b043e6cfbfa4cf3869e22a22a127669755f7
|
/benchmark/MERFISH_Moffit/Seurat/Moffit_RNA.R
|
b5010a05886987b68ecc17c3077b4e0824a5ea76
|
[
"MIT"
] |
permissive
|
c4chow/SpaGE
|
ffa3a4d7e358796426aa218d67f3b8d4514f38a6
|
bda1036660ab01f8bf44993e52392a37145794a3
|
refs/heads/master
| 2023-02-27T08:35:43.333816
| 2021-02-06T23:44:13
| 2021-02-06T23:44:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 483
|
r
|
Moffit_RNA.R
|
# Build a Seurat object from the Moffit POR 10X scRNA-seq matrix and run the
# standard pipeline (normalise -> HVGs -> scale -> PCA -> UMAP), saving the
# result for downstream benchmarking.
setwd("MERFISH_Moffit/")  # NOTE(review): paths below are relative to this directory
library(Seurat)
Moffit <- Read10X("data/Moffit_RNA/GSE113576/")
Mo <- CreateSeuratObject(counts = Moffit, project = 'POR', min.cells = 10)
Mo <- NormalizeData(object = Mo)
Mo <- FindVariableFeatures(object = Mo, nfeatures = 2000)
Mo <- ScaleData(object = Mo)
Mo <- RunPCA(object = Mo, npcs = 50, verbose = FALSE)
# BUG FIX: the RunUMAP argument is n.neighbors; the previous spelling
# 'nneighbors' matched no parameter and was silently absorbed by '...',
# so the default neighbourhood size was used instead of the intended 5.
Mo <- RunUMAP(object = Mo, dims = 1:50, n.neighbors = 5)
saveRDS(object = Mo, file = paste0("data/seurat_objects/","Moffit_RNA.rds"))
|
cd0d479d8a11dd0d485a1a234ff05b820e47a961
|
b3911e3d40a84dd1dc5ae73b0cfaed608ecd1648
|
/cachematrix.R
|
0a9dd55ca8afa2902178b0970ba7ba9ea8e24d08
|
[] |
no_license
|
koolwebdezign/ProgrammingAssignment2
|
837afb415462b65a987655d3e5025323d384d147
|
d3f507e9d402827d8d57c6305204b0881038dc00
|
refs/heads/master
| 2021-01-16T20:23:48.020388
| 2015-08-22T12:46:18
| 2015-08-22T12:46:18
| 41,201,647
| 0
| 0
| null | 2015-08-22T10:06:37
| 2015-08-22T10:06:37
| null |
UTF-8
|
R
| false
| false
| 2,717
|
r
|
cachematrix.R
|
## Data Science Specialization - John Hopkins University
## R Programming - Programming Assignment #2
## For more information about matrices and the inverse of a matrix,
## I enjoyed this web page https://www.mathsisfun.com/algebra/matrix-inverse.html
## Matrix inversion is usually a costly computation and there may be
## some benefit to caching the inverse of a matrix rather than computing
## it repeatedly (there are also alternatives to matrix inversion that
## we will not discuss here). Our assignment is to write a pair of
## functions that cache the inverse of a matrix.
## makeCacheMatrix - Creates a special "matrix" object that can cache its
## inverse. Returns a list of four accessor functions:
##   setMatrix(y)    : store a new matrix (and invalidate any cached inverse)
##   getMatrix()     : return the stored matrix
##   setInverse(inv) : store the computed inverse
##   getInverse()    : return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  # idiomatic <- assignment throughout (was '='); behavior unchanged
  inversematrix <- NULL
  setMatrix <- function(y) {
    x <<- y
    inversematrix <<- NULL  # invalidate stale cache when the matrix changes
  }
  getMatrix <- function() {
    x
  }
  setInverse <- function(inverse) {
    inversematrix <<- inverse
  }
  getInverse <- function() {
    inversematrix
  }
  list(setMatrix = setMatrix, getMatrix = getMatrix,
       setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve - Computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), the inverse is retrieved from the cache
## instead of being recomputed.
##
## x   : an object created by makeCacheMatrix
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  # Return the cached inverse when one is available (early exit)
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    return(inverse)
  }
  # Otherwise compute it once and store it in the cache for next time
  matrixdata <- x$getMatrix()
  inverse <- solve(matrixdata, ...)
  x$setInverse(inverse)
  inverse
}
## References: Several sources were used in my efforts to research, create and test
## this solution. In practice, the internet is a valuable resource for finding
## solutions and code strategies. I successfully completed excessive testing
## of this solution such that I was able to rename variables and to familiarize myself
## with the scoping strategies of each of these functions.
## http://masterr.org/r/how-to-cache-a-matrix-inversion-in-r/
## https://stat.ethz.ch/R-manual/R-devel/library/base/html/Sys.getenv.html
## https://stat.ethz.ch/R-manual/R-devel/library/base/html/Sys.setenv.html
## http://www.endmemo.com/program/R/solve.php
## https://www.mathsisfun.com/algebra/matrix-inverse.html
|
918cde0e983cb433f3bb07ead3a0d246927bd905
|
2deb9aba70b5cb48b59a103f2bd14da908da5817
|
/Challenge3/Old Scripts/FixZeroRow.r
|
340eef888f45020354ad8faae10558c2915e3a0f
|
[] |
no_license
|
maisogjm/FlowCAP-2010
|
d0c8b73b2750803b36674fd9a27f1bd83d3d5951
|
dc0335e1bcbbc4e6a4ca018138e75193d628704d
|
refs/heads/master
| 2020-04-04T08:50:03.697547
| 2018-11-02T01:16:30
| 2018-11-02T01:16:30
| 155,796,473
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 585
|
r
|
FixZeroRow.r
|
########################################################################
## Function to fix a zero row.
########################################################################
## Replace an all-zero row with low-magnitude uniform noise; a row that
## already contains any nonzero entry is returned unchanged.
FixZeroRow <- function(theRow, numVals, noiseMag) {
  if (any(theRow != 0)) {
    ##-----------------------------------------
    ## At least one nonzero entry: nothing to fix.
    return(theRow)
  }
  ## Entire row is zero: substitute small random noise so downstream
  ## computations are not fed a degenerate row.
  noiseMag * runif(numVals)
}
# Quick smoke test: zero out row 3 of a random 5x4 matrix, then repair it.
q = matrix(runif(20),nrow=5)
q[3,] = rep(0,4)
# apply() over rows returns results as columns, hence the transpose
q = t(apply(q, 1, FixZeroRow , numVals = 4, noiseMag = 0.0001))
|
8e29cca8a423990272bab749b6c7e811ea2d0c66
|
7ccd725e1c7400b44f744770d36ac197c6d4033d
|
/tests/testthat/test_25_utopiaPayoff.R
|
e2085313ac71b937dc1c0a8ead79804135a3a083
|
[] |
no_license
|
cran/CoopGame
|
9232ea51802816edfd08a2242d30f27af5fb8169
|
250940376f550789b7a2b862e177e46b0aeb9c44
|
refs/heads/master
| 2021-08-28T10:18:26.244351
| 2021-08-23T17:40:09
| 2021-08-23T17:40:09
| 236,575,654
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,691
|
r
|
test_25_utopiaPayoff.R
|
# Unit tests for getUtopiaPayoff (CoopGame). Was: 15 near-identical
# copy-pasted test_that() blocks; now table-driven. Each case pairs a
# characteristic function v with the expected utopia-payoff vector.
# Descriptions and expectations are identical to the original tests.
boolSkip <- FALSE  # flip to TRUE to skip the whole suite (was: boolSkip=F)

utopia_cases <- list(
  list(desc = "Check 25.1 - getUtopiaPayoff - 3 players",
       v = c(0, 0, 0, 40, 50, 20, 100), expected = c(80, 50, 60)),
  list(desc = "Check 25.2 - getUtopiaPayoff - 3 players",
       v = c(0, 0, 0, 6, 5, 5, 10), expected = c(5, 5, 4)),
  list(desc = "Check 25.3 - getUtopiaPayoff - 3 players",
       v = c(0, 0, 0, 60, 60, 60, 72), expected = c(12, 12, 12)),
  list(desc = "Check 25.4 - getUtopiaPayoff - 3 players - example from TUGLAB",
       v = c(0, 0, 0, 9, 4, 7, 11), expected = c(4, 7, 2)),
  list(desc = "Check 25.5 - getUtopiaPayoff - 3 players - example from TUGLAB modified I",
       v = c(3, 0, 0, 9, 4, 7, 11), expected = c(4, 7, 2)),
  list(desc = "Check 25.6 - getUtopiaPayoff - 3 players - example from TUGLAB modified II",
       v = c(3, 1, 2, 9, 4, 7, 11), expected = c(4, 7, 2)),
  list(desc = "Check 25.7 - getUtopiaPayoff - 3 players - example from TUGLAB modified III",
       v = c(3, 6, 2, 9, 4, 7, 11), expected = c(4, 7, 2)),
  list(desc = "Check 25.8 - getUtopiaPayoff - 3 players - example from TUGLAB modified IV",
       v = c(3, 6, 1, 9, 4, 7, 11), expected = c(4, 7, 2)),
  list(desc = "Check 25.9 - getUtopiaPayoff - 4 players",
       v = c(0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, 22),
       expected = c(10, 10, 10, 10)),
  list(desc = "Check 25.10 - getUtopiaPayoff - 4 players",
       v = c(2, 5, 2, 5, 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, 22),
       expected = c(10, 10, 10, 10)),
  list(desc = "Check 25.11 - getUtopiaPayoff - 4 players",
       v = c(2, 5, 2, 5, 7, 7, 7, 7, 7, 7, 10, 11, 12, 13, 22),
       expected = c(9, 10, 11, 12)),
  list(desc = "Check 25.12 - getUtopiaPayoff - 4 players",
       v = c(2, 5, 2, 5, 7, 8, 7, 9, 7, 5, 10, 11, 12, 13, 22),
       expected = c(9, 10, 11, 12)),
  list(desc = "Check 25.13 - getUtopiaPayoff - 4 players",
       v = c(0, 0, 0, 0, 6, 8, 7, 9, 7, 5, 10, 11, 12, 13, 22),
       expected = c(9, 10, 11, 12)),
  list(desc = "Check 25.14 - getUtopiaPayoff - 5 players",
       v = c(rep(0, 5), rep(60, 10), rep(72, 16)),
       expected = c(0, 0, 0, 0, 0)),
  list(desc = "Check 25.15 - getUtopiaPayoff - 5 players",
       v = c(73, 0, 72, 74, 75, rep(60, 10), rep(72, 15), 80),
       expected = c(8, 8, 8, 8, 8))
)

# test_that() evaluates eagerly, so a plain for-loop registers each case
for (tc in utopia_cases) {
  test_that(tc$desc, {
    if (boolSkip) {
      skip("Test was skipped")
    }
    expect_equal(getUtopiaPayoff(tc$v), tc$expected)
  })
}
|
a45bf83ade505b15cdcffbd66aaf51e734c1d5c9
|
3abf16cf5fba1e25bc7bcd110befaa31a87237c2
|
/man/cache_get.Rd
|
b7cc7a8334ca295dd99dd7a5bb69fb57d99671cd
|
[] |
no_license
|
mattdneal/cacheMan
|
9b13a8eef7b548c14328477e50bcfc122bb97566
|
27aaf954ca55ddc1d40f99c30874a74ba3da9d04
|
refs/heads/master
| 2021-01-10T07:25:54.173799
| 2016-11-08T13:32:35
| 2016-11-08T13:32:35
| 52,607,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 487
|
rd
|
cache_get.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cache.R
\name{cache_get}
\alias{cache_get}
\title{Retrieve from cache}
\usage{
cache_get(function_name, function_call, cache)
}
\arguments{
\item{function_name}{function to retrieve against}
\item{function_call}{function call to retrieve against}
\item{cache}{a Cache object}
}
\value{
the result of evaluating \code{function_call} (either directly or
from the cache)
}
\description{
Retrieve from cache
}
|
f192c8c0757b9358f5df87bf5ceb9f5d7b4f7b32
|
27f615eae91f8548adb1e6ba8da8888417c3ef56
|
/R/check.input.ggmanHighlight.R
|
adb36dbc42ecd5998584b15629e2b68f75e87d4c
|
[] |
no_license
|
drveera/ggman
|
364a572c289799b066f1046e7af952e6cec74ff0
|
b264cab4301c861cc033ca639fda8fd9bc694e10
|
refs/heads/master
| 2023-05-24T00:20:01.242213
| 2019-05-01T17:21:11
| 2019-05-01T17:21:11
| 75,216,178
| 24
| 13
| null | 2021-06-17T10:58:59
| 2016-11-30T18:43:06
|
R
|
UTF-8
|
R
| false
| false
| 981
|
r
|
check.input.ggmanHighlight.R
|
utils::globalVariables(c("col2rgb","highlight","colour"))
#' This function checks the input arguments to the function ggmanHighlight
#'
#' Validates (via lookup of ggmanPlot, highlight and colour in the enclosing
#' environment) that the plot is a ggman object, the highlight input is a
#' vector, and the colour is a valid R colour specification; stops with an
#' informative message on the first violation.
#'
#' @keywords internal
#'
#'@return Nothing; internal function.
#'
#'
check.input.ggmanHighlight <- function(){
    ## ggmanPlot input
    ## inherits() is the canonical S3 class test; class(x) == "cls" breaks
    ## when an object carries multiple classes.
    if(!inherits(ggmanPlot, "ggman")){
        stop("The ggmanPlot input is not a ggman object")
    }
    ## highlight input
    if(! is.vector(highlight)){
        stop(paste0("The highlight input ",highlight," is not a vector object"))
    }
    ## colour input
    ## lineColor input
    ## Thanks to Sacha Epskamp for isColor function.
    ## Reference:http://stackoverflow.com/questions/13289009/check-if-character-string-is-a-valid-color-representation/13290832
    isColor <- function(x)
    {
        res <- try(col2rgb(x),silent=TRUE)
        !inherits(res, "try-error")
    }
    if(! isColor(colour)){
        stop(paste0("\'",colour,"\'"," is not a valid color"))
    }
}
|
076596008af2d3289ada32fbc64ea425e3f29d6f
|
40288378a4766077330c4de7f645faeaba2988c7
|
/local/bedDifferntiator.R
|
c1e3a638c177a3640c0100ad25f027a1f711f4e2
|
[] |
no_license
|
hnakhoul/cluster
|
ca3b7f03d9b4a9c0f7bb8a5b03bcb13b84a01a1d
|
b38287e96c36154a23ecb348c666fad8afc7a84e
|
refs/heads/master
| 2020-05-29T14:10:29.300798
| 2012-05-20T01:59:24
| 2012-05-20T01:59:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 468
|
r
|
bedDifferntiator.R
|
# Read a BED-style coverage file (path supplied on stdin), widen any
# zero-length intervals (end == start) by one base, and write the result
# with tab separators in non-scientific notation to "<input>.bedDiff".
print("Enter the coverage file")
covFile=scan("stdin",what="character",n=1,quiet=TRUE)
cov=read.table(covFile,stringsAsFactors=FALSE)
# rows where end == start: bump end by 1 so the interval has width
boo=cov[,3]==cov[,2]
cov[,3][boo]<-cov[,3][boo]+1
# we want data in non-scientific format, without 'e'
cov$V2=format(cov$V2,scientific=F)
cov$V3=format(cov$V3,scientific=F)
cov$V4=format(cov$V4,scientific=F)
# BUG FIX: the output name previously used the literal string "covFile"
# (paste("covFile", ".bedDiff", sep="")), so every run wrote to the same
# file "covFile.bedDiff" regardless of the input; use the covFile variable.
write.table(cov,file=paste0(covFile,".bedDiff"),sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
print ("done")
|
1511545c8068879f03f371b81d77974e70c30e33
|
bc0354b7734b3e0ea8d29bbbc99ba63b063a037b
|
/Map_construction/Format_map_and_markers_final.R
|
4862b981d83fbc2ddefc5e0b1e4b421212184184
|
[] |
no_license
|
shehongbing/Rumex_genome
|
c9070b0d78ba7c2c214786dd6c4aca67272b21d8
|
ce2cbecdf583970eee2cd12e642abc354e879b5f
|
refs/heads/master
| 2022-11-13T16:21:19.816598
| 2020-07-09T21:57:10
| 2020-07-09T21:57:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,114
|
r
|
Format_map_and_markers_final.R
|
library(dplyr)
library(stringr)
library(tidyr)
setwd("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits")
setwd("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits")
###### Add colocated markers to map ---------------------
library(dplyr)
library(tidyr)
full_map<-read.csv("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_construction/Final_F2_style_TX_map_biggest_100_scaffolds_plus_sex_linked_10-21.csv", stringsAsFactors = F) #Import map exported from ASMAP
## ---- Import ASMAP linkage map and colocated-marker bins, then fill positions ----
## Loads a map exported from ASMAP plus the list of colocated-marker bins, joins
## them, and propagates each bin's linkage-group / cM position to every marker
## in that bin.
## NOTE(review): the TX reads below are immediately overwritten by the NC reads
## that follow if this script is sourced top-to-bottom; the file appears to be
## run interactively one dataset at a time, with the D:/ vs E:/ duplicates being
## alternate machine paths.
full_map<-read.csv("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_construction/Final_F2_style_TX_map_biggest_100_scaffolds_plus_sex_linked_10-21.csv", stringsAsFactors = F) #Import map exported from ASMAP
full_map<-full_map[,1:3] #Remove individual genotypes
full_colocated<-read.csv("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_construction/Colocated_markers_biggest_100_plus_sex_linked.csv", stringsAsFactors = F) #Import colocated marker bins exported from ASMAP
full_colocated<-read.csv("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_construction/Colocated_markers_biggest_100_plus_sex_linked.csv", stringsAsFactors = F) #Import colocated marker bins exported from ASMAP
## NC dataset variant of the same inputs (overwrites the TX objects above).
full_map<-read.csv("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/HiCSNPs/TranscriptomeLinkageMap/ASMAP/NC_transcriptome/F2_style_NC_map_clean_biggest100_5-28.csv", stringsAsFactors = F) #Import map exported from ASMAP
#full_map<-read.csv("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_construction/PleaseLetThisBeItNC_map_biggest_100_scaffolds_sex_linked_split_11-8.csv", stringsAsFactors = F) #Import map exported from ASMAP
full_map<-full_map[,1:3] #Remove individual genotypes
full_colocated<-read.csv("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/HiCSNPs/TranscriptomeLinkageMap/ASMAP/NC_transcriptome/Colocated_markers_biggest_100.csv", stringsAsFactors = F) #Import colocated marker bins exported from ASMAP
#full_colocated<-read.csv("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_construction/NCColocated_markers_biggest_100_plus_sex_linked.csv", stringsAsFactors = F) #Import colocated marker bins exported from ASMAP
#head(full_colocated)
#head(full_map)
#length(intersect(full_map[,1], full_colocated$mark)) #Identify number of markers in map with associated bins of colocated markers
#View(intersect(full_map[,1], full_colocated$mark))
#head(full_map)
## NOTE(review): dplyr::full_join() has no 'fill' argument; 'fill=T' is absorbed
## by '...' and has no effect (newer dplyr versions may warn or error) - TODO
## confirm and remove.
positions_colocated<-full_join(full_colocated, full_map, by=c("mark"="Genotype"), fill=T) #Join list of colocated markers to map
## Interactive spot-check of one bin.
View(subset(positions_colocated, positions_colocated$bins=="239"))
positions_colocated_filled<-positions_colocated%>%group_by(bins)%>%fill(X.y,X.1) #Fill centimorgan and linkage group info for each bin of colocated markers
positions_colocated_filled<-subset(positions_colocated_filled,is.na(positions_colocated_filled$X.1)==F) #Discard markers that were not placed in the map
head(positions_colocated_filled)
## Drop the redundant third column and give the remaining columns stable names.
positions_colocated_filled<-positions_colocated_filled[,-3]
colnames(positions_colocated_filled)<-c("index","bin","marker","LG","CM")
write.csv(positions_colocated_filled,"Old_Map_NC_Final_markers_positions_colocated_filled.csv", row.names=T, quote = F)
#################### Move small scaffolds from small linkage groups to parallel full linkage groups TX #######
## Manual curation of the TX map: markers on fragment linkage groups are
## re-anchored onto their parallel full linkage group (cM positions translated
## via shared anchor markers), then the fragment LGs are dropped and the result
## is written out. D:/ vs E:/ reads are alternate machine paths.
positions_colocated_filled<-read.csv("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits/Final_markers_positions_colocated_filled.csv", stringsAsFactors = F, row.names=1)
positions_colocated_filled<-read.csv("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits/Final_markers_positions_colocated_filled.csv", stringsAsFactors = F, row.names=1)
#Move markers from unique scaffolds on LG4 to LG10
positions_colocated_filled<-data.frame(positions_colocated_filled)
colnames(positions_colocated_filled)
str(positions_colocated_filled)
## Split marker IDs (e.g. "ScnbKXS_17040-HRSCAF-21160_3444680") into scaffold
## and bp position. NOTE(review): 'se=' partially matches separate()'s 'sep'
## argument - it works, but should be written out as 'sep=' for clarity.
positions_colocated_filled<-separate(positions_colocated_filled, marker, into=c(NA, "scaffold", "position"), se=("\\_"), remove = F) %>% separate(., scaffold, into=c("scaffold", NA, NA), se=c("\\-"), remove = F)
## cM offset between the LG4 and LG10 copies of anchor scaffold 17040, used to
## translate LG4 positions onto the LG10 scale.
CM_4_10_difference<-(subset(positions_colocated_filled, positions_colocated_filled$marker == "ScnbKXS_17040-HRSCAF-21160_3444680")$CM)-
(subset(positions_colocated_filled, positions_colocated_filled$marker == "ScnbKXS_17040-HRSCAF-21160_4391861")$CM)
## Shift every L.4 marker by the anchor offset, then relabel L.4 as L.10.
positions_colocated_filled<-positions_colocated_filled %>%
mutate (
CM = case_when (
LG == "L.4" ~ (CM+CM_4_10_difference ),
TRUE ~ CM)) %>%
mutate (
LG = case_when (
LG == "L.4" ~ "L.10",
TRUE ~ LG))
## Place the single scaffold-15769 marker at the position of its colocated
## scaffold-17265 marker on L.5.
CM_15769<- (subset (positions_colocated_filled, positions_colocated_filled$marker=="ScnbKXS_17265-HRSCAF-21557_12838208"))$CM
positions_colocated_filled<-positions_colocated_filled %>%
mutate (
CM = case_when (
marker == "ScnbKXS_15769-HRSCAF-18489_111981" ~ (CM_15769),
TRUE ~ CM)) %>%
mutate (
LG = case_when (
marker == "ScnbKXS_15769-HRSCAF-18489_111981" ~ "L.5",
TRUE ~ LG))
#Fix LG 11 unique scaffold positions
CM_292<-(subset (positions_colocated_filled, positions_colocated_filled$marker=="ScnbKXS_15927-HRSCAF-18817_297474"))$CM
#CM_3947<-(subset (positions_colocated_filled, positions_colocated_filled$marker=="ScnbKXS_15927-HRSCAF-18817_297474"))$CM #These two scaffolds are at the same genetic position
CM_14219<-(subset (positions_colocated_filled, positions_colocated_filled$marker=="ScnbKXS_17162-HRSCAF-21394_26890388"))$CM
## NOTE(review): 'scaffold' is a character column after separate(); comparing it
## to numeric literals (292, 3947, 14219) relies on implicit coercion. It works,
## but quoted strings ("292") would make the intent explicit.
positions_colocated_filled<-positions_colocated_filled %>%
mutate (
CM = case_when (
(scaffold == 292 | scaffold == 3947) ~ (CM_292),
TRUE ~ CM)) %>%
mutate (
LG = case_when (
(scaffold == 292 | scaffold == 3947) ~ "L.7",
TRUE ~ LG))
positions_colocated_filled<-positions_colocated_filled %>%
mutate (
CM = case_when (
(scaffold == 14219) ~ (CM_14219),
TRUE ~ CM)) %>%
mutate (
LG = case_when (
(scaffold == 14219) ~ "L.7",
TRUE ~ LG))
#View(test)
## Keep only markers on the retained full linkage groups (fragment LGs removed;
## L.4 markers were already relabeled L.10 above).
positions_colocated_filled<-filter(positions_colocated_filled, !(LG %in% c("L.1", "L.2","L.4","L.6","L.9","L.11", "L.12")))
write.csv(positions_colocated_filled, "Final_edited_markers_positions_colocated_filled.csv", quote = F, row.names = F)
#################### Move small scaffolds from small linkage groups to parallel full linkage groups NC #######
## Manual curation of the NC map, analogous to the TX curation: markers on
## fragment linkage groups are re-anchored onto the parallel full LG via shared
## anchor markers, fragment LGs are dropped, and the result is written out.
positions_colocated_filled<-read.csv("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits/Old_Map_NC_Final_markers_positions_colocated_filled.csv", stringsAsFactors = F, row.names=1)
#positions_colocated_filled<-read.csv("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits/NC_Final_markers_positions_colocated_filled.csv", stringsAsFactors = F, row.names=1)
positions_colocated_filled<-data.frame(positions_colocated_filled)
colnames(positions_colocated_filled)
str(positions_colocated_filled)
## Split marker IDs into scaffold and bp position ('se=' partially matches
## separate()'s 'sep' argument - works, but should be spelled out).
positions_colocated_filled<-separate(positions_colocated_filled, marker, into=c(NA, "scaffold", "position"), se=("\\_"), remove = F) %>% separate(., scaffold, into=c("scaffold", NA, NA), se=c("\\-"), remove = F)
#17271, 17272, 4250, 6377 - 17040 4008573 L.10
## Move the four listed scaffolds from fragment L.11 onto L.10 at the position
## of their colocated scaffold-17040 anchor marker.
positions_colocated_filled<-positions_colocated_filled %>%
mutate (
CM = case_when (
LG == "L.11" & (scaffold == 4250 | scaffold == 6377 | scaffold == 17271 | scaffold == 17272) ~ (subset(positions_colocated_filled, positions_colocated_filled$marker == "ScnbKXS_17040-HRSCAF-21160_4008573")$CM),
TRUE ~ CM)) %>%
mutate (
LG = case_when (
LG == "L.11" & (scaffold == 4250 | scaffold == 6377 | scaffold == 17271 | scaffold == 17272) ~ "L.10",
TRUE ~ LG))
#7031 - 17265 18616492 L.10
#8361 - 11619 3218474 L.10 - ((11619 3213180 L.5)-(8361 116661 L.5))
## Re-anchor scaffold 8361 from L.5 onto L.10, preserving its offset from the
## shared scaffold-11619 markers (formula sketched in the comment above).
positions_colocated_filled<-positions_colocated_filled %>%
mutate (
CM = case_when (
LG == "L.5" & (scaffold == 8361) ~ (subset(positions_colocated_filled, positions_colocated_filled$marker == "ScnbKXS_11619-HRSCAF-13537_3218474")$CM - (((subset(positions_colocated_filled, positions_colocated_filled$marker == "ScnbKXS_11619-HRSCAF-13537_3213180")$CM) - ((subset(positions_colocated_filled, positions_colocated_filled$marker == "ScnbKXS_8361-HRSCAF-9758_116661")$CM))))),
TRUE ~ CM)) %>%
mutate (
LG = case_when (
LG == "L.5" & (scaffold == 8361) ~ "L.10",
TRUE ~ LG))
View(positions_colocated_filled)
## Keep only the four full NC linkage groups.
positions_colocated_filled<-filter(positions_colocated_filled, (LG %in% c("L.1", "L.7","L.9","L.10")))
write.csv(positions_colocated_filled, "Old_Map_NC_Final_edited_markers_positions_colocated_filled.csv", quote = F, row.names = F)
################ Reformat map and marker files for chromonomer ####################
## Export the edited map in the three-column (LG, marker, cM) tab-separated
## layout Chromonomer expects, plus a plain one-column list of marker names.
## The TX and NC exports share the same code because the same object name
## (positions_colocated_filled) holds whichever dataset was processed above.
setwd("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/Chromonomer_final")
head(positions_colocated_filled)
Chromonomer_input_map<-positions_colocated_filled[,c(6,3,7)] #LG, marker, position
## BUG FIX: the separator was "/t" (a literal slash + t), which would have
## produced a map file Chromonomer cannot parse; it must be a tab ("\t"),
## matching the NC export below.
write.table(Chromonomer_input_map, "TX_chromonomer_input_map_final.txt", quote=F, row.names = F,col.names = F, sep = "\t", eol = "\n")
Chromonomer_marker_names<-positions_colocated_filled[,c(3)]
write.table(Chromonomer_marker_names, "TX_chromonomer_markers_final.txt", quote=F, row.names = F,col.names = F, eol = "\n")
Chromonomer_input_map<-positions_colocated_filled[,c(6,3,7)] #LG, marker, position
write.table(Chromonomer_input_map, "Old_Map_NC_chromonomer_input_map_final.txt", quote=F, row.names = F,col.names = F, sep = "\t", eol = "\n")
Chromonomer_marker_names<-positions_colocated_filled[,c(3)]
write.table(Chromonomer_marker_names, "Old_Map_NC_chromonomer_markers_final.txt", quote=F, row.names = F,col.names = F, eol = "\n")
############## Format markers for chromonomer AGP conversion script ###################
## Builds the marker table consumed by the Chromonomer AGP conversion script
## (scaffold, position, LG, cM) and summarizes per-linkage-group map lengths.
setwd("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/Chromonomer_final")
library(tidyr);library(dplyr)
## NOTE(review): 'row.names = F' is not a standard read.csv row.names value
## (expects a column spec or NULL) - likely a leftover from a write.csv call;
## the D:/ variant below, which is the one that takes effect, omits it. TODO
## confirm and remove.
positions_colocated_filled<-read.csv("E:/Users/Joanna/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits/Final_edited_markers_positions_colocated_filled.csv", header=T, stringsAsFactors = F, row.names = F)
positions_colocated_filled<-read.csv("D:/Dropbox/Dropbox/Professional/University_of_Toronto/Genomics/Rumex_genome_paper/Finalized_analyses/ASMAP_final/Map_manual_edits/Final_edited_markers_positions_colocated_filled.csv", header=T, stringsAsFactors = F)
colnames(positions_colocated_filled)
#conversion_markers<-positions_colocated_filled %>% separate(marker, int0)
#positions_colocated_filled<-separate(positions_colocated_filled, marker, into=c(NA, "scaffold", "position"), se=("//_"), remove = F) %>% separate(., scaffold, into=c("scaffold", NA, NA), se=c("//-"), remove = F)
## Columns 5,4,6,7 - presumably scaffold, position, LG, CM of the edited table;
## TODO confirm against the file's column order.
markers_for_conversion<-positions_colocated_filled[,c(5,4,6, 7)]
colnames(markers_for_conversion)
## Strip the "L." prefix from linkage-group labels (e.g. "L.10" -> "10").
markers_for_conversion<-separate(markers_for_conversion, LG, into=c(NA, "LG"), sep="\\.")
#write.csv(markers_for_conversion, "TXFinal_markers_for_conversion.csv", quote = F)
#write.csv(markers_for_conversion, "Old_Map_NCFinal_markers_for_conversion.csv", quote = F)
colnames(full_map)
## Map length per linkage group = maximum cM position. NOTE(review): the name
## 'lengths' shadows base::lengths() for non-function lookups; harmless here,
## but a more specific name would be safer.
lengths<-full_map %>% group_by(X) %>% summarize(lglength=max(X.1))
View(lengths)
## Writes to a file literally named "lengths" (no extension), with default
## quoting and row names.
write.table(lengths, "lengths")
|
93e3b9c0e58a04654ceb981fef9d9c8afc123df2
|
2152d53466558f22bba1c8491a83e262b916c524
|
/man/mscon.Rd
|
4c6466e4b2d3c8c19287b742a078439775d245d3
|
[] |
no_license
|
seokhoonj/msodbc
|
6e76659adcbb8291dd00417173724d1ec7661d52
|
94a4522eebc686a36440d6fe95f82095b58ad542
|
refs/heads/master
| 2023-01-28T20:26:15.520762
| 2020-12-15T06:50:50
| 2020-12-15T06:50:50
| 320,268,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 435
|
rd
|
mscon.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mscon.R
\name{mscon}
\alias{mscon}
\title{Connect to mssql server}
\usage{
mscon(dsn, database, path, DBMSencoding = "UTF-8")
}
\arguments{
\item{dsn}{is a data source name}
\item{database}{is a database}
\item{path}{is a search path}
\item{DBMSencoding}{is a dbms encoding}
}
\description{
Connect to mssql server
}
\keyword{connector}
\keyword{ms-sql}
|
c964d7514a60c2c7454fee1b2183ca4c2e257d84
|
d9396697675606d97824a787a7b0f8c19619b3f8
|
/man/hits.Rd
|
27647eec477fe382925c6462da60c4cdaa8077c4
|
[] |
no_license
|
qchengray/sommer
|
45859dbd6550a6c7069ab4a2d1904da6ed41f6b1
|
aca3863a7e1df8b462212c40468e44083c76812f
|
refs/heads/master
| 2021-01-23T04:38:57.350625
| 2017-08-24T07:25:49
| 2017-08-24T07:25:49
| 102,450,483
| 1
| 0
| null | 2017-09-05T07:45:38
| 2017-09-05T07:45:37
| null |
UTF-8
|
R
| false
| false
| 5,315
|
rd
|
hits.Rd
|
\name{hits}
\alias{hits}
\title{Creating a fixed effect matrix with significant GWAS markers}
\description{
This function was designed to create a design matrix with the significant markers found by a GWAS analysis in order to use it in the GBLUP or genomic prediction analysis to increase the prediction accuracy. The method is based on several papers suggesting that the use of significant markers associated with the causal variant may increase the prediction accuracy under the GBLUP framework. For example, the Bernardo (2014), Gianola et al. (2014), and Abdollahi Arpanahi et al. (2015) papers used the top 10-20 GWAS hit markers as fixed effects, increasing the prediction accuracy of the genomic prediction model. This phenomenon has been explained by arguing that the mixed model shrinks the effect of markers with big effects too much, and therefore using such markers as fixed effects causes a dramatic increase in the prediction accuracy of a model using them. It has to be noted that for traits of low h2, or with a high number of QTLs with small effects, this method doesn't help and can actually reduce the prediction accuracy.
}
\usage{
hits(gwasm, nmar=10, threshold=1, pick=FALSE, method="cluster", only.mark=FALSE,
plotting=TRUE)
}
\arguments{
\item{gwasm}{a GWAS model fitted using mmer}
\item{nmar}{the number of GWAS hits (markers) to be used for designing the incidence matrix. It finds the markers with maximum significance value and uses them to create the design matrix. The default is the top 10 markers.}
\item{threshold}{a numeric value indicating the minimum significance value to be used for finding the significant markers. The default is 1.}
\item{pick}{a TRUE/FALSE value indicating if the user prefers to pick the peaks manually. The default is FALSE, leaving the peak selection to one of the two methods available. If set to TRUE, R will allow the user to pick the peaks by clicking on the peaks and typing 'Esc' when done selecting them.}
\item{method}{one of the two methods available; "cluster" performs peak selection by making clusters using k-means (random clusters), whereas "maximum" takes the markers with highest log p.values and select those for designing the model matrix.}
\item{only.mark}{a TRUE/FALSE statement indicating if the only output should be the marker names or the incidence matrix. By default it returns the incidence matrix but if turned to TRUE will return only the marker names. Useful when want to identify significant markers in different populations.}
\item{plotting}{a TRUE/FALSE statement indicating if the program should plot the GWAS showing which markers were selected.}
}
\value{
If all parameters are correctly indicated the program will return:
\describe{
\item{$X2}{a design matrix with individuals in rows and markers in columns to be used for the GBLUP model based on the GWAS model provided.}
}
}
\references{
Bernardo, Rex. 2014. Genomewide selection when major genes are known. Crop Science 54.1:68-75.
Gianola, Daniel, et al. 2014. Enhancing genome-enabled prediction by bagging genomic BLUP. PLoS One 9.4: e91693.
Abdollahi Arpanahi R, Morota G, Valente BD, Kranis A, Rosa GJM, Gianola D. 2015. Assessment of bagging GBLUP for whole genome prediction of broiler chicken traits. Journal of Animal Breeding and Genetics 132:218-228.
Covarrubias-Pazaran G (2016) Genome assisted prediction of quantitative traits using the R package sommer. PLoS ONE 11(6): doi:10.1371/journal.pone.0156744
}
\author{
Giovanny Covarrubias-Pazaran
}
\examples{
####=========================================####
#### For CRAN time limitations most lines in the
#### examples are silenced with a single '#' mark,
#### remove them and run the examples
###=========================================####
data(CPdata)
####=========================================####
#### convert markers to numeric format
####=========================================####
## fit a model including additive and dominance effects
y <- CPpheno$color
Za <- diag(length(y))
A <- A.mat(CPgeno) # additive relationship matrix
####=========================================####
#### compare prediction accuracies between
#### GBLUP and hits GBLUP
####=========================================####
set.seed(1234)
y.trn <- y # for prediction accuracy
ww <- sample(c(1:dim(Za)[1]),72) # delete data for one fifth of the population
y.trn[ww] <- NA
####=========================================####
#### identify major genes and create the hit matrix
####=========================================####
#ETA.A <- list(list(Z=Za,K=A))
#ans.GWAS <- mmer(Y=y.trn, Z=ETA.A, W=CPgeno)
#summary(ans.GWAS)
####=========================================####
#### run the hits function to design the matrix
#### for top GWAS hits
####=========================================####
#X1 <- hits(ans.GWAS);head(X1); dim(X1)
#ETA.A <- list(list(Z=Za,K=A))
#ans.A <- mmer(Y=y.trn, Z=ETA.A) # GBLUP
#ans.AF <- mmer(Y=y.trn, X=X1, Z=ETA.A) # hits-GBLUP (GWAS hits as fixed effects)
#cor(ans.A$fitted.y[ww], y[ww], use="pairwise.complete.obs") # GBLUP
#cor(ans.AF$fitted.y[ww], y[ww], use="pairwise.complete.obs") # hits-GBLUP
#### little increase in prediction accuracy
}
\seealso{The core functions of the package \code{\link{mmer}} and \code{\link{mmer2}}}
|
feeb042ffd18b5c14cc2eae1ef7568a827a2a002
|
a6c5ec2e83e02db1436abc6f9508ce101272d25d
|
/R/Day4.R
|
d149c56ebb9fc205f81e6bd5a0051e48ac906c31
|
[] |
no_license
|
jonathanjohn47/R-assignments
|
740065f67e6a0198a1d8c48a05ab6c26fe5c39f6
|
8fe75869168ef1c84379816c5c6b76ccd0a8924d
|
refs/heads/master
| 2020-04-08T07:15:06.777407
| 2018-11-26T08:08:00
| 2018-11-26T08:08:00
| 159,132,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,044
|
r
|
Day4.R
|
# Build a small data frame of three people and derive BMI / health flags.
sname <- c("Alex", "Lilly", "Mark")
age <- c(25, 31, 23)
height <- c(177, 163, 190)  # cm
weight <- c(57, 69, 83)     # kg
sex <- c("F", "F", "M")
frame1 <- data.frame(sname = sname, age = age, height = height, weight = weight, sex = sex)
# frame1[1] is a one-column data.frame (not a vector), so frame2 inherits the
# 'sname' column name from it.
frame2 <- data.frame(sname = frame1[1], working = c("Yes", "No", "No"))
frame3 <- cbind(frame1, frame2[2])
# Column-wise classes of frame1.
lapply(frame1, class)
# Mean height (column 3).
mean(frame1[,3])
# BMI = weight (kg) / height (m)^2; the 10000 factor converts cm^2 to m^2.
# Note '**' is a legacy synonym of '^'.
BMI <- 10000*frame1[,4]/(frame1[,3]**2)
cbind(frame1, BMI)
Healthy <- ifelse(BMI<25, "Yes", "No")
cbind(frame1, Healthy)
#------------------------------
# Interactive file reads: file.choose() opens a file picker dialog.
getwd()
r1<- read.table(file.choose())
r2<- read.table(file.choose())
#------------------------------
# 3-D array with named dimensions. NOTE(review): assigning to 'c' shadows the
# vector constructor for non-function lookups only, so c("m1", ...) still
# resolves to base::c(); dim = c(2,3,3) holds 18 cells, so the last two values
# of 1:20 are silently unused.
r<-c("r1", "r2")
c<-c("c1", "c2", "c3")
m<-c("m1", "m2", "m3")
A <- array(1:20, dim=c(2,3,3), list(r, c, m))
#------------------------------
# Subset mtcars columns, then stack its first and last five rows.
mtcars
B <- data.frame(mpg=mtcars$mpg, cyl=mtcars$cyl, hp=mtcars$hp)
C <- data.frame(head(mtcars, 5))
D <- data.frame(tail(mtcars, 5))
rbind(C, D)
#------------------------------
# add(): returns the sum of its two arguments; both default to 1.
# The calls below demonstrate R's argument-matching rules.
add <- function(a = 1, b = 1) {
  a + b
}
# Second argument left empty -> b falls back to its default (result: 4).
add(3,)
# First argument left empty -> a falls back to its default (result: 6).
add(, 5)
# Both arguments supplied positionally (result: 8).
add(5, 3)
# Named arguments may be supplied in any order (result: 8).
add(b=3, a=5)
|
e7b90e99d61d3fe7117d78bd7cf962822f1bce8f
|
a720b0c2326be5bf7b42f47ed3598ec89bb46fc7
|
/R/MyluParams.R
|
e2df8322eecbff7085c13345debe8bd358ad8813
|
[] |
no_license
|
cghaase/batwintor-1
|
4e877d7a6f925ff95a976893fc9d2701a5c428e1
|
59dcf1e8d02677246f0932550ee977cdeab29fbf
|
refs/heads/master
| 2021-06-08T20:25:49.334893
| 2018-03-12T22:44:02
| 2018-03-12T22:44:02
| 113,110,505
| 0
| 0
| null | 2017-12-05T00:21:51
| 2017-12-05T00:21:51
| null |
UTF-8
|
R
| false
| false
| 2,196
|
r
|
MyluParams.R
|
#' Metabolic parameters for \emph{Myotis lucifugus}.
#'
#' A dataset containing information on euthermic, torpid metabolic rates,
#' surface area estimations, and other parameters used throughout this
#' package for \emph{Myotis lucifugus} (little brown bat).
#'
#' @format a dataset containing 24 measurements for 1 species
#' \describe{
#' \item{RMR}{Resting metabolic rate in volume O2 mL/h/g}
#' \item{TMRmin}{minimum metabolic rate during torpor in volume O2 mL/h/g}
#' \item{Teu}{euthermic temperature in degrees C}
#' \item{Tlc}{lower critical temperature in degrees C}
#' \item{Ttormin}{Temperature at which TMRmin is achieved in degrees C}
#' \item{Ceu}{conductance during euthermic temperatures (TODO units)}
#' \item{Ct}{conductance during torpor (TODO units)}
#' \item{S}{specific heat of tissue (TODO units)}
#' \item{ttormax}{maximal length of time for a bout of torpor in hours}
#' \item{teu}{time spent euthermic during a bout of torpor in hours}
#' \item{mass}{animal mass in grams}
#' \item{WR}{warming rate from torpor to euthermic temperature in degrees C/
#' hour}
#' \item{CR}{cooling rate from euthermic temperature to torpor in degrees C/
#' hour}
#' \item{rEWL}{rate of evaporative water loss}
#' \item{wing.area}{No clue what that one is.}
#' \item{colony.size}{estimate of the number of individuals in a hibernaculum}
#' \item{SA.body}{estimate of the body surface area in cm^2}
#' \item{SA.wing}{estimate of the wing surface area in cm^2}
#' \item{pmass}{percent of body mass selected to trigger evaporative water
#' loss arousals}
#' \item{mrPd}{No clue what this one is. TODO}
#' \item{aPd}{No clue what this one is. TODO}
#' \item{rPd}{No clue what this one is. TODO}
#'
#' }
#' @details Within this data set, and generally throughout the package
#' \code{T} will represent a temperature value, while \code{t} will represent
#' a time value.
#'
#' Naming of variables is largely drawn from the "Physiological
#' Ecology and Energetics of Bats" by Speakman and Thomas contained within
#' Bat Ecology.
#'
#' @seealso data("bat.params"), data("fung.params")
#'
#' @references Haymen et al. 2016
#'
"mylu.params"
|
95dbde326ea8085ad5403750e17741391a18645a
|
fca8d8412242444554dc14cef0684c53b017f18e
|
/plot4.R
|
9fcac2b48892c001772c92b253307017ceaf1cbf
|
[] |
no_license
|
jacobjozwiak/ExData_Plotting1
|
a2322ea41f9356f942998f5925d9cfa9c629a8c7
|
f91d2e974dcb8a004af34509cce5a63899782649
|
refs/heads/master
| 2022-07-10T01:43:01.700359
| 2020-05-11T06:29:06
| 2020-05-11T06:29:06
| 262,956,990
| 0
| 0
| null | 2020-05-11T06:25:27
| 2020-05-11T06:25:27
| null |
UTF-8
|
R
| false
| false
| 1,118
|
r
|
plot4.R
|
# plot4.R
# Author: Jacob Jozwiak
#
# Creates four charts in a 2x2 configuration for the dates 1-2 of February 2007.
source("load_data.R")
# plot4(): draws four time-series panels (global active power, energy
# sub-metering, voltage, global reactive power) for 1-2 February 2007 and
# saves the figure to plot4.png in the working directory.
# Depends on load_data() sourced from load_data.R; the returned data frame
# must carry Date_time, Global_active_power, Sub_metering_1..3, Voltage and
# Global_reactive_power columns - TODO confirm against load_data.R.
plot4 <- function() {
data <- load_data(c("1/2/2007", "2/2/2007"))
# Open the PNG device; everything drawn below goes into plot4.png.
png(filename = "plot4.png")
with(data, {
#2 columns and 2 rows; mfcol fills panels column-first (left column
#top-to-bottom, then right column), matching the panel labels below.
par(mfcol = c(2, 2))
#Top Left Plot
plot(Date_time, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
#Bottom Left Plot: blank frame first, then one line per sub-meter.
plot(Date_time, Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = "")
lines(Date_time, Sub_metering_1)
lines(Date_time, Sub_metering_2, col = "red")
lines(Date_time, Sub_metering_3, col = "blue")
#bty = "n" suppresses the legend box to keep the small panel uncluttered.
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty = 1, bty = "n")
#Top Right Plot
plot(Date_time, Voltage, type = "l", xlab = "datetime")
#Bottom Right Plot
plot(Date_time, Global_reactive_power, type = "l", xlab = "datetime")
})
# Close the device to flush plot4.png to disk.
dev.off()
}
|
1f6381a54747c670f20aa798634f58f216edf2da
|
f28de1f5e81cd0a9eef4bd749366b5fd09a6b764
|
/NBA Salary Analysis/NBA Salary Analysis.R
|
d8fa19bdfa052b9fac6e8b731e3df405bdf9dd54
|
[] |
no_license
|
zackbaker23/Machine-Learning-Data-Analysis-Projects
|
260e049f5042177c5eed9014b4c22dbd6c0dfd2b
|
4fa2352a577502b19607bd097a84f465964ece9e
|
refs/heads/master
| 2023-02-12T19:41:34.877077
| 2021-01-15T18:39:00
| 2021-01-15T18:39:00
| 282,029,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,010
|
r
|
NBA Salary Analysis.R
|
#Load required packages
library(readr)
library(alr4)
library(dplyr)
library(ggplot2)
library(olsrr)
#Read the data file into nba_data (relative path - run from the project root).
nba_data <- read_csv("NBA Data.csv")
View(nba_data)
################## DATA MANIPULATION #####################################
#Make followers and turnovers strictly positive.
#This is necessary for the Box-Cox transformation we will perform later
#(power transforms require strictly positive values).
nba_data$followers <- nba_data$followers+0.01
nba_data$tov <- nba_data$tov+0.001
################ NEED FOR PREDICTOR TRANSFORMATION??? ##########################
#Variable scatterplot - followers, ftar clearly need transformation.
pairs(~ salary + age + netrtg + astpct + rebpct +
usg + followers + tov + ftar + ws, data = nba_data)
#Multivariate Box-Cox transformation of predictors: estimates the power for
#each predictor that best normalizes the joint distribution.
summary(trp <- powerTransform(cbind(age,astpct,rebpct,followers,usg,ftar,ws,tov)~1, data=nba_data))
############################# MODEL 1 ##########################################
#Initial model with predictors transformed according to MBC
#Contains all potential predictors.
init_model_1 <- lm(salary~pos+log(age)+netrtg+log(astpct)+rebpct+
usg+log(followers)+tov+I(ftar^(-1/2))+I(ws^(1/2)), data=nba_data)
summary(init_model_1)
#MODEL 1: Stepwise regression with transformed predictors
#and untransformed response. Only want to transform the
#response variable if diagnostics look bad.
step(init_model_1, scope=~1, direction="backward")
#Refit the model chosen by backward selection.
summary(m1 <- lm(salary ~ log(age) + usg + log(followers) + tov +
I(ws^(1/2)), data = nba_data))
################## RESIDUAL DIAGNOSTICS FOR MODEL 1 #############################
#Plot residual vs. fitted values
ols_plot_resid_fit(m1)
#Normal qqplot of residuals
ols_plot_resid_qq(m1)
#Residual histogram
ols_plot_resid_hist(m1)
################ NEED FOR RESPONSE TRANSFORMATION??? ##########################
#Transformation of response? Three complementary checks on the power for salary.
inverseResponsePlot(init_model_1)
boxCox(init_model_1)
summary(powerTransform(init_model_1))
############################ MODEL 2 ##########################################
#Model with transformed predictors and Salary^(1/2)
init_model_2 <- lm(I(salary^(1/2))~pos+log(age)+netrtg+log(astpct)+rebpct+
usg+log(followers)+tov+I(ftar^(-1/2))+I(ws^(1/2)), data=nba_data)
summary(init_model_2)
#Backward selection algorithm for new model
step(init_model_2, scope=~1, direction="backward")
#MODEL 2: Model with transformed predictors and response
m2 <- lm(I(salary^(1/2)) ~ log(age) + netrtg + usg + log(followers) +
tov + I(ws^(1/2)), data = nba_data)
summary(m2)
######################### RESIDUAL DIAGNOSTICS FOR M2 ###########################
#Plot residual vs. fitted values
ols_plot_resid_fit(m2)
#Normal qqplot of residuals
ols_plot_resid_qq(m2)
#Residual histogram
ols_plot_resid_hist(m2)
#Cook's distance plot
ols_plot_cooksd_bar(m2)
#DFFITS influence plot
ols_plot_dffits(m2)
|
8180c7fbddf1194fecb666305a4506ca60888e91
|
1fecedfbcab8c27598e89ea9192b18663d6f6922
|
/man/qualitySubpops.Rd
|
a57c03e7acf3394013c6d180549535d51a1fa6ca
|
[] |
no_license
|
cran/ConnMatTools
|
5ece4ff5e09bf1e4b6a07fcd5a437acbda90694e
|
1020c4b9aab79ac76dd58a141e7bd0dccc2e07b1
|
refs/heads/master
| 2020-12-25T17:56:15.825896
| 2020-02-03T09:30:02
| 2020-02-03T09:30:02
| 18,805,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,378
|
rd
|
qualitySubpops.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jacobi_etal_2012.R
\encoding{UTF-8}
\name{qualitySubpops}
\alias{qualitySubpops}
\title{Quality measure for subpopulation division}
\usage{
qualitySubpops(subpops.lst, conn.mat)
}
\arguments{
\item{subpops.lst}{A list whose elements are vectors of indices for each
subpopulation. If a vector of integers is given, then
\code{\link{subpopsVectorToList}} is applied to convert it to a list of
subpopulations.}
\item{conn.mat}{A square connectivity matrix.}
}
\value{
The quality statistic.
A smaller value of the quality statistic indicates less leakage.
}
\description{
A measure of the leakage between subpopulations for a given division of the
connectivity matrix into subpopulations. This statistic is equal to 1 -
mean(RLR) of the reduced connectivity matrix, where RLR=relative local
retention (\code{\link{relativeLocalRetention}}), i.e., the fraction of
settling individuals that originated at their site of settlement.
}
\references{
Jacobi, M. N., Andre, C., Doos, K., and Jonsson, P. R. 2012.
Identification of subpopulations from connectivity matrices. Ecography, 35:
1004-1016.
}
\seealso{
See also \code{\link{optimalSplitConnMat}},
\code{\link{subpopsVectorToList}}, \code{\link{relativeLocalRetention}}
}
\author{
David M. Kaplan \email{dmkaplan2000@gmail.com}
}
|
eaaf6488ce8da1be9f88d351888a54cf90ee8e0f
|
9ab8188d560f8983a04ccde1222582a34e0460c2
|
/R/SpatialProvRawData_function.R
|
535ee1b79dc05b392930605b5b1b54d94c3ef91e
|
[] |
no_license
|
ArdernHB/GeoOrigins
|
02130869539e786bcc0f9c0396634c95f5d81a14
|
540ff84e5244d7b5a6f6e8ad3dbd04a68fd2a02a
|
refs/heads/master
| 2022-12-03T15:36:05.646386
| 2022-11-15T07:27:40
| 2022-11-15T07:27:40
| 192,945,339
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,776
|
r
|
SpatialProvRawData_function.R
|
#' Spatially provenance a specimen from raw trait data
#'
#' This function takes the raw variables of an unknown specimen and reference specimens
#' and uses euclidean distances to calculate a likely spatial provenance. Note that this
#' procedure can only be applied to one unknown specimen at a time. Shape variables
#' can be specified and if so Procrustes distances can be calculated. It has two applications
#' either: calculating a specimens' provenance, or alternatively it can be used to calculate the
#' minimum correlation coefficient needed to correctly identify a known specimen at its true
#' collection location. The second application of this function can work as a correct cross-
#' validation process if looped, but see \code{IDbyDistanceRawDataCCV} function which does this automatically
#' in a leave-one-out process. However, if a cross-validation process that removed more than one specimen
#' from the reference dataset at a time is required then it is advised that this be applied using
#' this function.
#' @param LatLongs a matrix of n rows by 2 columns where n is the number of reference specimens in your dataset and the columns are Latitude and Longitude values in that order. These latitude-longitude coordinates should be of the locations of the reference specimens.
#' @param TargetData is a vector of unknown specimen data. If it is geometric morphometric data it should be a vector of superimposed coordinates in the format X1, Y1, X2, Y2... etc. (or a vector of other standardised variables that can appropriately have euclidean distances calculated between it and reference variables). NB if applied to a reference collection specimen be sure to remove it from the RefData dataset.
#' @param RefData is a matrix of Reference specimen data where the rows are the individual reference specimens and the columns are the variables in the same order as the TargetData vector.
#' @param ShapeData logical indicating whether the data is geometric morphometric shape data that requires superimposition. Default set to TRUE
#' @param ShapeDim integer either 2 or 3 to indicate the dimensions of landmark coordinates if the data is geometric morphometric data.
#' @param DistMethod determines what kind of distance calculation should be used, either Euclidean "Euc" or Procrustes "Proc". If the user wishes to use another distance or dissimilarity please use the IDbyDistanceDistInput function.
#' @param LongRange is a vector of 2 elements defining the maximum and minimum Longitude values that the provenancing method should explore. This will also define the mapping range in the final plotted output.
#' @param LatRange is a vector of 2 elements defining the maximum and minimum Latitude values that the provenancing method should explore. This will also define the mapping range in the final plotted output.
#' @param RangeSamp is an integer vector of 1 or 2 elements that defines the resolution of spatial sampling. If one element is provided then both the latitude and longitude ranges are equally and evenly sampled using this value. If 2 elements are provided they should be in the order of latitude longitude and each range will be evenly sampled with its respective value.
#' @param Verbose logical whether or not a matrix of spatial correlation values is returned or not. Default is set to TRUE.
#' @param PrintProg logical whether or not to print a progress bar. Default set to FALSE.
#' @param Validate logical whether or not to run a correct cross-validation analysis to find the lowest required correlation value for correct identification.
#' @param ValidLatLongs if the process is carried out on a specimen of known location `(e.g. Validate=TRUE)`, then the latitude longitude coordinates for that location should be provided here in that order.
#' @param PlotRes logical whether or not to plot the provenancing map with heat values of most likely spatial origin.
#' @param HeatHue numeric vector of 2 elements each between 0 and 1. The first should be the hue value on the HSV scale; the second value should be the level of transparency of the colour used.
#' @param TileSize numeric to dictate the pixel size of the heat mapping colour values.
#' @param PlotProv logical if the map should be printed with a polygon demarcating a contour at a user-defined correlation value.
#' @param PlotValCor numeric correlation value that is used to determine the most likely origin of the specimen. This value can be calculated by using the correct cross-validation method and identifying the correlation value that will correctly identify a desired percentage of specimens (e.g. 95\%).
#' @param Method determines what kind of correlation coefficient should be used, either "Spearman" or "Pearson". Spearman's ranked correlation coefficient does not assume a linear relationship between geographic and trait distances, whereas Pearson's coefficient does.
#' @param PacificCent logical determines whether the plotted map and analyses should be centred on the Pacific. Default set to FALSE.
#' @return If Verbose is TRUE then a dataframe of all values for every sampled grid reference is returned. If Verbose is FALSE then only those grid references with the highest correlation values are returned.
#' @details When used for shape data and for Procrustes distances this function makes use of the \code{\link[shapes]{procGPA}} and \code{\link[shapes]{procdist}} functions from the \code{shapes} package. When Euclidean distances are employed the \code{\link[stats]{dist}} function of the base \code{stats} package is used.
#' This method also makes use of the \code{\link[stats]{cor.test}} function from the \code{stats} package. When the \code{PrintProg} is set to TRUE, the \code{\link[svMisc]{progress}} function of the \code{svMisc} package is used.
#' The map plotting of this function makes use of the functions of the \code{maps} package.
#'
#' @section Citations:
#'
#' Original S code by Richard A. Becker, Allan R. Wilks. R version by Ray Brownrigg.
#' Enhancements by Thomas P Minka and Alex Deckmyn. (2017). maps: Draw Geographical Maps. R
#' package version 3.2.0. https://CRAN.R-project.org/package=maps
#'
#' Ian L. Dryden (2016). shapes: Statistical Shape Analysis. R package version 1.1-13.
#' https://CRAN.R-project.org/package=shapes
#'
#' Grosjean, Ph. (2016). svMisc: SciViews-R. UMONS, Mons, Belgium.
#' http://www.sciviews.org/SciViews-R.
#'
#' @author Ardern Hulme-Beaman
#'
#' @examples
#' Range.Exp <- .5
#'
#' Long.Range <- c(floor(min(Rpraetor$Lat.Long$Long))
#' -Range.Exp,ceiling(max(Rpraetor$Lat.Long$Long)+Range.Exp))
#' Lat.Range <- c(floor(min(Rpraetor$Lat.Long$Lat))
#' -Range.Exp,ceiling(max(Rpraetor$Lat.Long$Lat)+Range.Exp))
#'
#' RpraetorDataMat <- Array2Mat(Rpraetor$LMs)
#'
#' rThres <- IDbyDistanceRawDataCCV(LatLongs = Rpraetor$Lat.Long,
#' RefData = RpraetorDataMat,
#' ShapeData=TRUE,
#' ShapeDim=2,
#' DistMethod= "Proc",
#' Verbose = TRUE,
#' ProvConfidence = .95,
#' PrintProg = FALSE,
#' Method = 'Spearman')
#'
#' R.Samp <- c(12, 42)
#'
#' IDbyDistanceRawData(LatLongs = Rpraetor$Lat.Long[-1,],
#' TargetData = RpraetorDataMat[1,],
#' RefData = RpraetorDataMat[-1,],
#' ShapeData = TRUE,
#' ShapeDim = 2,
#' DistMethod = "Proc",
#' LongRange = Long.Range,
#' LatRange = Lat.Range,
#' RangeSamp = R.Samp,
#' Verbose = FALSE,
#' Validate = FALSE,
#' PlotValCor = rThres$`Provenancing.Correlation.95%.Confidence`,
#' PlotProv = TRUE,
#' Method = 'Spearman')
#'
#' points(x = Rpraetor$Lat.Long$Long[1], y=Rpraetor$Lat.Long$Lat[1], col='blue', pch=16)
#'
#'
#' @export
#'
#'
IDbyDistanceRawData <- function(LatLongs, TargetData, RefData, ShapeData=TRUE, ShapeDim=2, DistMethod=c("Euc", "Proc"), LongRange=c(0,0), LatRange=c(0,0), RangeSamp=10, Verbose=TRUE, PrintProg=FALSE, Validate= FALSE, ValidLatLongs, PlotRes=TRUE, HeatHue= c(.15, 1), TileSize=2, PlotProv=FALSE, PlotValCor, Method=c('Spearman', 'Pearson'), PacificCent=FALSE){
  # Shared package-level sanity check of the user-supplied arguments.
  UserInputAssessment(LatLongs, RefData, Method, RefDistMat = NULL, DistVec = NULL)
  #making LatLongs a dataframe
  LatLongs <- as.data.frame(LatLongs)
  colnames(LatLongs) <- c("Lats", "Longs")
  #organising data for ease of analysis
  #combining ref and target specimens with target first
  # For shape data the target and reference specimens are superimposed
  # together (GPA) so they all share one shape space; the target stays row 1.
  if (ShapeData==TRUE){
    TotalShape.raw <- rbind(TargetData, RefData)
    gpaRes <- shapes::procGPA(Mat2Array(TotalShape.raw, LMdim = ShapeDim))
    TotalShape <- Array2Mat(gpaRes$rotated)
  } else {
    TotalShape <- rbind(TargetData, RefData)
  }
  #calculating euclidean distances between specimens
  #and then extracting distances to target specimen only
  if (DistMethod=="Euc"){
    # Column 1 of the full distance matrix = distances to the target (row 1);
    # the [-1,1] subset drops the target's zero distance to itself.
    ShapeDist <- as.matrix(stats::dist(TotalShape))[-1,1]
  } else if (DistMethod=="Proc" && ShapeData==TRUE){
    # Procrustes distance from each reference specimen to the target (row 1),
    # which is reshaped back into an LM-by-dimension matrix for procdist.
    ShapeDist <- apply(X = Mat2Array(TotalShape[-1,], LMdim=ShapeDim), MARGIN = 3, FUN = shapes::procdist, y=matrix(TotalShape[1,], nrow = length(TotalShape[1,])/ShapeDim, ncol = ShapeDim, byrow = TRUE))
  } else if (DistMethod=="Proc" && ShapeData==FALSE){
    stop("Error: Procrustes distance selected, but ShapeData argument is set to FALSE")
  }
  #creating an empty object to be populated by results
  CoordsHeat <- NULL
  #this function has two operations
  #either it can come up with the correlation value at a specific point (Validate=TRUE)
  #or it can come up with correlation values across the entire map
  #here we have the first option which is useful for the validation process
  #by doing this first option for all the specimens (in a loop) we can build a distribution
  #of the correlation values that will correctly cover the specimens true location
  #but see IDbyDistanceRawDataCCV function for looping to be done automatically
  if (Validate==TRUE){
    names(ValidLatLongs) <- c("Lats", "Longs")
    #This generates all the distances from the point on the map to all the specimen locations
    GeographicDist <- GeoDist2Point(RefLatLongs = LatLongs, TargetLatLong = c(ValidLatLongs$Lats, ValidLatLongs$Longs))
    #just checking the correlation visually
    #plot((ShapeDist), (GeographicDist))
    if (Method=='Spearman'){
      #running the correlation to generate r
      # cor.test warnings (e.g. from tied values) are deliberately suppressed
      CorRes <- suppressWarnings(stats::cor.test(x = ShapeDist, y = GeographicDist, method = "spearman"))
    } else if (Method=='Pearson'){
      CorRes <- suppressWarnings(stats::cor.test(x = ShapeDist, y = GeographicDist, method = "pearson"))
    }
    #combining the individual results and organising them
    results <- c(ValidLatLongs, CorRes$estimate)
    #adding the results of this round to the previous results
    CoordsHeat <- rbind(CoordsHeat, results)
  } else if (Validate==FALSE){
    #this is the second of the above options and not for validation
    #here the function goes through the entire map and generated correlation values
    #the level of detail is set by "RangeSamp"
    #for example if RangeSamp is set to 1 the following procedure
    #will generate correlation values at every degree of the map
    #if the RangeSamp is set to 2 it will generate for every other degree
    #this can speed things up for quick and dirty tests
    #creating a range that will cover the whole geographic area of interest
    #this is for the function to loop through later on
    # RangeSamp may be a single value (same sampling in both directions) or a
    # length-2 vector given as c(Lat, Long).
    if (length(RangeSamp)==1){
      LongSamp <- RangeSamp
      LatSamp <- RangeSamp
    } else if (length(RangeSamp)==2){
      LongSamp <- RangeSamp[2]
      LatSamp <- RangeSamp[1]
    } else if (length(RangeSamp)>2){
      stop("too many dimensions in RangeSamp")
    }
    LongRangeSteps <- (LongRange[2]-LongRange[1])/(LongSamp-2)
    LatRangeSteps <- (LatRange[2]-LatRange[1])/(LatSamp-2)
    #this output for Lat/Long ways provides what the loop should sequence through
    if (PacificCent==TRUE){
      # NOTE(review): seq() is called with a negative step (LongRangeSteps*-1),
      # which only runs without error when the resulting step sign matches the
      # from/to ordering - confirm the expected LongRange ordering for
      # Pacific-centred maps.
      MidRange <- seq(LongRange[1], LongRange[2]+360, by = LongRangeSteps*-1)
      #MidRange[which(MidRange>=180)] <- MidRange[which(MidRange>=180)]-360
      Longways <- c(LongRange[1]+LongRangeSteps, MidRange, LongRange[2]+(LongRangeSteps*-1)+360)
    } else {
      # One extra step is padded beyond each edge so tiles cover the map border.
      Longways <- c(LongRange[1]-LongRangeSteps, seq(LongRange[1], LongRange[2], by = LongRangeSteps), LongRange[2]+LongRangeSteps)
    }
    Latways <- c(LatRange[1]-LatRangeSteps, seq(LatRange[1], LatRange[2], by = LatRangeSteps), LatRange[2]+LatRangeSteps)
    #carrying out iterative analyses across the geographic region defined by LongRange and LatRange
    for (i in Longways){
      for (j in Latways){
        coord <- c(j, i)
        GeographicDist <- GeoDist2Point(RefLatLongs = LatLongs, TargetLatLong = coord)
        if (Method=='Spearman'){
          #running the correlation to generate r
          CorRes <- suppressWarnings(stats::cor.test(x = ShapeDist, y = GeographicDist, method = "spearman"))
        } else if (Method=='Pearson'){
          CorRes <- suppressWarnings(stats::cor.test(x = ShapeDist, y = GeographicDist, method = "pearson"))
        }
        #combining the individual results and organising them
        # NB: as.character(j) makes the whole rbind-ed matrix character;
        # numeric values are recovered later with chr2nu().
        results <- c(as.character(j),i, CorRes$estimate)
        #adding the results of this round to the previous results
        CoordsHeat <- rbind(CoordsHeat, results)
      }
      if (PrintProg==TRUE){
        svMisc::progress(value = which(Longways==i), max.value = length(Longways), progress.bar = FALSE)
        Sys.sleep(0.01)
      }
    }
  }
  #converting the results into a data frame
  CoordsHeat <- as.data.frame(CoordsHeat, row.names = 1:dim(CoordsHeat)[1])
  #naming the variables
  names(CoordsHeat) <- c("Lats", "Longs", "Cor")
  # Pacific-centred plots use the mapdata world2 map; otherwise maps::world.
  if (PacificCent==TRUE){
    PlottingMap <- "mapdata::world2Hires"
  } else {
    PlottingMap <- "world"
  }
  #if there is not a validating the data then we this means
  #we either have the validation result from a previous analyses and we can plot it
  #or we don't and we're not yet interested in it
  if (Validate==FALSE){
    if (PlotRes==TRUE){
      maps::map(PlottingMap, xlim=c(min(Longways), max(Longways)), ylim=c(min(Latways), max(Latways)), interior=FALSE, col="black", bg=graphics::par(bg="white"))
      #creating colour scale from max and min correlation based on which variable we're using
      CoordsHeatNum <- chr2nu(CoordsHeat$Cor)
      # NOTE(review): OriginLoc is computed here but not used below.
      OriginLoc <- CoordsHeat[which(CoordsHeatNum==max(CoordsHeatNum)),]
      # Rescale correlations to [0,1] to drive the saturation of the heat tiles.
      CoordsHeatscaled <- (CoordsHeatNum-min(CoordsHeatNum))/(max(CoordsHeatNum)-min(CoordsHeatNum))
      CoordsHeats <- grDevices::hsv(h = HeatHue[1], v = 1, s = CoordsHeatscaled, alpha = HeatHue[2])
      #plotting the correlations
      graphics::points(x = as.character(CoordsHeat$Longs), y = as.character(CoordsHeat$Lats), pch=15, col=CoordsHeats, cex=TileSize)
      #here if we want to plot a polygon of the region that
      #approximates the region the specimens came from (with whatever level of confidence we have selected)
      if (PlotProv==TRUE){
        # Grid squares whose correlation exceeds the user-supplied threshold
        # form the candidate region of origin.
        Select.95.conf <- which(chr2nu(CoordsHeat$Cor)>PlotValCor)
        ApproxOrigin <- CoordsHeat[Select.95.conf,]
        # Variance is NA when no square passed the threshold, and 0 when all
        # passing squares lie on a single row/column; both preclude a polygon.
        Latvar <- stats::var(chr2nu(ApproxOrigin$Lats))
        Longvar <- stats::var(chr2nu(ApproxOrigin$Longs))
        if (is.na(Latvar)){
          RthresMessage <- paste("A provenancing region was not identified at this r threshold. R threshold set to:", PlotValCor, sep = " ")
          MaxCorMessage <- paste("The maximum correlation value acheived was:", max(chr2nu(CoordsHeat$Cor)), sep = " ")
          WarningMessage <- "This may be because the resolution used is too low so the likely origin region has been overlooked or alternatively the specimen could not be successfully identified because the reference material does not suffieciently reflect the morphology of the unknown specimen. Please adjust the sampling resolution by changing the R.Samp argument or change the r threshold or consider the specimen unidentifiable to a given region."
          warning(paste(RthresMessage, MaxCorMessage, WarningMessage, sep = " "))
        } else if (Latvar==0 || Longvar==0){
          warning("The resolution used for identifying a region of identification is too low to plot a polygon of the likely region of origin. Therefore, the grid squares that were identified by the r threshold as a possible region of origin have been highlighted. Please set the R.Samp argument to a higher value if a polygon of the most likely region of origin is desired.")
          polycol <- grDevices::hsv(h = HeatHue[1], s = 1, v = .8, alpha = HeatHue[2])
          graphics::points(x = as.character(ApproxOrigin$Longs), y = as.character(ApproxOrigin$Lats), pch=22, bg=polycol, cex=TileSize+.1)
        } else {
          # Enough spread in both directions: draw the contour polygon.
          contour.95 <- Construct_contour(ApproxOrigin)
          polycol <- grDevices::hsv(h = HeatHue[1], s = 1, v = .8, alpha = HeatHue[2])
          graphics::polygon(contour.95$x, contour.95$y, col=NA, border=polycol, lwd=2)
        }
      }
      # Redraw coastlines on top of the tiles, then add sampling localities.
      maps::map(PlottingMap, xlim=c(min(Longways), max(Longways)), ylim=c(min(Latways), max(Latways)), interior=FALSE, col="black", bg=graphics::par(bg="white"), add=T)
      graphics::points(x = as.character(LatLongs$Longs), y = as.character(LatLongs$Lats), pch=23, bg='orange', cex=1)
      maps::map.axes()
    }
  }
  # Return either the full coordinate/correlation table, or (quiet mode) only
  # the best-correlated location.
  if (Verbose==TRUE|Validate==TRUE){
    return(CoordsHeat)
  } else {
    OriginLocCor <- CoordsHeat[which(CoordsHeat$Cor==max(chr2nu(CoordsHeat$Cor), na.rm = TRUE)),]
    return(list(Cor=OriginLocCor))
  }
}
#' Spatial Provenancing Correct Cross-Validation calculation from raw data
#'
#' This function takes raw variables of reference specimens with known spatial origins
#' and uses Euclidean distances to calculate the correlation value that would be required to
#' correctly provenance them in a test. This is achieved by a leave-one-out procedure. Shape variables
#' can be specified and if so Procrustes distances can be calculated. If a cross-validation procedure
#' that removes more than one specimen from the reference dataset is desired then it is recommended that
#' the validate function of the IDbyDistanceRawData method be used with a loop.
#' @param ProvConfidence is a value between 0 and 1 indicating the confidence level that is desired for spatial provenancing.
#' @inheritParams IDbyDistanceRawData
#' @return If Verbose is set to FALSE then a list of a single object containing the correlation value at the required confidence interval is returned. If Verbose is set to TRUE then a list is returned with two objects: the first is the correlation value at the required confidence interval; the second a dataframe of coordinates and the spatial-trait correlation values at the true locations of each specimen.
#' @details When used for shape data and for Procrustes distances this function makes use of the \code{\link[shapes]{procGPA}} and \code{\link[shapes]{procdist}} functions from the \code{shapes} package. When Euclidean distances are employed the \code{\link[stats]{dist}} function of the base \code{stats} package is used.
#' This method also makes use of the \code{\link[stats]{cor.test}} function from the \code{stats} package. When the \code{PrintProg} is set to TRUE, the \code{\link[svMisc]{progress}} function of the \code{svMisc} package is used.
#' The map plotting of this function makes use of the functions of the \code{maps} package.
#'
#' @section Citations:
#'
#' Original S code by Richard A. Becker, Allan R. Wilks. R version by Ray Brownrigg.
#' Enhancements by Thomas P Minka and Alex Deckmyn. (2017). maps: Draw Geographical Maps. R
#' package version 3.2.0. https://CRAN.R-project.org/package=maps
#'
#' Ian L. Dryden (2016). shapes: Statistical Shape Analysis. R package version 1.1-13.
#' https://CRAN.R-project.org/package=shapes
#'
#' Grosjean, Ph. (2016). svMisc: SciViews-R. UMONS, Mons, Belgium.
#' http://www.sciviews.org/SciViews-R.
#'
#' @author Ardern Hulme-Beaman
#'
#' @examples
#' Range.Exp <- .5
#'
#' Long.Range <- c(floor(min(Rpraetor$Lat.Long$Long))
#' -Range.Exp,ceiling(max(Rpraetor$Lat.Long$Long)+Range.Exp))
#' Lat.Range <- c(floor(min(Rpraetor$Lat.Long$Lat))
#' -Range.Exp,ceiling(max(Rpraetor$Lat.Long$Lat)+Range.Exp))
#'
#' RpraetorDataMat <- Array2Mat(Rpraetor$LMs)
#'
#' rThres <- IDbyDistanceRawDataCCV(LatLongs = Rpraetor$Lat.Long,
#' RefData = RpraetorDataMat,
#' ShapeData=TRUE,
#' ShapeDim=2,
#' DistMethod= "Proc",
#' Verbose = TRUE,
#' ProvConfidence = .95,
#' PrintProg = FALSE,
#' Method= 'Spearman')
#'
#' @export
IDbyDistanceRawDataCCV <- function(LatLongs, RefData, ShapeData=TRUE, ShapeDim=2, DistMethod=c("Euc", "Proc"), Verbose=TRUE, PrintProg=TRUE, ProvConfidence=0.95, Method=c('Spearman', 'Pearson')){
  # Shared package-level sanity check of the user-supplied arguments.
  UserInputAssessment(LatLongs, RefData, Method, RefDistMat = NULL, DistVec = NULL)
  # Standardise LatLongs to a data frame with known column names.
  LatLongs <- as.data.frame(LatLongs)
  colnames(LatLongs) <- c("Lats", "Longs")
  # For shape data, superimpose all reference specimens together (GPA) so
  # distances are computed in a common shape space.
  if (ShapeData==TRUE){
    gpaRes <- shapes::procGPA(Mat2Array(RefData, LMdim = ShapeDim))
    TotalShape <- Array2Mat(gpaRes$rotated)
  } else {
    TotalShape <- RefData
  }
  # Full pairwise trait-distance matrix among the reference specimens.
  if (DistMethod=="Euc"){
    ShapeDistMat <- as.matrix(stats::dist(TotalShape))
  } else if (DistMethod=="Proc" && ShapeData==TRUE){
    ShapeDistMat <- ProcDistanceTable(Mat2Array(TotalShape, LMdim=ShapeDim))
  } else if (DistMethod=="Proc" && ShapeData==FALSE){
    stop("Error: Procrustes distance selected, but ShapeData argument is set to FALSE")
  }
  # Leave-one-out: for each specimen, correlate its trait distances to all
  # other specimens with the geographic distances from its TRUE locality.
  nSpecimens <- dim(TotalShape)[1]
  CoordsHeat <- matrix(NA, nrow = dim(RefData)[1], ncol = 3)
  for (i in seq_len(nSpecimens)){
    # Trait distances from specimen i to everyone else (self excluded).
    ShapeDist <- ShapeDistMat[i, -i]
    # Geographic distances from specimen i's true locality to all the others.
    GeographicDist <- GeoDist2Point(RefLatLongs = LatLongs[-i, ], TargetLatLong = LatLongs[i, ])
    # cor.test warnings (e.g. from tied values) are deliberately suppressed.
    if (Method=='Spearman'){
      CorRes <- suppressWarnings(stats::cor.test(x = ShapeDist, y = GeographicDist, method = "spearman"))
    } else if (Method=='Pearson'){
      CorRes <- suppressWarnings(stats::cor.test(x = ShapeDist, y = GeographicDist, method = "pearson"))
    }
    # Store this specimen's true coordinates with its achieved correlation.
    CoordsHeat[i, ] <- c(LatLongs$Lats[i], LatLongs$Longs[i], CorRes$estimate)
    if (PrintProg==TRUE){
      svMisc::progress(value = i, max.value = nSpecimens, progress.bar = FALSE)
      Sys.sleep(0.01)
    }
  }
  CoordsHeat <- as.data.frame(CoordsHeat, row.names = seq_len(dim(CoordsHeat)[1]))
  names(CoordsHeat) <- c("Lats", "Longs", "Cor")
  # The (1 - ProvConfidence) quantile of the achieved correlations is the
  # threshold r that would correctly cover ProvConfidence of the specimens.
  ProvCor <- stats::quantile(CoordsHeat$Cor, 1 - ProvConfidence)
  ProvResults <- list(Provenancing.Correlation = as.numeric(ProvCor))
  names(ProvResults)[1] <- paste(names(ProvResults)[1], ".", ProvConfidence*100, "%.Confidence", sep = "")
  # Verbose mode additionally returns the per-specimen CCV correlation values.
  if (Verbose==TRUE){
    ProvResults$CCV.Cor.Vals <- CoordsHeat$Cor
  }
  return(ProvResults)
}
|
d84e5f182c92620f802c3ad44ae202bed455f4d9
|
38a3fe4b8fd21002613cc4f453cc4b1e72e2a0fc
|
/allnba.R
|
6c026a2303a0116b53df6920f92b71ba0c988997
|
[] |
no_license
|
tmayhew/alltimeNBA
|
00145e1448037754aaab841ed45528db8fc7e3ab
|
f51afa7a041a0b7352cfc7a199759a62be34b476
|
refs/heads/master
| 2022-09-05T02:20:45.222474
| 2020-05-19T05:05:23
| 2020-05-19T05:05:23
| 260,075,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,319
|
r
|
allnba.R
|
library(rvest)
library(tidyverse)
library(ggthemes)
library(Rfast)

# Score each player-season as the first principal component of its (scaled)
# box-score stats; the sign is flipped so that larger PC1 = better season.
score_pc1 <- function(stats) {
  -1 * prcomp(stats, scale = TRUE)$x[, 1]
}

# --- Group 1 (drops extra columns not shared with groups 2/3) -------------
g1 <- read.csv("finalRegSeason/group1.csv")[, -1]
summary(g1$Yr)
dat1 <- g1 %>%
  select(-Player, -Pos, -Age, -Tm, -Yr, -GS, -X3P, -X3PA, -X3P., -ORB, -DRB,
         -STL, -BLK, -TOV, -X3PAr, -ORB., -DRB., -STL., -BLK., -TOV., -USG.,
         -OBPM, -DBPM, -BPM, -VORP)
g1p <- cbind.data.frame(g1, PC1 = score_pc1(dat1))

# --- Group 2 --------------------------------------------------------------
g2 <- read.csv("finalRegSeason/group2.csv")[, -1]
summary(g2$Yr)
dat2 <- g2 %>% select(-Player, -Pos, -Age, -Tm, -Yr, -GS, -X3P, -X3PA, -X3P., -X3PAr)
g2p <- cbind.data.frame(g2, PC1 = score_pc1(dat2))

# Peek at the combined ranking so far.
rbind.data.frame(g1p, g2p) %>% arrange(desc(PC1)) %>% select(Player, Yr, PC1)

# --- Group 3 --------------------------------------------------------------
# (The original had `g3 = read.csv(...) summary(g3$Yr)` fused on one line,
# which is a syntax error; split into two statements.)
g3 <- read.csv("finalRegSeason/group3.csv")[, -1]
summary(g3$Yr)
dat3 <- g3 %>% select(-Player, -Pos, -Age, -Tm, -Yr, -GS, -X3P, -X3PA, -X3P., -X3PAr)
g3p <- cbind.data.frame(g3, PC1 = score_pc1(dat3))

# Combine all groups; keep only seasons with meaningful playing time.
reg <- rbind.data.frame(g1p, g2p, g3p) %>% arrange(desc(PC1)); nrow(reg)
reg <- reg %>% filter(MP > 100); nrow(reg)
reg %>% head(20)

# Cut PC1 into five equally sized tiers (tier 1 = best fifth of seasons).
t1cut <- round(nrow(reg)/5, 0); t1cut
t2cut <- round(nrow(reg)/5, 0)*2; t2cut
t3cut <- round(nrow(reg)/5, 0)*3; t3cut
t4cut <- round(nrow(reg)/5, 0)*4; t4cut
t1Pcut <- reg$PC1[t1cut]
t2Pcut <- reg$PC1[t2cut]
t3Pcut <- reg$PC1[t3cut]
t4Pcut <- reg$PC1[t4cut]
reg$tier <- as.factor(ifelse(reg$PC1 >= t1Pcut, 1,
                      ifelse(reg$PC1 >= t2Pcut, 2,
                      ifelse(reg$PC1 >= t3Pcut, 3,
                      ifelse(reg$PC1 >= t4Pcut, 4, 5)))))
reg %>% ggplot(aes(x = tier, y = PC1)) + geom_boxplot()

# All-NBA selections: flag first-team vs second/third-team picks.
all.nbadf <- read.csv("allnba.csv")[, -1] %>% select(Year, Player, all.nba)
all.nbadf$firstteam <- ifelse(all.nbadf$all.nba == 1, 1, 0)
all.nbadf$secthird <- ifelse(all.nbadf$all.nba == 1, 0, 1)

# Strip a single trailing "*" from player names (a marker appended by the
# source data) so names match the all-NBA table; replaces the original
# per-row strsplit loop with an anchored sub().
reg$Player <- sub("\\*$", "", as.character(reg$Player))

names(all.nbadf)[1] <- c("Yr")
head(all.nbadf)
head(reg)
full_join(all.nbadf, reg, by = c("Player", "Yr"))
|
6aec13d1de14aad79881a961fcf3d7901a4c71da
|
a806c8ff8c9064d424b4dce9b2fe444d981ae5b5
|
/plot1.R
|
8db83e8d3e0c10e939e3dcca2d6cc543e805ba0d
|
[] |
no_license
|
nterpitskaya/ExData_Plotting1
|
d650fec71c2f8505ec1c5a18d60780fd2591f2f5
|
9d70da2e690182a7e9bc8ce8c4cafd275f90c33f
|
refs/heads/master
| 2020-03-18T15:54:41.179235
| 2018-05-27T15:11:13
| 2018-05-27T15:11:13
| 134,936,465
| 0
| 0
| null | 2018-05-26T06:43:37
| 2018-05-26T06:43:36
| null |
UTF-8
|
R
| false
| false
| 556
|
r
|
plot1.R
|
# Read the power-consumption data; "?" marks missing values.
hpcall <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
# Convert the date column with as.Date() rather than strptime(): strptime()
# returns POSIXlt, which is fragile when stored in a data.frame column.
hpcall$Date <- as.Date(hpcall$Date, "%d/%m/%Y")
# Keep only the observations for 1-2 Feb 2007.
hpc <- hpcall[hpcall$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Open the png device.
png("plot1.png", width = 480, height = 480, bg = "transparent")
# Plot the histogram of global active power.
hist(hpc$Global_active_power, col = "red", main = "Global active power", xlab = "Global active power(kilowatts)")
# Close the device.
dev.off()
|
3887052c67d7279594c6b67ebc09fe49f0ded9d6
|
06a13729e9f4bf714c7697247236061f0bce5979
|
/R/doubletFinder.R
|
079acd56668df6646c8903e24a82f49b75f9e6e5
|
[
"MIT"
] |
permissive
|
scfurl/m3addon
|
1215919062cd6c857402a1851c5b438fa39870bd
|
2e9d0dcc33eec95c8400d1dcf291565fc9d31b98
|
refs/heads/master
| 2021-08-17T21:09:43.469291
| 2021-07-29T20:59:50
| 2021-07-29T20:59:50
| 193,642,890
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,058
|
r
|
doubletFinder.R
|
#' doubletFinder
#'
#' @description Core doublet prediction function of the DoubletFinder package. Generates artificial
#' doublets from an existing, pre-processed cell_data_set object. Real and artificial data are then merged
#' and pre-processed using parameters utilized for the existing cell_data_set object. PC distance matrix is
#' then computed and used to measure the proportion of artificial nearest neighbors (pANN) for every
#' real cell. pANN is then thresholded according to the number of expected doublets to generate final
#' doublet predictions.
#' @usage cds <- doubletFinder_v3(cds, PCs, pN = 0.25, pK, nExp, genes = "same", ...)
#' @param cds Input cell_data_set object.
#' @param PCs Number of statistically-significant principal components (e.g., as estimated from PC
#' elbow plot)
#' @param ... arguments passed to 1) \code{calculate_gene_dispersion} and 2) \code{select_genes}:
#' note that default for top_n passsed to \code{select_genes} in this context is 2000 features. See specific function documentation for further information
#' on acceptable arguments to be passed these functions.
#' @param pN The number of generated artificial doublets, expressed as a proportion of the merged
#' real-artificial data. Default is set to 0.25, based on observation that DoubletFinder performance is
#' largely pN-invariant (see McGinnis, Murrow and Gartner 2019, Cell Systems).
#' @param pK The PC neighborhood size used to compute pANN, expressed as a proportion of the merged
#' real-artificial data. No default is set, as pK should be adjusted for each scRNA-seq dataset. Optimal
#' pK values can be determined using mean-variance-normalized bimodality coefficient.
#' @param nExp The total number of doublet predictions produced. This value can best be estimated
#' from cell loading densities into the 10X/Drop-Seq device, and adjusted according to the estimated
#' proportion of homotypic doublets.
#' @param genes if "all", use all genes; if "recalc", recalculate ordering genes using \code{calculate_gene_dispersion} and
#' \code{select_genes}, passing arguments to each of these functions using ....; if "same" use ordering genes
#' specified in cds; or a vector of ordering genes to be used.
#' @param reuse.pANN Metadata column name for previously-generated pANN results. Argument should be set to
#' FALSE (default) for initial DoubletFinder runs. Enables fast adjusting of doublet predictions for
#' different nExp.
#' @importFrom fields rdist
#' @references McGinnis, Murrow and Gartner 2019, Cell Systems; https://github.com/chris-mcginnis-ucsf/DoubletFinder
#' @export
doubletFinder_v3 <- function(cds, PCs=1:100, pN = 0.25, pK, nExp, genes=c("same", "all", "recalc"), ...) {
  # ---- Resolve which features to use for pre-processing --------------------
  dots <- list(...)
  og_done <- FALSE
  # Arguments forwardable to select_genes() / calculate_gene_dispersion().
  sg_args <- c("logmean_ul", "logmean_ll", "top_n", "fit_min", "fit_max")
  cd_args <- c("id", "symbol_tag", "method", "remove_outliers", "q")
  rel_args <- dots[names(dots) %in% c(sg_args, cd_args)]
  if (!"top_n" %in% names(rel_args)) {
    # Default to the top 2000 most dispersed features (see roxygen docs).
    rel_args <- c(list(top_n = 2000), rel_args)
  }
  if (length(genes) > 1 && all(genes %in% c("same", "all", "recalc"))) {
    # User left the default keyword vector: fall back to "same".
    genes <- "same"
  }
  if (length(genes) == 1 && genes %in% c("same", "all", "recalc")) {
    if (genes == "all") {
      message("Using all features")
      ord_genes <- rownames(fData(cds))
      og_done <- TRUE
    }
    if (genes == "recalc") {
      message("Recalculating ordering features using the following arguments:\nCalculate Dispersion:\n")
      print(rel_args[names(rel_args) %in% cd_args])
      message("\nSelect Genes:\n")
      print(rel_args[names(rel_args) %in% sg_args])
      rel_args <- c(list(cds = cds), rel_args)
      cds <- do.call(calculate_gene_dispersion, rel_args[names(rel_args) %in% c("cds", cd_args)])
      cds <- do.call(select_genes, rel_args[names(rel_args) %in% c("cds", sg_args)])
      ord_genes <- get_ordering_genes(cds)
      og_done <- TRUE
    }
    if (genes == "same") {
      message("Using existing ordering features")
      ord_genes <- get_ordering_genes(cds)
      og_done <- TRUE
    }
  }
  # Accept a user-supplied vector of feature names (now also a single gene;
  # the original required length > 1).
  if (!og_done && all(genes %in% rownames(exprs(cds)))) {
    ord_genes <- genes
    message("Using supplied ordering genes")
    og_done <- TRUE
  }
  # Original guard (`!og_done & all(genes %in% rownames(...))`) could never
  # fire when genes were missing; fail whenever genes could not be resolved.
  if (!og_done) {
    stop("Genes not found in cds; they must be rownames of exprs(cds)")
  }

  # ---- Generate artificial doublets ----------------------------------------
  real.cells <- rownames(cds@colData)
  data <- exprs(cds)[, real.cells]
  n_real.cells <- length(real.cells)
  # pN is the doublet proportion of the *merged* data, hence this inversion.
  n_doublets <- round(n_real.cells/(1 - pN) - n_real.cells)
  message(paste("Creating", n_doublets, "artificial doublets...", sep = " "))
  real.cells1 <- sample(real.cells, n_doublets, replace = TRUE)
  real.cells2 <- sample(real.cells, n_doublets, replace = TRUE)
  # Each artificial doublet is the average profile of two random real cells.
  doublets <- (data[, real.cells1] + data[, real.cells2])/2
  colnames(doublets) <- paste("X", seq_len(n_doublets), sep = "")
  data_wdoublets <- cbind(data, doublets)

  # ---- Pre-process merged real + artificial data ---------------------------
  message("Creating Monocle3 object with doublets...")
  cds_wdoublets <- new_cell_data_set(data_wdoublets, gene_metadata = mcols(cds))
  message("Running PCA...")
  cds_wdoublets <- preprocess_cds(cds_wdoublets, num_dim = length(PCs), verbose = TRUE, genes = ord_genes)
  pca.coord <- cds_wdoublets@reducedDims$PCA[, PCs]
  nCells <- nrow(colData(cds_wdoublets))
  rm(cds_wdoublets); gc()  # free memory before building the distance matrix

  message("Calculating PC distance matrix...")
  dist.mat <- rdist(pca.coord)

  # ---- pANN: proportion of artificial nearest neighbours per real cell -----
  message("Computing pANN...")
  pANN <- as.data.frame(matrix(0L, nrow = n_real.cells, ncol = 1))
  rownames(pANN) <- real.cells
  colnames(pANN) <- "pANN"
  k <- round(nCells * pK)
  for (i in seq_len(n_real.cells)) {
    neighbors <- order(dist.mat[, i])
    # Drop self (position 1) and keep the k nearest neighbours.
    neighbors <- neighbors[2:(k + 1)]
    # Indices > n_real.cells correspond to artificial doublets.
    pANN$pANN[i] <- length(which(neighbors > n_real.cells))/k
  }

  # ---- Threshold pANN: the nExp highest-pANN cells are called doublets -----
  message("Classifying doublets..")
  classifications <- rep("Singlet", n_real.cells)
  classifications[order(pANN$pANN[1:n_real.cells], decreasing = TRUE)[1:nExp]] <- "Doublet"
  colData(cds)[, paste("pANN", pN, pK, nExp, sep = "_")] <- pANN[rownames(colData(cds)), 1]
  colData(cds)[, paste("DF.classifications", pN, pK, nExp, sep = "_")] <- classifications
  return(cds)
}
#' @importFrom pbmcapply pbmclapply
#' @export
#'
paramSweep_v3 <- function(cds, PCs=1:10, sct = FALSE, num.cores=parallel::detectCores()/2, genes=c("same", "all", "recalc"), ...) {
  # NOTE: `sct` is accepted for API compatibility but is not used here.
  # `detectCores` is now namespaced (parallel::) - only pbmcapply was imported,
  # so the bare call could fail when the parallel package was not attached.

  ## Set pN-pK param sweep ranges
  pK <- c(0.0005, 0.001, 0.005, seq(0.01, 0.5, by = 0.01))
  pN <- seq(0.05, 0.3, by = 0.05)
  ## Remove pK values that would give < 1 neighbour at the smallest pN
  min.cells <- round(nrow(colData(cds))/(1 - 0.05) - nrow(colData(cds)))
  pK.test <- round(pK * min.cells)
  pK <- pK[which(pK.test >= 1)]

  # ---- Resolve ordering genes (same logic as doubletFinder_v3) -------------
  og_done <- FALSE
  dots <- list(...)
  sg_args <- c("logmean_ul", "logmean_ll", "top_n", "fit_min", "fit_max")
  cd_args <- c("id", "symbol_tag", "method", "remove_outliers", "q")
  rel_args <- dots[names(dots) %in% c(sg_args, cd_args)]
  if (!"top_n" %in% names(rel_args)) {
    rel_args <- c(list(top_n = 2000), rel_args)
  }
  if (length(genes) > 1 && all(genes %in% c("same", "all", "recalc"))) {
    genes <- "same"
  }
  if (length(genes) == 1 && genes %in% c("same", "all", "recalc")) {
    if (genes == "all") {
      message("Using all features")
      ord_genes <- rownames(fData(cds))
      og_done <- TRUE
    }
    if (genes == "recalc") {
      message("Recalculating ordering features using the following arguments:\nCalculate Dispersion:\n")
      print(rel_args[names(rel_args) %in% cd_args])
      message("\nSelect Genes:\n")
      print(rel_args[names(rel_args) %in% sg_args])
      rel_args <- c(list(cds = cds), rel_args)
      cds <- do.call(calculate_gene_dispersion, rel_args[names(rel_args) %in% c("cds", cd_args)])
      cds <- do.call(select_genes, rel_args[names(rel_args) %in% c("cds", sg_args)])
      ord_genes <- get_ordering_genes(cds)
      og_done <- TRUE
    }
    if (genes == "same") {
      message("Using existing ordering features")
      ord_genes <- get_ordering_genes(cds)
      og_done <- TRUE
    }
  }
  # Accept a user-supplied vector of feature names (now also a single gene).
  if (!og_done && all(genes %in% rownames(exprs(cds)))) {
    ord_genes <- genes
    message("Using supplied ordering genes")
    og_done <- TRUE
  }
  # Original guard (`!og_done & all(genes %in% rownames(...))`) could never
  # fire when genes were missing; fail whenever genes could not be resolved.
  if (!og_done) {
    stop("Genes not found in cds; they must be rownames of exprs(cds)")
  }

  ## Down-sample cells to 10000 (when applicable) for computational efficiency
  if (nrow(colData(cds)) > 10000) {
    real.cells <- rownames(colData(cds))[sample(seq_len(nrow(colData(cds))), 10000, replace = FALSE)]
    data <- exprs(cds)[, real.cells]
  } else {
    real.cells <- rownames(colData(cds))
    data <- exprs(cds)
  }
  n.real.cells <- ncol(data)
  # Restrict to the ordering features before the sweep.
  data <- data[rownames(data) %in% ord_genes, ]

  ## Iterate through pN, computing pANN vectors at varying pK (in parallel
  ## when more than one core is requested)
  if (num.cores > 1) {
    output2 <- pbmclapply(as.list(seq_along(pN)),
                          FUN = parallel_paramSweep_v3,
                          n.real.cells,
                          real.cells,
                          pK,
                          pN,
                          data,
                          PCs,
                          mc.cores = num.cores)
  } else {
    output2 <- lapply(as.list(seq_along(pN)),
                      FUN = parallel_paramSweep_v3,
                      n.real.cells,
                      real.cells,
                      pK,
                      pN,
                      data,
                      PCs)
  }

  ## Flatten the per-pN result lists into a single list, ordered as
  ## (pN1: all pK) (pN2: all pK) ... to match the names built below.
  sweep.res.list <- unlist(output2, recursive = FALSE)
  names(sweep.res.list) <- paste("pN", rep(pN, each = length(pK)),
                                 "pK", rep(pK, times = length(pN)), sep = "_")
  return(sweep.res.list)
}
#' @importFrom fields rdist
#' @importFrom S4Vectors DataFrame
#' @export
#'
parallel_paramSweep_v3 <- function(n, n.real.cells, real.cells, pK, pN, data, PCs) {
  # Worker for paramSweep_v3: for the n-th pN value, build artificial
  # doublets, run PCA, and compute pANN vectors for every candidate pK.
  sweep.res.list <- list()
  list.ind <- 0

  ## Make merged real-artificial data for this pN
  message(paste("Creating artificial doublets for pN = ", pN[n]*100, "%", sep = ""))
  n_doublets <- round(n.real.cells/(1 - pN[n]) - n.real.cells)
  real.cells1 <- sample(real.cells, n_doublets, replace = TRUE)
  real.cells2 <- sample(real.cells, n_doublets, replace = TRUE)
  # Each artificial doublet is the average profile of two random real cells.
  doublets <- (data[, real.cells1] + data[, real.cells2])/2
  colnames(doublets) <- paste("X", seq_len(n_doublets), sep = "")
  data_wdoublets <- cbind(data, doublets)

  ## Pre-process merged data
  message("Creating Monocle3 object with doublets...")
  cds_wdoublets <- new_cell_data_set(data_wdoublets, gene_metadata = DataFrame(gene_short_name = rownames(data), row.names = rownames(data)))
  message("Running PCA...")
  cds_wdoublets <- preprocess_cds(cds_wdoublets, num_dim = length(PCs), verbose = TRUE)
  # (The original computed pca.coord and nCells twice; once is enough.)
  pca.coord <- cds_wdoublets@reducedDims$PCA[, PCs]
  nCells <- nrow(colData(cds_wdoublets))
  rm(cds_wdoublets)
  gc()

  ## Only distances to the real cells (first n.real.cells columns) are needed
  message("Calculating PC distance matrix...")
  dist.mat <- rdist(pca.coord)[, 1:n.real.cells]

  ## Pre-order PC distance matrix prior to iterating across pK for pANN:
  ## each column is replaced by the row indices sorted by increasing distance.
  message("Defining neighborhoods...")
  for (i in seq_len(n.real.cells)) {
    dist.mat[, i] <- order(dist.mat[, i])
  }

  ## Trim to the largest neighbourhood ever needed (+ small safety margin)
  ind <- round(nCells * max(pK)) + 5
  dist.mat <- dist.mat[seq_len(ind), ]

  ## Compute pANN across the pK sweep
  message("Computing pANN across all pK...")
  for (k in seq_along(pK)) {
    print(paste("pK = ", pK[k], "...", sep = ""))
    pk.temp <- round(nCells * pK[k])
    pANN <- as.data.frame(matrix(0L, nrow = n.real.cells, ncol = 1))
    colnames(pANN) <- "pANN"
    rownames(pANN) <- real.cells
    list.ind <- list.ind + 1
    for (i in seq_len(n.real.cells)) {
      # Rows 2..(pk.temp+1): nearest neighbours excluding self (row 1).
      neighbors <- dist.mat[2:(pk.temp + 1), i]
      # Indices > n.real.cells correspond to artificial doublets.
      pANN$pANN[i] <- length(which(neighbors > n.real.cells))/pk.temp
    }
    sweep.res.list[[list.ind]] <- pANN
  }
  return(sweep.res.list)
}
#' @importFrom ROCR prediction
#' @importFrom ROCR performance
#' @importFrom KernSmooth bkde
#' @export
#'
summarizeSweep <- function(sweep.list, GT = FALSE, GT.calls = NULL) {
  ## Summarize a pN-pK parameter sweep by computing the bimodality coefficient
  ## of each pANN distribution and, when ground-truth doublet classifications
  ## are supplied (GT = TRUE), the ROC AUC of a logistic model trained on pANN.
  ##
  ## Args:
  ##   sweep.list: named list of pANN data frames; names encode "pN_<x>_pK_<y>".
  ##   GT:         TRUE when ground-truth singlet/doublet calls are available.
  ##   GT.calls:   vector of "Singlet"/"Doublet" calls, same cell order as pANN.
  ## Returns: data frame with one row per (pN, pK) combination.

  ## Recover the pN and pK grids from the list element names
  name.vec <- names(sweep.list)
  name.vec <- unlist(strsplit(name.vec, split="pN_"))
  name.vec <- name.vec[seq(2, length(name.vec), by=2)]
  name.vec <- unlist(strsplit(name.vec, split="_pK_"))
  pN <- as.numeric(unique(name.vec[seq(1, length(name.vec), by=2)]))
  pK <- as.numeric(unique(name.vec[seq(2, length(name.vec), by=2)]))

  ## Initialize data structure w/ or w/o AUC column, depending on whether
  ## ground-truth doublet classifications are available.
  ## BUGFIX: 'levels' is an argument of factor(), not rep(); the original code
  ## passed it to rep(), where it was silently ignored.
  if (GT == TRUE) {
    sweep.stats <- as.data.frame(matrix(0L, nrow=length(sweep.list), ncol=4))
    colnames(sweep.stats) <- c("pN","pK","AUC","BCreal")
    sweep.stats$pN <- factor(rep(pN, each=length(pK)), levels = pN)
    sweep.stats$pK <- factor(rep(pK, length(pN)), levels = pK)
  }
  if (GT == FALSE) {
    sweep.stats <- as.data.frame(matrix(0L, nrow=length(sweep.list), ncol=3))
    colnames(sweep.stats) <- c("pN","pK","BCreal")
    sweep.stats$pN <- factor(rep(pN, each=length(pK)), levels = pN)
    sweep.stats$pK <- factor(rep(pK, length(pN)), levels = pK)
  }

  ## Perform pN-pK parameter sweep summary
  for (i in 1:length(sweep.list)) {
    res.temp <- sweep.list[[i]]
    ## Gaussian kernel density estimate of the pANN vector, evaluated on an
    ## even grid, feeds the bimodality coefficient
    gkde <- approxfun(bkde(res.temp$pANN, kernel="normal"))
    x <- seq(from=min(res.temp$pANN), to=max(res.temp$pANN), length.out=nrow(res.temp))
    sweep.stats$BCreal[i] <- bimodality_coefficient(gkde(x))
    if (GT == FALSE) { next }
    ## If ground-truth doublet classifications are available, perform ROC
    ## analysis on a logistic regression trained on pANN (random 50/50 split)
    meta <- as.data.frame(matrix(0L, nrow=nrow(res.temp), ncol=2))
    meta[,1] <- GT.calls
    meta[,2] <- res.temp$pANN
    train.ind <- sample(1:nrow(meta), round(nrow(meta)/2), replace=FALSE)
    test.ind <- (1:nrow(meta))[-train.ind]
    colnames(meta) <- c("SinDub","pANN")
    meta$SinDub <- factor(meta$SinDub, levels = c("Doublet","Singlet"))
    ## binomial(link = "logit") replaces the unusual "binomial"(link='logit')
    ## string-call form; both resolve to the same family object
    model.lm <- glm(SinDub ~ pANN, family = binomial(link = "logit"), data=meta, subset=train.ind)
    prob <- predict(model.lm, newdata=meta[test.ind, ], type="response")
    ROCpred <- prediction(predictions=prob, labels=meta$SinDub[test.ind])
    perf.auc <- performance(ROCpred, measure="auc")
    sweep.stats$AUC[i] <- perf.auc@y.values[[1]]
  }
  return(sweep.stats)
}
#' @export
find.pK <- function(sweep.stats) {
  ## Compute BCmvn (mean-variance-normalized bimodality coefficient) across the
  ## pN dimension for every pK value in a summarized parameter sweep. When
  ## sweep.stats carries an "AUC" column (ground-truth workflow), the mean AUC
  ## per pK is reported as well. Returns one row per unique pK.
  has.auc <- "AUC" %in% colnames(sweep.stats)
  pk.levels <- unique(sweep.stats$pK)

  ## Result skeleton; column set depends on ground-truth availability
  if (has.auc) {
    bc.mvn <- as.data.frame(matrix(0L, nrow = length(pk.levels), ncol = 6))
    colnames(bc.mvn) <- c("ParamID", "pK", "MeanAUC", "MeanBC", "VarBC", "BCmetric")
  } else {
    bc.mvn <- as.data.frame(matrix(0L, nrow = length(pk.levels), ncol = 5))
    colnames(bc.mvn) <- c("ParamID", "pK", "MeanBC", "VarBC", "BCmetric")
  }
  bc.mvn$pK <- pk.levels
  bc.mvn$ParamID <- seq_len(nrow(bc.mvn))

  ## Aggregate BCreal (and AUC, if present) over the pN sweep for each pK
  row.idx <- 0
  for (pk.val in pk.levels) {
    row.idx <- row.idx + 1
    sel <- which(sweep.stats$pK == pk.val)
    bc.vals <- sweep.stats[sel, "BCreal"]
    if (has.auc) {
      bc.mvn$MeanAUC[row.idx] <- mean(sweep.stats[sel, "AUC"])
    }
    bc.mvn$MeanBC[row.idx] <- mean(bc.vals)
    bc.mvn$VarBC[row.idx] <- sd(bc.vals)^2
    ## BCmvn: mean over variance -- high values flag the optimal pK
    bc.mvn$BCmetric[row.idx] <- mean(bc.vals) / (sd(bc.vals)^2)
  }
  return(bc.mvn)
}
#' @export
modelHomotypic <- function(annotations) {
  ## Estimate the homotypic doublet proportion from cell-type annotations as
  ## the sum of squared annotation frequencies (probability that two randomly
  ## paired cells share a type). Frequencies are computed over the full input
  ## length, matching the original implementation.
  freqs <- as.numeric(table(annotations)) / length(annotations)
  sum(freqs * freqs)
}
## pN-pK parameter sweep for a Seurat v3 object (DoubletFinder workflow).
## Builds the candidate pK/pN grids, optionally downsamples to 10,000 cells,
## and dispatches DoubletFinder::parallel_paramSweep_v3 either serially or
## across num.cores workers via pbmcapply.
##
## Args:
##   seu:       Seurat v3 object (reads seu@meta.data, seu@assays$RNA@counts,
##              seu@commands).
##   PCs:       principal components passed through to the sweep worker.
##   sct:       passed through to the worker; presumably flags SCTransform
##              normalization -- TODO confirm against DoubletFinder docs.
##   num.cores: number of cores; > 1 enables parallel execution.
## Returns: named list of per-(pN, pK) results; names encode "pN_<x>_pK_<y>".
seurat_paramSweep_v3<-function (seu, PCs = 1:10, sct = FALSE, num.cores = 1)
{
require(Seurat)
require(fields)
## Candidate neighborhood-size fractions (pK) and doublet proportions (pN)
pK <- c(5e-04, 0.001, 0.005, seq(0.01, 0.3, by = 0.01))
pN <- seq(0.05, 0.5, by = 0.05)
## Drop pK values that would yield a neighborhood of < 1 cell at the smallest
## pN (0.05), based on the number of artificial doublets that would be created
min.cells <- round(nrow(seu@meta.data)/(1 - 0.05) - nrow(seu@meta.data))
pK.test <- round(pK * min.cells)
pK <- pK[which(pK.test >= 1)]
orig.commands <- seu@commands
## Cap the sweep at 10,000 randomly sampled real cells to bound runtime
if (nrow(seu@meta.data) > 10000) {
real.cells <- rownames(seu@meta.data)[sample(1:nrow(seu@meta.data),
10000, replace = FALSE)]
data <- seu@assays$RNA@counts[, real.cells]
n.real.cells <- ncol(data)
}
if (nrow(seu@meta.data) <= 10000) {
real.cells <- rownames(seu@meta.data)
data <- seu@assays$RNA@counts
n.real.cells <- ncol(data)
}
## One sweep job per pN value; parallel branch uses pbmclapply
if (num.cores > 1) {
require(parallel)
#cl <- makeCluster(num.cores)
output2 <- pbmcapply::pbmclapply(as.list(1:length(pN)), FUN = DoubletFinder::parallel_paramSweep_v3,
n.real.cells, real.cells, pK, pN, data, orig.commands,
PCs, sct, mc.cores = num.cores)
#stopCluster(cl)
}
else {
output2 <- lapply(as.list(1:length(pN)), FUN = DoubletFinder::parallel_paramSweep_v3,
n.real.cells, real.cells, pK, pN, data, orig.commands,
PCs, sct)
}
## Flatten the nested per-pN result lists into one flat list
sweep.res.list <- list()
list.ind <- 0
for (i in 1:length(output2)) {
for (j in 1:length(output2[[i]])) {
list.ind <- list.ind + 1
sweep.res.list[[list.ind]] <- output2[[i]][[j]]
}
}
## Name each entry by its (pN, pK) combination, pN-major to match the
## flattening order above
name.vec <- NULL
for (j in 1:length(pN)) {
name.vec <- c(name.vec, paste("pN", pN[j], "pK", pK,
sep = "_"))
}
names(sweep.res.list) <- name.vec
return(sweep.res.list)
}
|
e1c897d07a7c082244750b6ff87256ac112fc4c9
|
01dd2de04c691e2f5aa7623ccc2fc17e1776c266
|
/man/Brick_matrix_isdone.Rd
|
c261f45842daa972f620c604457616444e20be52
|
[] |
no_license
|
fferrari/HiCBricks
|
5d9940ae90dab6a92af470b1b6143a02140b2fdb
|
9144a2555855494bbe054991167dd84a2e5734a9
|
refs/heads/master
| 2020-04-30T23:11:41.506943
| 2019-03-22T12:40:03
| 2019-03-22T12:40:03
| 177,138,353
| 0
| 0
| null | 2019-03-22T12:44:11
| 2019-03-22T12:44:10
| null |
UTF-8
|
R
| false
| true
| 975
|
rd
|
Brick_matrix_isdone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Brick_functions.R
\name{Brick_matrix_isdone}
\alias{Brick_matrix_isdone}
\title{Check if a matrix has been loaded for a chromosome pair.}
\usage{
Brick_matrix_isdone(Brick, chr1, chr2)
}
\arguments{
\item{Brick}{\strong{Required}.
A string specifying the path to the Brick store created with CreateBrick.}
\item{chr1}{\strong{Required}.
A character vector of length 1 specifying the chromosome corresponding to
the rows of the matrix}
\item{chr2}{\strong{Required}.
A character vector of length 1 specifying the chromosome corresponding to
the columns of the matrix}
}
\value{
Returns a logical vector of length 1, specifying if a matrix has
been loaded or not.
}
\description{
Check if a matrix has been loaded for a chromosome pair.
}
\examples{
Brick.file <- system.file("extdata", "test.hdf", package = "HiCBricks")
Brick_matrix_isdone(Brick = Brick.file, chr1 = "chr19", chr2 = "chr19")
}
|
3c915f62e179125b52c080637b3a94d61b3080bd
|
50fa03be02fbea2f4b08006cbcab6a3b22b1ec15
|
/GOterms/Screening Pipeline clusterized compute.R
|
4e8837f16f533de9b5bdab6589a8be0e1d4411d1
|
[] |
no_license
|
YuWei-Lin/scRNA
|
cf8894e50600043d55dc223a122ea5b87087c408
|
1a9805d02144eaec695ce9921c92c5c7cac55e7a
|
refs/heads/main
| 2023-07-10T04:47:52.384243
| 2021-08-09T03:48:50
| 2021-08-09T03:48:50
| 392,888,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,964
|
r
|
Screening Pipeline clusterized compute.R
|
## Pre-screening step: for every GO term with 50-500 genes present in the
## expression matrix, write the corresponding expression submatrix to disk and
## record the term in GNlist. Paths are hard-coded to a local Windows machine.
# Import Packages
library(cluster)
library(Rtsne)
library(gplots)
# Import GO and processed ssc-RNAseq data
# NOTE(review): GOIDName and GOIDList are read from the same CSV; presumably
# one was meant to hold names and the other the gene mapping -- verify.
GOIDName <- read.csv("C:/Users/tonyxp/Desktop/CDF_PureCancer_SC_ReadyFiles/GSE75688_Breast/GOID2GeneMSigDB.csv", header = TRUE, stringsAsFactors = FALSE)
GOIDList <- read.csv("C:/Users/tonyxp/Desktop/CDF_PureCancer_SC_ReadyFiles/GSE75688_Breast/GOID2GeneMSigDB.csv", header = TRUE, stringsAsFactors = FALSE)
Mel.stand <- read.csv("C:/Users/tonyxp/Desktop/CDF_PureCancer_SC_ReadyFiles/GSE75688_Breast/CDF_GSE75688_BreastCan_ZRM30.csv", header = TRUE)
# Transpose so rows = cells, columns = genes
Mel.stand <- t(Mel.stand)
# Pre-screening and generating of GOnames
N <- table(GOIDList$GOID)
GNlist <- NULL
for(i in 1:length(N)){
# Keep GO terms whose gene set overlaps the matrix by 50-500 genes
if(length(intersect(GOIDList[which(GOIDList$GOID==(names(N)[i])),1],colnames(Mel.stand))) >= 50 &
length(intersect(GOIDList[which(GOIDList$GOID==(names(N)[i])),1],colnames(Mel.stand))) <= 500){
AD <- Mel.stand[ ,colnames(Mel.stand)%in%GOIDList[which(GOIDList$GOID==(names(N)[i])),1]]
GNlist <- append(GNlist, names(N)[i], after = length(GNlist))
# ".csv" is pasted as a trailing positional argument after the named sep=""
write.csv(AD, file = paste("C:/Users/tonyxp/Desktop/BreastCan_ZRMCDF/GSE75688_Mig_GOid/GO_", substr(names(N)[i],4,nchar(as.character(names(N)[i]))), sep="",".csv"))
}
}
write.csv(GNlist , "GNlistGSE7568_ZRCDF.csv",row.names = FALSE)
## Build the union of all genes belonging to the GO terms that survived the
## pre-screen, by parsing the GO ids back out of the written file names.
#Union the genes as a pool after GO screening
gepoo <- list.files("C:/Users/tonyxp/Desktop/BreastCan_ZRMCDF/GSE75688_Mig_GOid")
# Recover the 7-digit GO id from file names of the form "GO_1234567.csv"
gepo <- gsub("_", ":", gepoo)
gepo <- substr(gepo, 4, 10)
tar <- NULL
# NOTE(review): rbind inside a loop is O(n^2); acceptable for small term counts
for (i in 1:length(gepo)){
tar <- rbind(tar, GOIDList[substr(GOIDList$GOID, 4, 10) == gepo[i], ])
}
# Deduplicate genes shared by several GO terms
tartest <- tar[!duplicated(tar$Gene), ]
write.table(tartest, file = "Genepool_of_GSE75688_Mig.csv", sep = ",", row.names = FALSE)
## Main screening loop: for each qualifying GO term, run t-SNE on the gene
## subset, compute silhouettes on a correlation-derived distance, record the
## fraction of non-positive silhouette widths (per cluster and overall), and
## write t-SNE / silhouette plots to disk.
## NOTE(review): `gps` (cluster assignments) and `cc` (point colors) are never
## defined in this script -- presumably created in an earlier session; confirm.
## `colors[1:13]` subsets the base function `colors` and would error unless a
## `colors` vector was defined elsewhere.
# Screening and generate GOnames, GOtsne, and GOsil
tartest <- read.table("C:/Users/tonyxp/Desktop/R00006/Genepool of GO screening of GSE75688 FIX.csv", header = TRUE, sep = ",")
Mel.stand2 <- Mel.stand[ , colnames(Mel.stand)%in%(tartest$Gene)]
Mel.stand2[is.nan(Mel.stand2)] <- 0
N <- table(GOIDList$GOID)
GNlist <- NULL
GNVa <- NULL
GNum <- NULL
Indsil <- NULL
GNAvgss <- NULL
p_time <- proc.time()
for(i in 1:length(N)){
if(length(intersect(GOIDList[which(GOIDList$GOID==(names(N)[i])),1],colnames(Mel.stand2))) >= 50 &
length(intersect(GOIDList[which(GOIDList$GOID==(names(N)[i])),1],colnames(Mel.stand2))) <= 500){
AD <- Mel.stand2[ ,colnames(Mel.stand2)%in%GOIDList[which(GOIDList$GOID==(names(N)[i])),1]]
# Drop rows that are (numerically) constant across all genes
AD <- AD[!apply(AD, 1,function(x) all(abs(x-x[1])<0.00000000001)), ]
tsne <- Rtsne(AD, dims = 2, perplexity=30, max_iter = 5000, check_duplicates = FALSE)
# Pearson correlation between cells -> dissimilarity 2*(1 - r)
cormat <- cor(t(AD), method = "pearson", use = "pairwise.complete.obs")
cortrans <- 2*(1-cormat)
sil = silhouette (gps, cortrans)
tt <- sil[ ,"sil_width"]
GNsubsil <- NULL
GNlist <- append(GNlist, names(N)[i], after = length(GNlist))
# Fraction of cells with non-positive silhouette width (whole dataset)
GNVa <- append(GNVa, length(tt[tt<=0])/length(tt), after = length(GNVa))
# Same fraction, per cluster
for(k in 1:length(table(gps))){
KK <- sil[ ,"cluster"] == k
GNsubsil <- append(GNsubsil, sum(sil[KK,"sil_width"]<=0)/sum(KK), after = length(GNsubsil))
}
Indsil <- rbind(Indsil, GNsubsil)
GNAvgss <- append(GNAvgss, mean(GNsubsil), after = length(GNAvgss))
GNum <- append(GNum, ncol(AD), after = length(GNum))
tiff(paste("C:/Users/tonyxp/Desktop/BreastCan_ZRMCDF/BreastCan_ZRMCDF_tsne/", substr(names(N)[i],4,nchar(as.character(names(N)[i])))," Autotsne.tiff", sep=""), width=1600, height=1600, compression="lzw", res=300)
plot(tsne$Y, main= paste(names(N)[i], format(length(tt[tt<=0])/length(tt), digits = 2, format = T), unique(GOIDName[GOIDName$GOID==names(N)[i], ][3])), cex.main = 0.8, col= cc, pch = 16, xlim = c(-35,35), ylim = c(-35,35), cex = 0.4)
dev.off()
tiff(paste("C:/Users/tonyxp/Desktop/BreastCan_ZRMCDF/BreastCan_ZRMCDF_sil/", substr(names(N)[i],4,nchar(as.character(names(N)[i])))," Autosil.tiff", sep=""), width=1600, height=1600, compression="lzw", res=300)
plot(sil, main = paste(names(N)[i], format(length(tt[tt<=0])/length(tt), digits = 2, format = T), unique(GOIDName[GOIDName$GOID==names(N)[i], ][3])), size = 0.8, col= colors[1:13], border=NA)
dev.off()
}
}
t_time <- proc.time()-p_time
print(t_time)
## Permutation test: for each screened GO term, draw 1000 random gene sets of
## the same size and recompute the silhouette statistics, yielding empirical
## p-values for the whole-dataset and per-cluster non-positive-silhouette
## fractions. Results are assembled into table E and written to CSV.
# Randomly runs for computing p-value
p_time <- proc.time()
GNVaAcu <- NULL
GNVaAcuInd <- NULL
for(i in 1:length(GNum)){
GNVaR <- NULL
GNAvgssR <- NULL
for(j in 1:1000){
# Random gene set of the same size as GO term i
ADR <- Mel.stand2[ ,sample(ncol(Mel.stand2), GNum[i], replace = FALSE)]
cormatR <- cor(t(ADR), method = "pearson")
cortransR <- 2*(1-cormatR)
silR = silhouette (gps, cortransR)
ttR <- silR[ ,"sil_width"]
GNVaR <- append(GNVaR, length(ttR[ttR<=0])/length(ttR), after = length(GNVaR))
GNsubsilR <- NULL
for(k in 1:length(table(gps))){
KKR <- silR[ ,"cluster"] == k
GNsubsilR <- append(GNsubsilR, sum(silR[KKR,"sil_width"]<=0)/sum(KKR), after = length(GNsubsilR))
}
GNAvgssR <- append(GNAvgssR, mean(GNsubsilR), after = length(GNAvgssR))
}
# Empirical p-value: fraction of random sets at least as good as the GO set
GNVaAcu <- append(GNVaAcu, 1-(sum(as.numeric(GNVa[i] > GNVaR))/1000), after = length(GNVaAcu))
GNVaAcuInd <- append(GNVaAcuInd, 1-(sum(as.numeric(GNAvgss[i] > GNAvgssR))/1000), after = length(GNVaAcuInd))
}
t_time <- proc.time()-p_time
print(t_time)
# Assemble the output table: GO id, gene count, per-cluster stats, p-values
E1 <- cbind(GNlist, GNum, Indsil)
E2 <- cbind(GNAvgss, GNVa, GNVaAcuInd, GNVaAcu)
E <- cbind(E1, E2)
# NOTE(review): 'stringsAsfactor' is misspelled (should be stringsAsFactors)
# and is silently ignored by as.data.frame
E <- as.data.frame(E, stringsAsfactor = FALSE)
E <- E[rev(order(E$GNVaAcu)), ]
# Attach the GO term names by matching ids
NN <- GOIDName[GOIDName$GOID%in%E$GNlist, ]
E <- cbind(NN[match(E$GNlist, NN$GOID), 2], E)
colnames(E) <- c("GO terms", "GO-ID", "Gene Numbers", paste("BC0", 1:9, sep = ""), "BC10", "BC11", "BC03LN","BC07LN", "Clust_NSV", "Whole_NSV", "Clust_p-Val", "Whole_p-Val")
write.table(E, file = "GSE75688_BC_PatientID FIX Go-Scoring-of Clust&Whole_p-val.csv", sep = ",", row.names = FALSE)
|
0fc20b24b834504e78320a2f2d4159a5e1beaca4
|
c194c5236006a758b29bd4d530ad563dc9ecab7e
|
/inst/apps/regression_plots/ui.R
|
ee79c84ba004ea17a70185523e656cfaebc1f485
|
[] |
no_license
|
Auburngrads/teachingApps
|
1087c20a21992433a2f8451db7b1eaa7d1d2cb89
|
b79c192e5f74c5e8376674d4fb9e0b95a426fe03
|
refs/heads/master
| 2021-03-16T07:49:56.579527
| 2020-06-14T12:10:12
| 2020-06-14T12:10:12
| 51,677,745
| 15
| 7
| null | 2018-03-01T03:44:58
| 2016-02-14T03:22:47
|
R
|
UTF-8
|
R
| false
| false
| 889
|
r
|
ui.R
|
## Shiny UI for a teaching app on regression diagnostic plots: an editable
## code pane (left) whose contents are evaluated on demand, with the resulting
## plot shown on the right.
## NOTE(review): add_theme(), add_css() and getShinyOption('theme') are
## presumably helpers from the enclosing package -- confirm they are exported.
ui = fluidPage(theme = add_theme(getShinyOption('theme')),
add_css(),
sidebarLayout(
sidebarPanel(width = 5,
# Editable R source shown to the student; the default text draws the four
# standard lm() diagnostic plots for the mtcars data
shinyAce::aceEditor(fontSize = 16,
wordWrap = T,
outputId = "regplot",
mode = "r",
theme = "github",
height = "475px",
value =
"par(mfrow = c(2,2), # Arrange plots in a 2x2 array
family = 'serif', # Change the font family
mar = c(2.5,4.1,1.5,2.1)) # Change the plot margins
plot(lm(mtcars), which = 1, las = 1)
plot(lm(mtcars), which = 2, las = 1)
plot(lm(mtcars), which = 3, las = 1)
plot(lm(mtcars), which = 4, las = 1)
par(mfrow = c(1,1)) # Return plot window to a 1x1 array"),
# Button that triggers evaluation of the editor contents (server side)
actionButton("evalreg", "Evaluate")),
# Output pane for the rendered diagnostic plots
mainPanel(plotOutput("plotreg", height = "550px"), width = 7)))
|
e2bffd50a92e6a719f3dd3c129dcfc81bea5026e
|
8f53c854ca23303ac0e428a48d362fde4a9cb8c2
|
/Regularisation with Lasso,Ridge and Elastic net.R
|
817bb344cfd101a49aa791d71b903776e9ed571f
|
[] |
no_license
|
Tapas1990/Regularisation-Techniques
|
b79440140b5febc34ae4b272d058b075a160c687
|
990b4cd828e7a99cabe1b5d16cc35a24f85bc53b
|
refs/heads/master
| 2021-01-20T14:17:11.650643
| 2017-05-08T04:19:49
| 2017-05-08T04:19:49
| 90,585,889
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,931
|
r
|
Regularisation with Lasso,Ridge and Elastic net.R
|
## Setup: load glmnet/caret, fetch the prostate dataset, and build matrix
## train/test splits (glmnet requires x/y matrices, not a formula interface).
#Setup
library(glmnet)
library(caret)
options(scipen=999)
'%ni%'=Negate('%in%')
#Create training and test Data
prostate=read.csv('https://goo.gl/qmrDcY')
#glmnet doesnt use formula interface.x and y have to be explicitly set also
#also training and test can only be matrix object.
set.seed(100)
# 70/30 split stratified on the response lpsa
train_rows=createDataPartition(prostate$lpsa,p=0.7,list=FALSE)
train_data=prostate[train_rows,]
test_data=prostate[-train_rows,]
# Predictors = all columns except the response 'lpsa'
train_x=as.matrix(train_data[,colnames(train_data)%ni%'lpsa'])
train_y=as.matrix(train_data[,'lpsa'])
# NOTE(review): test_x deliberately reuses colnames(train_data); both frames
# share the same columns, so the result is identical
test_x=as.matrix(test_data[,colnames(train_data)%ni%'lpsa'])
test_y=as.matrix(test_data[,'lpsa'])
## Ridge regression (alpha = 0): fit over a lambda grid, pick the best lambda
## by cross-validation, evaluate on the test set, and inspect coefficients.
#Step 1
# Lambda grid: 100 values from 10^10 down to 10^-2
grid=10^seq(10,-2,length=100)
grid
#Step 2
ridgeMod=glmnet(train_x,train_y,alpha = 0,
lambda = grid,thresh = 1e-12)
ridgeMod
#Step 3
set.seed(100)
cv.out=cv.glmnet(train_x,train_y,alpha=0)#Alpha is the elastic net,mixing parameter
#alpha=0:RIDGE and alpha=1:LASSO
plot(cv.out)#2 vertical lines:1st points to lowest mean square error ,2nd one points to max variance within 1 sd
bestlam=cv.out$lambda.min
bestlam
#Step 4
# Test-set predictions at the CV-selected lambda
pred=predict(ridgeMod,s=bestlam,newx=test_x)
DMwR::regr.eval(test_y,pred)#mape=33.5%
#plot values of coefficients against log lambda in X axis
coefs_ridge=predict(ridgeMod,type='coefficients',s=bestlam)
coefs_ridge
plot(ridgeMod,xvar='lambda')
#None of the coefficients are zero until lambda is too large.
## Lasso regression (alpha = 1): same workflow as ridge -- fit over the lambda
## grid, select lambda by CV, evaluate on the test set.
#Lasso
set.seed(100)
#Step 1
grid=10^seq(10,-2,length=100)
grid
#Step 2
lassoMod=glmnet(train_x,train_y,alpha = 1,
lambda = grid,thresh = 1e-12)
lassoMod
#Step 3
cv.out=cv.glmnet(train_x,train_y,alpha=1)#Alpha is the elastic net,mixing parameter
#alpha=0:RIDGE and alpha=1:LASSO
plot(cv.out)#2 vertical lines:1st points to lowest mean square error ,2nd one points to max variance within 1 sd
bestlam=cv.out$lambda.min
bestlam
#Step 4
pred=predict(lassoMod,s=bestlam,newx=test_x)
DMwR::regr.eval(test_y,pred)#mape:32.6%
# NOTE(review): variable name 'coefs_ridge' is reused here for lasso
# coefficients, overwriting the ridge result
coefs_ridge=predict(lassoMod,type='coefficients',s=bestlam)
coefs_ridge
plot(lassoMod,xvar='lambda')
## Elastic-net alpha sweep: for each alpha in [0, 1] (step 0.01), pick the best
## lambda by CV with a fixed foldid (so folds are comparable across alphas),
## refit, and record the test-set MAPE; finally plot MAPE vs alpha and mark
## the minimum. The setup block below intentionally repeats the earlier data
## preparation so this section is self-contained.
#Mxing parameter alpha:mix of ridge and lasso
#for prostate dataset:Find mixing parameter alpha that minimises
#MAPE,for glmnet().Let step value for alpha search be 0.01.
#Note:The 'foldid'needs to be fixed for comparing MAPE for different alphas in cv.glmnet func so that same no of rows are considered always.
library(glmnet)
library(caret)
options(scipen=999)
'%ni%'=Negate('%in%')
#Create training and test Data
prostate=read.csv('https://goo.gl/qmrDcY')
#glmnet doesnt use formula interface.x and y have to be explicitly set also
#also training and test can only be matrix object.
set.seed(100)
train_rows=createDataPartition(prostate$lpsa,p=0.7,list=FALSE)
train_data=prostate[train_rows,]
test_data=prostate[-train_rows,]
train_x=as.matrix(train_data[,colnames(train_data)%ni%'lpsa'])
train_y=as.matrix(train_data[,'lpsa'])
test_x=as.matrix(test_data[,colnames(train_data)%ni%'lpsa'])
test_y=as.matrix(test_data[,'lpsa'])
alphas=seq(0,1,by=0.01)
alphas
set.seed(100)
foldid=sample(1:10,size=length(train_y),replace=TRUE)#define the foldid.
grid=10^seq(10,-2,length=100)#start 10 end -2 :# to the power:10,7,4,1,-2...
grid
mapes=numeric(length(alphas))#initialize output
mapes
i=1#loop counter.
for(a in alphas){
bestlam=cv.glmnet(train_x,train_y,alpha=a,lambda= grid,foldid=foldid)$lambda.min#get best lambda for given alpha'a'
enetMod=glmnet(train_x,train_y,alpha=a,lambda = bestlam)#fit glmnet model
pred=predict(enetMod,s=bestlam,newx=test_x)#predict
mapes[i]=DMwR::regr.eval(test_y,pred)[4]#get MAPE
i=i+1#increment loop counter
}
out=cbind(alphas,mapes)#final alphas and best MAPE
out
#Plot
plot(out,type='l',col='blue')
# Alpha and MAPE at the minimum of the sweep, marked on the plot
alpha=out[which.min(out[,2]),1]
alpha
mape=out[which.min(out[,2]),2]
mape
points(x=alpha,y=mape,cex=2,col='red',pch='*')
alpha
|
f917393b876e8232a51cf02fd7ab1d50b354bbeb
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/sourcecode/kintsch73.R
|
80b899f282bc95cb7d7a9d3a9029e5f7dd83ee7d
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,767
|
r
|
kintsch73.R
|
#
# kintsch73.R, 28 Jan 20
# Data from:
# Comprehension and Recall of Text as a Function of Content Variables
# W. Kintsch and E. Kozminsky and W. J. Streby and G. McKoon and J. M. Keenan
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG example_experiment reading_example comprehension_example recall_example
# NOTE(review): point_col is presumably defined by ESEUR_config.r -- confirm
source("ESEUR_config.r")
pal_col=rainbow(2)
# Empty canvas with no axes; all content is drawn with text()/arrows() below
plot(0, type="n", bty="n",
xlim=c(0.05, 2.15), ylim=c(0.05, 1.9),
xaxs="i", yaxs="i",
xaxt="n", yaxt="n",
xlab="", ylab="")
# Upper panel: Romulus sentence, its four propositions, and their hierarchy
off=1.2
text(0, off+0.65, "Romulus, the legendary founder of Rome, took", pos=4)
text(0, off+0.55, " the women of the Sabine by force.", pos=4)
text(0, off+0.4, "1", pos=4, col=pal_col[1])
text(0, off+0.3, "2", pos=4, col=pal_col[1])
text(0, off+0.2, "3", pos=4, col=pal_col[1])
text(0, off+0.1, "4", pos=4, col=pal_col[1])
text(0.1, off+0.4, "(took, Romulus, women, by force)", pos=4, col=point_col)
text(0.1, off+0.3, "(found, Romulus, Rome)", pos=4, col=point_col)
text(0.1, off+0.2, "(legendary, Romulus)", pos=4, col=point_col)
text(0.1, off+0.1, "(Sabine, women)", pos=4, col=point_col)
# Proposition dependency graph: node 1 links to 2, 3 and 4
text(1.47, off+0.24, "1", col=pal_col[1])
# arrow right 0.15
text(1.5+0.17, off+0.24, " 3", pos=4, col=pal_col[1])
arrows(x0=1.5, y0=off+0.24, x1=1.5+0.25, length=0.05, col=pal_col[2])
text(1.5+0.17, off+0.24+0.15, " 2", pos=4, col=pal_col[1])
arrows(x0=1.5, y0=off+0.24, x1=1.5+0.25, y1=off+0.24+0.15, length=0.05, col=pal_col[2])
text(1.5+0.17, off+0.24-0.15, " 4", pos=4, col=pal_col[1])
arrows(x0=1.5, y0=off+0.24, x1=1.5+0.25, y1=off+0.24-0.12, length=0.05, col=pal_col[2])
# Lower panel: Cleopatra sentence, its eight propositions, and their hierarchy
text(0, 1.05, "Cleopatra's downfall lay in her foolish trust in the", pos=4)
text(0, 0.95, " fickle political figures of the Roman world.", pos=4)
text(0, 0.8, "1", pos=4, col=pal_col[1])
text(0, 0.7, "2", pos=4, col=pal_col[1])
text(0, 0.6, "3", pos=4, col=pal_col[1])
text(0, 0.5, "4", pos=4, col=pal_col[1])
text(0, 0.4, "5", pos=4, col=pal_col[1])
text(0, 0.3, "6", pos=4, col=pal_col[1])
text(0, 0.2, "7", pos=4, col=pal_col[1])
text(0, 0.1, "8", pos=4, col=pal_col[1])
text(0.1, 0.8, expression("(because, "*alpha*", "*beta*")"), pos=4, col=point_col)
text(0.1, 0.7, expression(alpha*" "%->%" (fell down, Cleopatra)"), pos=4, col=point_col)
text(0.1, 0.6, expression(beta*" " %->%" (trust, Cleopatra, figures)"), pos=4, col=point_col)
text(0.1, 0.5, "(foolish, trust)", pos=4, col=point_col)
text(0.1, 0.4, "(fickle, figures)", pos=4, col=point_col)
text(0.1, 0.3, "(political, figures)", pos=4, col=point_col)
text(0.1, 0.2, "(part of, figures, world)", pos=4, col=point_col)
text(0.1, 0.1, "(Roman, world)", pos=4, col=point_col)
# Dependency graph for the Cleopatra propositions (nodes 1-8)
text(1.3+0.00, 0.45+0.00, "1", col=pal_col[1])
text(1.3+0.20, 0.45+0.00, " 3", pos=4, col=pal_col[1])
arrows(x0=1.35, y0=0.45, x1=1.35+0.20, length=0.05, col=pal_col[2])
text(1.3+0.05+0.40, 0.45+0.00, " 4", pos=4, col=pal_col[1])
arrows(x0=1.35+0.10+0.20, y0=0.45, x1=1.35+0.10+0.2+0.15, length=0.05, col=pal_col[2])
text(1.3+0.20, 0.45+0.15, " 2", pos=4, col=pal_col[1])
arrows(x0=1.35, y0=0.45, x1=1.35+0.20, y=0.45+0.15, length=0.05, col=pal_col[2])
text(1.3+0.05+0.40, 0.45-0.15, " 5", pos=4, col=pal_col[1])
arrows(x0=1.35+0.10+0.20, y0=0.45, x1=1.35+0.10+0.2+0.15, y1=0.45-0.15, length=0.05, col=pal_col[2])
text(1.3+0.10+0.60, 0.45-0.15, " 6", pos=4, col=pal_col[1])
arrows(x0=1.35+0.10+0.45, y0=0.45-0.15, x1=1.35+0.10+0.2+0.40, length=0.05, col=pal_col[2])
text(1.3+0.05+0.40, 0.45-0.30, " 7", pos=4, col=pal_col[1])
arrows(x0=1.35+0.10+0.20, y0=0.45, x1=1.35+0.10+0.2+0.15, y1=0.45-0.30, length=0.05, col=pal_col[2])
text(1.3+0.10+0.60, 0.45-0.30, " 8", pos=4, col=pal_col[1])
arrows(x0=1.35+0.10+0.45, y0=0.45-0.30, x1=1.35+0.10+0.2+0.40, length=0.05, col=pal_col[2])
|
313ba3e30bafee40cd76a8b4d7b98010c4610517
|
fb6664c4c523cb31c62c1a8362cc86c191a45b7c
|
/Scripts/CPIscript.R
|
2a6a45bacc1ef61dafadae221788fcffa87c7546
|
[] |
no_license
|
ProfuTofu/CRH-2020-Spring-Capstone
|
91e58f9228b6e89475390ff67ced9221cbdd6c66
|
d9766d2665d049ffec35c27d184e9eaead9b657a
|
refs/heads/master
| 2022-08-24T00:41:55.751835
| 2020-05-29T18:19:33
| 2020-05-29T18:19:33
| 265,740,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,356
|
r
|
CPIscript.R
|
## ARIMA modelling and forecasting of the Consumer Price Index (CPI).
## NOTE(review): assumes a `CPI` data frame is already loaded in the session
## (rows 625:873, column 2 = monthly index values) and that the forecast,
## urca and ggpubr/ggplot2 packages are attached -- confirm against setup.
#convert first column of dataset to an time-series object (monthly, from 1999)
TS_CPI <- ts(CPI[625:873,2],start=1999, frequency=12)
# ACF/PACF of the raw (undifferenced) series
ggarrange(ggAcf(TS_CPI),ggPacf(TS_CPI))
#-------------------------------------------------
#kpss unit root test (statistical test of stationarity), H0 = stationary, HA = not stationary
TS_CPI %>% ur.kpss() %>% summary()
#in this case value of test statistic is 12.569
#critical values for 10%, 5%, 2.5%, and 1% significance are 0.347 0.463 0.574 0.739,
#so the statistic exceeds all of them and we REJECT H0 (series is not stationary)
#need to make time-series stationary
#-------------------------------------------------
#determine number of single differences to make data stationary given a level of significance of 0.05
CPIsingle_diffs <- ndiffs(TS_CPI, alpha=0.05, test="kpss")
#output is 2, so two single differences (second order differencing) is needed to make data stationary at alpha=0.05
#differencing by the recommended number of first differences
D_CPI <- diff(TS_CPI,lag=1,differences=CPIsingle_diffs)
#-------------------------------------------------
#graphing original and differenced time series, stacked facets
cbind("CPI (1982 to 1984 = 100)" = TS_CPI,
      "CPI Differenced" = D_CPI) %>%
  autoplot(facets=TRUE) +
  xlab("Year") + ylab("") + ggtitle("Consumer Price Index")
#-------------------------------------------------
#forecasting time series with auto.arima (automatic order selection)
CPImodel1 <- auto.arima(TS_CPI, seasonal=FALSE)
#forecast next 15 values, with 85/90/95% confidence intervals
CPIforecast1 <- forecast(CPImodel1, h=15, level=c(85,90,95))
#graph the forecast and the past 36 observations
CPIgraph1 <- autoplot(CPIforecast1, include=36)
CPIgraph1
#-------------------------------------------------
#determining parameters for ARIMA without auto.arima
#note that this is already differenced dataset
#ACF and PACF plots
ggarrange(ggAcf(D_CPI),ggPacf(D_CPI))
#notice that the first three PACF values are negative and below the critical values (slightly overdifferenced)
#because of this, we should add 3 MA terms to the model
CPImodel2 <- arima(TS_CPI, order=c(3,1,3))
CPIforecast2 <- forecast(CPImodel2, h=15, level=c(85,90,95))
CPIgraph2 <- autoplot(CPIforecast2, include=36)
CPIgraph2
#-------------------------------------------------
# Side-by-side comparison of the automatic and hand-specified forecasts
ggarrange(CPIgraph1, CPIgraph2)
# Residual diagnostics for the auto.arima fit, plotted against time.
# length.out guarantees one date per residual and avoids the floating-point
# drift of the original hand-rounded step (by=0.083333333).
CPI_residuals_1 <- as.vector(CPImodel1[["residuals"]])
dates <- seq(from = 1999, by = 1/12, length.out = length(CPI_residuals_1))
plot(CPI_residuals_1~dates)
|
318b958f19b325869564322f413f55eb2ea2cf50
|
ebc1cd487bb411d73a288cdf193205f6f6ed89f8
|
/tests/testthat/test_metrics.R
|
76ecfd394f03f0b3512b0f64d2e8b399f07fe1f8
|
[
"MIT"
] |
permissive
|
bbolker/cvms
|
e95366a693355cd964f315eb585344e919b1115d
|
9efa5fd63a46ff09c3695b33690d8a28634eb90b
|
refs/heads/master
| 2021-04-19T04:03:40.781371
| 2019-12-05T18:16:00
| 2019-12-05T18:16:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,107
|
r
|
test_metrics.R
|
library(cvms)
context("metrics")
# Diagnosis by score
test_that("Metrics work for glm in validate()",{
# skip_test_if_old_R_version()
set_seed_for_R_compatibility(7)
dat <- groupdata2::partition(participant.scores, p = 0.8,
cat_col = 'diagnosis',
id_col = 'participant',
list_out = FALSE)
validated <- validate(train_data=dat, models="diagnosis~score",
partitions_col = '.partitions', family = 'binomial',
positive=1)
same_model <- glm(diagnosis~score, data=dat[dat$.partitions==1,], family = 'binomial')
train_data <- dat[dat$.partitions == 1, ]
test_data <- dat[dat$.partitions == 2, ]
prob <- predict(same_model, newdata = test_data, type = c("response"))
test_data$prob <- prob
test_data <- test_data %>%
dplyr::mutate(pred = dplyr::if_else(prob>0.5,1,0))
# AUC
g <- pROC::roc(diagnosis ~ prob, data = test_data, direction = "<", levels=c(0,1))
expect_equal(validated$Results$AUC,as.numeric(g$auc))
auc2 <- AUC::auc(AUC::roc(test_data$prob, factor(test_data$diagnosis)))
expect_equal(validated$Results$AUC,auc2)
# Sensitivity
sens <- caret::sensitivity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Sensitivity,sens)
# Specificity
spec <- caret::specificity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Specificity,spec)
# posPredValue
posPredValue_ <- caret::posPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Pos Pred Value`,posPredValue_)
# should be identical to precision as well
precision_ <- caret::precision(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(posPredValue_, posPredValue_)
# negPredValue
negPredValue_ <- caret::negPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Neg Pred Value`,negPredValue_)
rm(test_data)
})
test_that("Metrics work for glmer in validate()",{
# skip_test_if_old_R_version()
set_seed_for_R_compatibility(7)
dat <- groupdata2::partition(participant.scores, p = 0.8,
cat_col = 'diagnosis',
id_col = 'participant',
list_out = FALSE)
validated <- validate(train_data=dat, models="diagnosis~score+(1|session)",
partitions_col = '.partitions', family = 'binomial',
positive=1)
same_model <- lme4::glmer(diagnosis~score+(1|session), data=dat[dat$.partitions==1,], family = 'binomial')
train_data <- dat[dat$.partitions==1,]
test_data <- dat[dat$.partitions==2,]
prob <- predict(same_model, newdata=test_data, type=c("response"))
test_data$prob <- prob
test_data <- test_data %>%
dplyr::mutate(pred = dplyr::if_else(prob>0.5,1,0))
# AUC
auc1 <- pROC::roc(diagnosis ~ prob, data = test_data, levels = c(0, 1), direction = "<")
expect_equal(validated$Results$AUC,as.numeric(auc1$auc))
auc2 <- AUC::auc(AUC::roc(test_data$prob, factor(test_data$diagnosis)))
expect_equal(validated$Results$AUC,auc2)
# Sensitivity
sens <- caret::sensitivity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Sensitivity,sens)
# Specificity
spec <- caret::specificity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Specificity,spec)
# posPredValue
posPredValue_ <- caret::posPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Pos Pred Value`,posPredValue_)
# negPredValue
negPredValue_ <- caret::negPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Neg Pred Value`,negPredValue_)
})
# Diagnosis by age
test_that("Metrics work for glm in validate()",{
# skip_test_if_old_R_version()
set_seed_for_R_compatibility(6)
dat <- groupdata2::partition(participant.scores, p = 0.8,
cat_col = 'diagnosis',
id_col = 'participant',
list_out = FALSE)
validated <- validate(train_data=dat, models="diagnosis~age",
partitions_col = '.partitions', family = 'binomial',
positive=1)
same_model <- glm(diagnosis~age, data=dat[dat$.partitions==1,], family = 'binomial')
train_data <- dat[dat$.partitions==1,]
test_data <- dat[dat$.partitions==2,]
prob <- predict(same_model, newdata=test_data, type=c("response"))
test_data$prob <- prob
test_data <- test_data %>%
dplyr::mutate(pred = dplyr::if_else(prob>0.5,1,0))
# AUC
g <- pROC::roc(diagnosis ~ prob, data = test_data,
direction = "<", levels=c(0,1))
expect_equal(validated$Results$AUC,as.numeric(g$auc))
roc_ <- AUC::roc(test_data$prob, factor(test_data$diagnosis))
auc2 <- AUC::auc(AUC::roc(test_data$prob, factor(test_data$diagnosis)))
expect_equal(validated$Results$AUC, auc2) # TODO What is the actual underlying error here?
# Sensitivity
sens <- caret::sensitivity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Sensitivity,sens)
# Specificity
spec <- caret::specificity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Specificity,spec)
# posPredValue
posPredValue_ <- caret::posPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Pos Pred Value`,posPredValue_)
# negPredValue
negPredValue_ <- caret::negPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Neg Pred Value`,negPredValue_)
rm(test_data)
})
test_that("Metrics work for glmer in validate()",{
# skip_test_if_old_R_version()
set_seed_for_R_compatibility(201)
dat <- groupdata2::partition(participant.scores, p = 0.8,
cat_col = 'diagnosis',
id_col = 'participant',
list_out = FALSE)
validated <- validate(train_data=dat, models="diagnosis~age+(1|session)",
partitions_col = '.partitions', family = 'binomial',
positive=1)
same_model <- lme4::glmer(diagnosis~age+(1|session),
data=dat[dat$.partitions==1,], family = 'binomial')
train_data <- dat[dat$.partitions==1,]
test_data <- dat[dat$.partitions==2,]
prob <- predict(same_model, newdata=test_data, type=c("response"))
test_data$prob <- prob
test_data <- test_data %>%
dplyr::mutate(pred = dplyr::if_else(prob>0.5,1,0))
# AUC
auc1 <- pROC::roc(diagnosis ~ prob, data = test_data, direction = "<", levels=c(0,1))
expect_equal(validated$Results$AUC,as.numeric(auc1$auc))
auc2 <- AUC::auc(AUC::roc(test_data$prob,
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis)))))
expect_equal(validated$Results$AUC,auc2)
# Sensitivity
sens <- caret::sensitivity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Sensitivity,sens)
# Specificity
spec <- caret::specificity(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$Specificity,spec)
# posPredValue
posPredValue_ <- caret::posPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Pos Pred Value`,posPredValue_)
# negPredValue
negPredValue_ <- caret::negPredValue(factor(test_data$pred, levels=levels(as.factor(train_data$diagnosis))),
factor(test_data$diagnosis, levels=levels(as.factor(train_data$diagnosis))),
positive = levels(as.factor(test_data$diagnosis))[1])
expect_equal(validated$Results$`Neg Pred Value`,negPredValue_)
})
test_that("Metrics work when 0 is positive class for glmer in validate()",{
# skip_test_if_old_R_version()
# AUC approach was improved from this answer: https://stats.stackexchange.com/a/269577
# Here I test that it works.
# First we will check what should be the behavior, when changing positive to 0.
participant.scores$perfect_predicted_probability <- c(0.8, 0.9, 0.7, 0.3,0.2,0.1,
0.8,0.7,0.7,0.1,0.4,0.3,
0.8, 0.9, 0.7, 0.8,0.7,
0.7, 0.7, 0.9, 0.8, 0.8,
0.7, 0.95, 0.3, 0.2, 0.1,
0.4, 0.25, 0.2)
participant.scores$few_false_negs_predicted_probability <-c(0.2, 0.3, 0.4, 0.3,0.2,0.1,
0.8,0.7,0.7,0.1,0.4,0.3,
0.8, 0.9, 0.7, 0.8,0.7,
0.7, 0.7, 0.9, 0.8, 0.8,
0.7, 0.95, 0.3, 0.2, 0.1,
0.4, 0.25, 0.2)
participant.scores$few_false_pos_predicted_probability <- c(0.8, 0.9, 0.7, 0.7,0.9,0.6,
0.8,0.7,0.7,0.1,0.4,0.3,
0.8, 0.9, 0.7, 0.8,0.7,
0.7, 0.7, 0.9, 0.8, 0.8,
0.7, 0.95, 0.3, 0.2, 0.1,
0.4, 0.25, 0.2)
participant.scores$worst_predicted_probability <- 1 - c(0.8, 0.9, 0.7, 0.3,0.2,0.1,
0.8,0.7,0.7,0.1,0.4,0.3,
0.8, 0.9, 0.7, 0.8,0.7,
0.7, 0.7, 0.9, 0.8, 0.8,
0.7, 0.95, 0.3, 0.2, 0.1,
0.4, 0.25, 0.2)
# AUC (positive = 1 vs positive = 0)
# PERFECT
# With AUC::
AUC_auc_perfect <- AUC::auc(AUC::roc(participant.scores$perfect_predicted_probability,
factor(participant.scores$diagnosis)))
AUC_auc_perfect_pos0 <- AUC::auc(AUC::roc(1 - participant.scores$perfect_predicted_probability,
factor(1 - participant.scores$diagnosis)))
expect_equal(AUC_auc_perfect, AUC_auc_perfect_pos0)
# With pROC
pROC_auc_perfect <- as.numeric(pROC::roc(response = participant.scores$diagnosis,
predictor = participant.scores$perfect_predicted_probability,
direction = "<", levels=c(0,1))$auc)
pROC_auc_perfect_pos0 <- as.numeric(pROC::roc(response = 1-participant.scores$diagnosis,
predictor = 1-participant.scores$perfect_predicted_probability,
direction = ">", levels=c(1,0))$auc)
expect_equal(pROC_auc_perfect, pROC_auc_perfect_pos0)
expect_equal(pROC_auc_perfect, AUC_auc_perfect)
expect_equal(AUC_auc_perfect_pos0, pROC_auc_perfect_pos0)
# FALSE NEGATIVES
# With AUC
AUC_auc_false_negs <- AUC::auc(AUC::roc(participant.scores$few_false_negs_predicted_probability,
factor(participant.scores$diagnosis)))
AUC_auc_false_negs_pos0 <- AUC::auc(AUC::roc(1 - participant.scores$few_false_negs_predicted_probability,
factor(1 - participant.scores$diagnosis)))
expect_equal(AUC_auc_false_negs, AUC_auc_false_negs_pos0)
# With pROC
pROC_auc_false_negs <- as.numeric(pROC::roc(response = participant.scores$diagnosis,
predictor = participant.scores$few_false_negs_predicted_probability,
direction = "<", levels=c(0,1))$auc)
pROC_auc_false_negs_pos0 <- as.numeric(pROC::roc(response = 1-participant.scores$diagnosis,
predictor = 1-participant.scores$few_false_negs_predicted_probability,
direction = ">", levels=c(1,0))$auc)
expect_equal(pROC_auc_false_negs, pROC_auc_false_negs_pos0)
expect_equal(pROC_auc_false_negs, AUC_auc_false_negs)
expect_equal(AUC_auc_false_negs_pos0, pROC_auc_false_negs_pos0)
# FALSE POSITIVES
# With AUC
AUC_auc_false_pos <- AUC::auc(AUC::roc(participant.scores$few_false_pos_predicted_probability,
factor(participant.scores$diagnosis)))
AUC_auc_false_pos_pos0 <- AUC::auc(AUC::roc(1 - participant.scores$few_false_pos_predicted_probability,
factor(1 - participant.scores$diagnosis)))
expect_equal(AUC_auc_false_pos, AUC_auc_false_pos_pos0)
# With pROC
pROC_auc_false_pos <- as.numeric(pROC::roc(response = participant.scores$diagnosis,
predictor = participant.scores$few_false_pos_predicted_probability,
direction = "<", levels=c(0,1))$auc)
pROC_auc_false_pos_pos0 <- as.numeric(pROC::roc(response = 1-participant.scores$diagnosis,
predictor = 1-participant.scores$few_false_pos_predicted_probability,
direction = ">", levels=c(1,0))$auc)
expect_equal(pROC_auc_false_pos, pROC_auc_false_pos_pos0)
expect_equal(pROC_auc_false_pos, AUC_auc_false_pos)
expect_equal(AUC_auc_false_pos_pos0, pROC_auc_false_pos_pos0)
# ALL WRONG
# With AUC
AUC_auc_worst <- AUC::auc(AUC::roc(participant.scores$worst_predicted_probability,
factor(participant.scores$diagnosis)))
AUC_auc_worst_pos0 <- AUC::auc(AUC::roc(1 - participant.scores$worst_predicted_probability,
factor(1 - participant.scores$diagnosis)))
expect_equal(AUC_auc_worst, AUC_auc_worst_pos0)
# With pROC
pROC_auc_worst <- as.numeric(pROC::roc(response = participant.scores$diagnosis,
predictor = participant.scores$worst_predicted_probability,
direction = "<", levels=c(0,1))$auc)
pROC_auc_worst_pos0 <- as.numeric(pROC::roc(response = 1-participant.scores$diagnosis,
predictor = 1-participant.scores$worst_predicted_probability,
direction = ">", levels=c(1,0))$auc)
expect_equal(pROC_auc_worst, pROC_auc_worst_pos0)
expect_equal(pROC_auc_worst, AUC_auc_worst)
expect_equal(AUC_auc_worst_pos0, pROC_auc_worst_pos0)
set_seed_for_R_compatibility(201)
dat <- groupdata2::partition(participant.scores, p = 0.8,
cat_col = 'diagnosis',
id_col = 'participant',
list_out = FALSE)
validated_pos1 <- validate(train_data=dat, models="diagnosis~score",
partitions_col = '.partitions', family = 'binomial',
positive = 2)
validated_pos0 <- validate(train_data=dat, models="diagnosis~score",
partitions_col = '.partitions', family = 'binomial',
positive = 1)
expect_equal(validated_pos1$Results$AUC,validated_pos0$Results$AUC)
validated_pos1 <- validate(train_data=dat, models="diagnosis~age",
partitions_col = '.partitions', family = 'binomial',
positive = 2)
validated_pos0 <- validate(train_data=dat, models="diagnosis~age",
partitions_col = '.partitions', family = 'binomial',
positive = 1)
expect_equal(validated_pos1$Results$AUC,validated_pos0$Results$AUC)
# If dependent variable is character factor
dat$diagnosis_chr <- factor(ifelse(dat$diagnosis == 0, "a", "b"))
validated_pos1_num <- validate(train_data=dat, models="diagnosis_chr~age",
partitions_col = '.partitions', family = 'binomial',
positive = 2)
validated_pos1_chr <- validate(train_data=dat, models="diagnosis_chr~age",
partitions_col = '.partitions', family = 'binomial',
positive = "b")
expect_equal(validated_pos1_num$Results$AUC,validated_pos1_chr$Results$AUC)
validated_pos0_num <- validate(train_data=dat, models="diagnosis_chr~age",
partitions_col = '.partitions', family = 'binomial',
positive = 1)
validated_pos0_chr <- validate(train_data=dat, models="diagnosis_chr~age",
partitions_col = '.partitions', family = 'binomial',
positive = "a")
expect_equal(validated_pos0_num$Results$AUC,validated_pos0_chr$Results$AUC)
expect_equal(validated_pos0_num$Results$AUC,validated_pos1_num$Results$AUC)
expect_equal(validated_pos0_chr$Results$AUC,validated_pos1_chr$Results$AUC)
})
test_that("Metrics work in cross_validate()",{
# skip_test_if_old_R_version()
#
# In this test I printed the predictions within each training loop
# and manually copied the predictions
# I did this to ensure that cross_validate gathers the predictions correctly before
# calculating its metrics. This is incredibly important.
# Metrics are calculated and compared to the metrics I got from cross_validate.
#
target <- c(0,0,0,1,1,1,
0,0,0,1,1,1,1,1,1,
0,0,0,1,1,1,
0,0,0,1,1,1,1,1,1)
predictions_prob <- c(0.77379615,0.36952324,0.09125579,0.89205819,
0.73620142,0.55282759,0.8307928,0.6042899,
0.1754574,0.9317034,0.8307928,0.5145979,
0.9269098,0.6874739,0.5867096,0.71867985,
0.26746773,0.09346533,0.85976827,0.24884534,
0.13205012,0.6503171,0.4541755,0.1564246,
0.8445872,0.7085838,0.5871876,0.8514956,
0.7607141,0.7085838)
predictions <- dplyr::if_else(predictions_prob>0.5,1,0)
pred_df <- data.frame("obs"=target, "prob"=predictions_prob, "pred"=predictions)
# AUC
auc1 <- pROC::roc(obs ~ prob, data = pred_df, direction = "<", levels=c(0,1))
expect_equal(as.numeric(auc1$auc), 0.7615741, tolerance = 1e-3)
auc2 <- AUC::auc(AUC::roc(pred_df$prob, factor(pred_df$obs)))
expect_equal(auc2,0.7615741, tolerance = 1e-3)
# Sensitivity
sens <- caret::sensitivity(as.factor(pred_df$pred), as.factor(pred_df$obs),
positive = levels(as.factor(pred_df$obs))[1])
expect_equal(sens,0.5833333, tolerance = 1e-3)
# # Specificity
spec <- caret::specificity(as.factor(pred_df$pred), as.factor(pred_df$obs),
positive = levels(as.factor(pred_df$obs))[1])
expect_equal(spec,0.8888889, tolerance = 1e-3)
# posPredValue
posPredValue_ <- caret::posPredValue(as.factor(pred_df$pred), as.factor(pred_df$obs),
positive = levels(as.factor(pred_df$obs))[1])
expect_equal(posPredValue_,0.7777778, tolerance = 1e-3)
# negPredValue
negPredValue_ <- caret::negPredValue(as.factor(pred_df$pred), as.factor(pred_df$obs),
positive = levels(as.factor(pred_df$obs))[1])
expect_equal(negPredValue_,0.7619048, tolerance = 1e-3)
# F1
F1 <- (2 * posPredValue_ * sens) / (posPredValue_ + sens)
expect_equal(F1,0.6666667, tolerance = 1e-3)
# Confusion matrix
confMat <- caret::confusionMatrix(factor(pred_df$pred, levels=c(0,1)),
reference=factor(pred_df$obs, levels=c(0,1)))
TP <- confMat$table[1] # Dependent on positive = 0 ?
FP <- confMat$table[3]
FN <- confMat$table[2]
TN <- confMat$table[4]
precision <- TP / (TP + FP)
recall <- TP / (TP + FN)
F1_2 <- 2 * precision * recall / (precision + recall)
expect_equal(F1_2,0.6666667, tolerance = 1e-3)
# Test that MCC does not care about what class if positive
expect_equal(mltools::mcc(TP=TP, FP=FP, FN=FN, TN=TN),
mltools::mcc(TP=TN, FP=FN, FN=FP, TN=TP))
# Add tests for the following metrics
# expect_equal(#LowerCI, 0.5851154)
# expect_equal(#UpperCI, 0.9380328)
# expect_equal(#kappa, 0.4927536)
# expect_equal(#prevalence, 0.4)
# expect_equal(#detectionrate, 0.2333333)
# expect_equal(#detectionprevalence, 0.3)
# expect_equal(#balanceACC, 0.7361111)
})
test_that("mae and rmse works", {
# skip_test_if_old_R_version()
# Normal distribution
set_seed_for_R_compatibility(6)
targets <- rnorm(100)
preds <- rnorm(100)
# RMSE
expect_equal(rmse(predictions = preds, targets = targets), 1.23924, tolerance = 1e-3)
# MAE
expect_equal(mae(predictions = preds, targets = targets), 0.9888096, tolerance = 1e-3)
# Uniform distribution
set_seed_for_R_compatibility(9)
targets <- runif(100,min = 45, max = 97)
preds <- runif(100,min = 54, max = 120)
# RMSE
expect_equal(rmse(predictions = preds, targets = targets), 30.2487, tolerance = 1e-3)
# MAE
expect_equal(mae(predictions = preds, targets = targets), 24.3477, tolerance = 1e-3)
})
|
bd45dc087a2a6358b1c020d478d2823529b9aeed
|
594a5c780c6bf31da16e0c29270c74ba6737e842
|
/man/JAGS_fit.Rd
|
5adcf8d844d9e143eb07a8699adc6d2c5596e06c
|
[] |
no_license
|
FBartos/BayesTools
|
55faad16c0b3558d519161c33b7cf448a34ca739
|
98fa230cf191b9093fb23f878799367d940b19d1
|
refs/heads/master
| 2023-07-19T08:40:23.734725
| 2023-07-11T13:16:49
| 2023-07-11T13:16:49
| 363,232,801
| 8
| 1
| null | 2023-07-11T13:16:50
| 2021-04-30T18:57:23
|
R
|
UTF-8
|
R
| false
| true
| 4,851
|
rd
|
JAGS_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JAGS-fit.R
\name{JAGS_fit}
\alias{JAGS_fit}
\alias{JAGS_extend}
\title{Fits a 'JAGS' model}
\usage{
JAGS_fit(
model_syntax,
data = NULL,
prior_list = NULL,
formula_list = NULL,
formula_data_list = NULL,
formula_prior_list = NULL,
chains = 4,
adapt = 500,
burnin = 1000,
sample = 4000,
thin = 1,
autofit = FALSE,
autofit_control = list(max_Rhat = 1.05, min_ESS = 500, max_error = 0.01, max_SD_error =
0.05, max_time = list(time = 60, unit = "mins"), sample_extend = 1000, restarts = 10),
parallel = FALSE,
cores = chains,
silent = TRUE,
seed = NULL,
add_parameters = NULL,
required_packages = NULL
)
JAGS_extend(
fit,
autofit_control = list(max_Rhat = 1.05, min_ESS = 500, max_error = 0.01, max_SD_error =
0.05, max_time = list(time = 60, unit = "mins"), sample_extend = 1000, restarts = 10),
parallel = FALSE,
cores = NULL,
silent = TRUE,
seed = NULL
)
}
\arguments{
\item{model_syntax}{jags syntax for the model part}
\item{data}{list containing data to fit the model (not including data for the formulas)}
\item{prior_list}{named list of prior distribution
(names correspond to the parameter names) of parameters not specified within the
\code{formula_list}}
\item{formula_list}{named list of formulas to be added to the model
(names correspond to the parameter name created by each of the formula)}
\item{formula_data_list}{named list of data frames containing data for each formula
(names of the lists correspond to the parameter name created by each of the formula)}
\item{formula_prior_list}{named list of named lists of prior distributions
(names of the lists correspond to the parameter name created by each of the formula and
the names of the prior distribution correspond to the parameter names) of parameters specified
within the \code{formula}}
\item{chains}{number of chains to be run, defaults to \code{4}}
\item{adapt}{number of samples used for adapting the MCMC chains, defaults to \code{500}}
\item{burnin}{number of burnin iterations of the MCMC chains, defaults to \code{1000}}
\item{sample}{number of sampling iterations of the MCMC chains, defaults to \code{4000}}
\item{thin}{thinning interval for the MCMC samples, defaults to \code{1}}
\item{autofit}{whether the models should be refitted until convergence criteria
specified in \code{autofit_control}. Defaults to \code{FALSE}.}
\item{autofit_control}{a list of arguments controlling the autofit function.
Possible options are:
\describe{
\item{max_Rhat}{maximum R-hat error for the autofit function.
Defaults to \code{1.05}.}
\item{min_ESS}{minimum effective sample size. Defaults to \code{500}.}
\item{max_error}{maximum MCMC error. Defaults to \code{1.01}.}
\item{max_SD_error}{maximum MCMC error as the proportion of standard
deviation of the parameters. Defaults to \code{0.05}.}
\item{max_time}{list specifying the time \code{time} and \code{units}
after which the automatic fitting function is stopped. The units arguments
need to correspond to \code{units} passed to \link[base]{difftime} function.}
\item{sample_extend}{number of samples between each convergence check. Defaults to
\code{1000}.}
\item{restarts}{number of times new initial values should be generated in case the model
fails to initialize. Defaults to \code{10}.}
}}
\item{parallel}{whether the chains should be run in parallel \code{FALSE}}
\item{cores}{number of cores used for multithreading if \code{parallel = TRUE},
defaults to \code{chains}}
\item{silent}{whether the function should proceed silently, defaults to \code{TRUE}}
\item{seed}{seed for random number generation}
\item{add_parameters}{vector of additional parameter names that should be used
monitored but were not specified in the \code{prior_list}}
\item{required_packages}{character vector specifying list of packages containing
JAGS models required for sampling (in case that the function is run in parallel or in
detached R session). Defaults to \code{NULL}.}
\item{fit}{a 'BayesTools_fit' object (created by \code{JAGS_fit()} function) to be
extended}
}
\value{
\code{JAGS_fit} returns an object of class 'runjags' and 'BayesTools_fit'.
}
\description{
A wrapper around
\link[runjags]{run.jags} that simplifies fitting 'JAGS' models
with usage with pre-specified model part of the 'JAGS' syntax, data and list
of prior distributions.
}
\examples{
\dontrun{
# simulate data
set.seed(1)
data <- list(
x = rnorm(10),
N = 10
)
data$x
# define priors
priors_list <- list(mu = prior("normal", list(0, 1)))
# define likelihood for the data
model_syntax <-
"model{
for(i in 1:N){
x[i] ~ dnorm(mu, 1)
}
}"
# fit the models
fit <- JAGS_fit(model_syntax, data, priors_list)
}
}
\seealso{
\code{\link[=JAGS_check_convergence]{JAGS_check_convergence()}}
}
|
915c5da36463ab4f0efadd9b12f3456453d1e741
|
7a73427bdfd98932c7014f5f02f15e05953edb13
|
/R-codes/DivRank.R
|
ffef6e6888b8482c5d3a545d93bab6afd0871af1
|
[] |
no_license
|
haozhestat/DivRank
|
2784945ab657e79b8b20916e426b345e919c2844
|
20f6ece1600ec49283d8dd8444f86fc6365d2932
|
refs/heads/master
| 2020-04-02T04:43:13.909928
| 2016-06-08T18:29:18
| 2016-06-08T18:29:18
| 60,396,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
DivRank.R
|
# =============================================================================
# File Name: DivRank.R
# Author: Haozhe Zhang
# Contact: haozhe@iastate.edu
# Creation Date: 2016-06-07
# Last Modified: 2016-06-08 13:28:18 CDT
# =============================================================================
DivRank = function(weight_mat, lambda, alpha, prior_score=NULL,converg_error=10^(-3),max_iter=1000){
n = nrow(weight_mat)
if(is.null(prior_score))
prior_score = rep(1/n,n)
num_iter = 0
diff_error = 10^6
marg_prob = rep(1/n,n)
prob_int_mat = as.matrix(alpha*weight_mat/matrix(rep(apply(weight_mat, 1, sum),n),n,n))
diag(prob_int_mat) = (1-alpha)
while((num_iter<max_iter)&(diff_error>converg_error)){
tmp = prob_int_mat*t(matrix(rep(marg_prob,n),n,n))
prob_mat = (1-lambda)*t(matrix(rep(prior_score,n),n,n))+lambda*tmp/matrix(rep(rowSums(tmp),n),n,n)
marg_prob_new = marg_prob%*%prob_mat
num_iter = num_iter + 1
diff_error = sum(abs(marg_prob_new-marg_prob))/sum(abs(marg_prob))
marg_prob = marg_prob_new
}
return(list(divrank=order(marg_prob,decreasing=TRUE),marg_prob=as.vector(marg_prob),num_iter=num_iter))
}
|
b1208ed5ac461c314072cc67ff96b12846e26e9a
|
cb939b26da3f94f1a0b2d240251ae5b6a62817ac
|
/lisplib/manual/ch0.r
|
a97423ab4a0b57d9e52906ba2d6f107af3e10830
|
[
"BSD-4-Clause-UC"
] |
permissive
|
krytarowski/franz-lisp-christos
|
211926e85efc4e1e681f7613300eb0214554c353
|
7f529800f415d583b4c1f0ae90dec19ad3fb1a8a
|
refs/heads/master
| 2020-06-19T18:09:43.734498
| 2019-07-14T11:05:05
| 2019-07-14T11:05:05
| 196,815,573
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,185
|
r
|
ch0.r
|
The FRANZ LISP Manual
by
_J_o_h_n _K_. _F_o_d_e_r_a_r_o
_K_e_i_t_h _L_. _S_k_l_o_w_e_r
_K_e_v_i_n _L_a_y_e_r
June 1983
A document in
four movements
_O_v_e_r_t_u_r_e
_A _c_h_o_r_u_s _o_f _s_t_u_d_e_n_t_s _u_n_d_e_r _t_h_e _d_i_r_e_c_t_i_o_n _o_f
_R_i_c_h_a_r_d _F_a_t_e_m_a_n _h_a_v_e _c_o_n_t_r_i_b_u_t_e_d _t_o _b_u_i_l_d_i_n_g _F_R_A_N_Z
_L_I_S_P _f_r_o_m _a _m_e_r_e _m_e_l_o_d_y _i_n_t_o _a _f_u_l_l _s_y_m_p_h_o_n_y _.
_T_h_e _m_a_j_o_r _c_o_n_t_r_i_b_u_t_o_r_s _t_o _t_h_e _i_n_i_t_i_a_l _s_y_s_t_e_m _w_e_r_e
_M_i_k_e _C_u_r_r_y_, _J_o_h_n _B_r_e_e_d_l_o_v_e _a_n_d _J_e_f_f _L_e_v_i_n_s_k_y_.
_B_i_l_l _R_o_w_a_n _a_d_d_e_d _t_h_e _g_a_r_b_a_g_e _c_o_l_l_e_c_t_o_r _a_n_d _a_r_r_a_y
_p_a_c_k_a_g_e_. _T_o_m _L_o_n_d_o_n _w_o_r_k_e_d _o_n _a_n _e_a_r_l_y _c_o_m_p_i_l_e_r
_a_n_d _h_e_l_p_e_d _i_n _o_v_e_r_a_l_l _s_y_s_t_e_m _d_e_s_i_g_n_. _K_e_i_t_h
_S_k_l_o_w_e_r _h_a_s _c_o_n_t_r_i_b_u_t_e_d _m_u_c_h _t_o _F_R_A_N_Z _L_I_S_P_, _a_d_d_i_n_g
_t_h_e _b_i_g_n_u_m _p_a_c_k_a_g_e _a_n_d _r_e_w_r_i_t_i_n_g _m_o_s_t _o_f _t_h_e _c_o_d_e
_t_o _i_n_c_r_e_a_s_e _i_t_s _e_f_f_i_c_i_e_n_c_y _a_n_d _c_l_a_r_i_t_y_. _K_i_p_p
_H_i_c_k_m_a_n _a_n_d _C_h_a_r_l_e_s _K_o_e_s_t_e_r _a_d_d_e_d _h_u_n_k_s_. _M_i_t_c_h
_M_a_r_c_u_s _a_d_d_e_d _*_r_s_e_t_, _e_v_a_l_h_o_o_k _a_n_d _e_v_a_l_f_r_a_m_e_. _D_o_n
_C_o_h_e_n _a_n_d _o_t_h_e_r_s _a_t _C_a_r_n_e_g_i_e_-_M_e_l_l_o_n _m_a_d_e _s_o_m_e
_i_m_p_r_o_v_e_m_e_n_t_s _t_o _e_v_a_l_f_r_a_m_e _a_n_d _p_r_o_v_i_d_e_d _v_a_r_i_o_u_s
_f_e_a_t_u_r_e_s _m_o_d_e_l_l_e_d _a_f_t_e_r _U_C_I_/_C_M_U _P_D_P_-_1_0 _L_i_s_p _a_n_d
_I_n_t_e_r_l_i_s_p _e_n_v_i_r_o_n_m_e_n_t_s _(_e_d_i_t_o_r_, _d_e_b_u_g_g_e_r_, _t_o_p_-
_l_e_v_e_l_)_. _J_o_h_n _F_o_d_e_r_a_r_o _w_r_o_t_e _t_h_e _c_o_m_p_i_l_e_r_, _a_d_d_e_d _a
_f_e_w _f_u_n_c_t_i_o_n_s_, _a_n_d _w_r_o_t_e _m_u_c_h _o_f _t_h_i_s _m_a_n_u_a_l_. _O_f
_c_o_u_r_s_e_, _o_t_h_e_r _a_u_t_h_o_r_s _h_a_v_e _c_o_n_t_r_i_b_u_t_e_d _s_p_e_c_i_f_i_c
_c_h_a_p_t_e_r_s _a_s _i_n_d_i_c_a_t_e_d_. _K_e_v_i_n _L_a_y_e_r _m_o_d_i_f_i_e_d _t_h_e
_c_o_m_p_i_l_e_r _t_o _p_r_o_d_u_c_e _c_o_d_e _f_o_r _t_h_e _M_o_t_o_r_o_l_a _6_8_0_0_0_,
_a_n_d _h_e_l_p_e_d _m_a_k_e _F_R_A_N_Z _L_I_S_P _p_a_s_s _`_`_L_i_n_t_'_'_.
_T_h_i_s _m_a_n_u_a_l _m_a_y _b_e _s_u_p_p_l_e_m_e_n_t_e_d _o_r _s_u_p_p_l_a_n_t_e_d _b_y
_l_o_c_a_l _c_h_a_p_t_e_r_s _r_e_p_r_e_s_e_n_t_i_n_g _a_l_t_e_r_a_t_i_o_n_s_, _a_d_d_i_t_i_o_n_s
_a_n_d _d_e_l_e_t_i_o_n_s_. _W_e _a_t _U_._C_. _B_e_r_k_e_l_e_y _a_r_e _p_l_e_a_s_e_d _t_o
_l_e_a_r_n _o_f _g_e_n_e_r_a_l_l_y _u_s_e_f_u_l _s_y_s_t_e_m _f_e_a_t_u_r_e_s_, _b_u_g
_f_i_x_e_s_, _o_r _u_s_e_f_u_l _p_r_o_g_r_a_m _p_a_c_k_a_g_e_s_, _a_n_d _w_e _w_i_l_l
_a_t_t_e_m_p_t _t_o _r_e_d_i_s_t_r_i_b_u_t_e _s_u_c_h _c_o_n_t_r_i_b_u_t_i_o_n_s_.
(C) 1980, 1981, 1983 by the Regents of the University of
California. (exceptions: Chapters 13, 14 (first half), 15
and 16 have separate copyrights, as indicated. These are
reproduced by permission of the copyright holders.)
Permission to copy without fee all or part of this material
is granted provided that the copies are not made or dis-
tributed for direct commercial advantage, and the copyright
notice of the Regents, University of California, is given.
All rights reserved.
Work reported herein was supported in part by the U. S.
Department of Energy, Contract DE-AT03-76SF00034, Project
Agreement DE-AS03-79ER10358, and the National Science Foun-
dation under Grant No. MCS 7807291
UNIX is a trademark of Bell Laboratories. VAX and PDP are
trademarks of Digital Equiptment Coporation. MC68000 is a
trademark of Motorola Semiconductor Products, Inc.
Score
FFiirrsstt MMoovveemmeenntt _(_a_l_l_e_g_r_o _n_o_n _t_r_o_p_p_o_)
1. FRANZ LISP
_I_n_t_r_o_d_u_c_t_i_o_n _t_o _F_R_A_N_Z _L_I_S_P_, _d_e_t_a_i_l_s _o_f _d_a_t_a _t_y_p_e_s_,
_a_n_d _d_e_s_c_r_i_p_t_i_o_n _o_f _n_o_t_a_t_i_o_n
2. Data Structure Access
_F_u_n_c_t_i_o_n_s _f_o_r _t_h_e _c_r_e_a_t_i_o_n_, _d_e_s_t_r_u_c_t_i_o_n _a_n_d
_m_a_n_i_p_u_l_a_t_i_o_n _o_f _l_i_s_p _d_a_t_a _o_b_j_e_c_t_s_.
3. Arithmetic Functions
_F_u_n_c_t_i_o_n_s _t_o _p_e_r_f_o_r_m _a_r_i_t_h_m_e_t_i_c _o_p_e_r_a_t_i_o_n_s_.
4. Special Functions
_F_u_n_c_t_i_o_n_s _f_o_r _a_l_t_e_r_i_n_g _f_l_o_w _o_f _c_o_n_t_r_o_l_. _F_u_n_c_t_i_o_n_s
_f_o_r _m_a_p_p_i_n_g _o_t_h_e_r _f_u_n_c_t_i_o_n_s _o_v_e_r _l_i_s_t_s_.
5. I/O Functions
_F_u_n_c_t_i_o_n_s _f_o_r _r_e_a_d_i_n_g _a_n_d _w_r_i_t_i_n_g _f_r_o_m _p_o_r_t_s_.
_F_u_n_c_t_i_o_n_s _f_o_r _t_h_e _m_o_d_i_f_i_c_a_t_i_o_n _o_f _t_h_e _r_e_a_d_e_r_'_s
_s_y_n_t_a_x_.
6. System Functions
_F_u_n_c_t_i_o_n_s _f_o_r _s_t_o_r_a_g_e _m_a_n_a_g_e_m_e_n_t_, _d_e_b_u_g_g_i_n_g_, _a_n_d
_f_o_r _t_h_e _r_e_a_d_i_n_g _a_n_d _s_e_t_t_i_n_g _o_f _g_l_o_b_a_l _L_i_s_p _s_t_a_t_u_s
_v_a_r_i_a_b_l_e_s_. _F_u_n_c_t_i_o_n_s _f_o_r _d_o_i_n_g _U_N_I_X_-_s_p_e_c_i_f_i_c
_t_a_s_k_s _s_u_c_h _a_s _p_r_o_c_e_s_s _c_o_n_t_r_o_l_.
SSeeccoonndd MMoovveemmeenntt _(_L_a_r_g_o_)
7. The Reader
_A _d_e_s_c_r_i_p_t_i_o_n _o_f _t_h_e _s_y_n_t_a_x _c_o_d_e_s _u_s_e_d _b_y _t_h_e
_r_e_a_d_e_r_. _A_n _e_x_p_l_a_n_a_t_i_o_n _o_f _c_h_a_r_a_c_t_e_r _m_a_c_r_o_s_.
8. Functions, Fclosures, and Macros
_A _d_e_s_c_r_i_p_t_i_o_n _o_f _v_a_r_i_o_u_s _t_y_p_e_s _o_f _f_u_n_c_t_i_o_n_a_l
_o_b_j_e_c_t_s_. _A_n _e_x_a_m_p_l_e _o_f _t_h_e _u_s_e _o_f _f_o_r_e_i_g_n _f_u_n_c_-
_t_i_o_n_s_.
9. Arrays and Vectors
_A _d_e_t_a_i_l_e_d _d_e_s_c_r_i_p_t_i_o_n _o_f _t_h_e _p_a_r_t_s _o_f _a_n _a_r_r_a_y
_a_n_d _o_f _M_a_c_l_i_s_p _c_o_m_p_a_t_i_b_l_e _a_r_r_a_y_s_.
10. Exception Handling
_A _d_e_s_c_r_i_p_t_i_o_n _o_f _t_h_e _e_r_r_o_r _h_a_n_d_l_i_n_g _s_e_q_u_e_n_c_e _a_n_d
_o_f _a_u_t_o_l_o_a_d_i_n_g_.
TThhiirrdd MMoovveemmeenntt _(_S_c_h_e_r_z_o_)
11. The Joseph Lister Trace Package
_A _d_e_s_c_r_i_p_t_i_o_n _o_f _a _v_e_r_y _u_s_e_f_u_l _d_e_b_u_g_g_i_n_g _a_i_d_.
12. Liszt, the lisp compiler
_A _d_e_s_c_r_i_p_t_i_o_n _o_f _t_h_e _o_p_e_r_a_t_i_o_n _o_f _t_h_e _c_o_m_p_i_l_e_r _a_n_d
_h_i_n_t_s _f_o_r _m_a_k_i_n_g _f_u_n_c_t_i_o_n_s _c_o_m_p_i_l_a_b_l_e_.
13. CMU Top Level and File Package
_A _d_e_s_c_r_i_p_t_i_o_n _o_f _a _t_o_p _l_e_v_e_l _w_i_t_h _a _h_i_s_t_o_r_y _m_e_c_h_a_-
_n_i_s_m _a_n_d _a _p_a_c_k_a_g_e _w_h_i_c_h _h_e_l_p_s _y_o_u _k_e_e_p _t_r_a_c_k _o_f
_f_i_l_e_s _o_f _l_i_s_p _f_u_n_c_t_i_o_n_s_.
14 Stepper
_A _d_e_s_c_r_i_p_t_i_o_n _o_f _a _p_r_o_g_r_a_m _w_h_i_c_h _p_e_r_m_i_t_s _y_o_u _t_o
_p_u_t _b_r_e_a_k_p_o_i_n_t_s _i_n _l_i_s_p _c_o_d_e _a_n_d _t_o _s_i_n_g_l_e _s_t_e_p
_i_t_. _A _d_e_s_c_r_i_p_t_i_o_n _o_f _t_h_e _e_v_a_l_h_o_o_k _a_n_d _f_u_n_c_a_l_l_h_o_o_k
_m_e_c_h_a_n_i_s_m_.
15 Fixit
_A _p_r_o_g_r_a_m _w_h_i_c_h _p_e_r_m_i_t_s _y_o_u _t_o _e_x_a_m_i_n_e _a_n_d _m_o_d_i_f_y
_e_v_a_l_u_a_t_i_o_n _s_t_a_c_k _i_n _o_r_d_e_r _t_o _f_i_x _b_u_g_s _o_n _t_h_e _f_l_y_.
16 Lisp Editor
_A _s_t_r_u_c_t_u_r_e _e_d_i_t_o_r _f_o_r _i_n_t_e_r_a_c_t_i_v_e _m_o_d_i_f_i_c_a_t_i_o_n _o_f
_l_i_s_p _c_o_d_e_.
FFiinnaall MMoovveemmeenntt _(_a_l_l_e_g_r_o_)
Appendix A - Function Index
Appendix B - List of Special Symbols
Appendix C - Short Subjects
_G_a_r_b_a_g_e _c_o_l_l_e_c_t_o_r_, _D_e_b_u_g_g_i_n_g_, _D_e_f_a_u_l_t _T_o_p _L_e_v_e_l
|
ca16a43988d9eee9eebfbff1a6f877553c594500
|
2f3068f3b1011d1325379d7951483c129d5b521c
|
/R/karyotype-database/lib/selection.R
|
872373210e6beb4fb5833000b0396768c6b20090
|
[
"Apache-2.0"
] |
permissive
|
hjanime/IGCSA
|
928c64e5bb67cc5ce8f801c46accc7fd7f48539a
|
9b8f1a5b6e572f780e04ae7698d25540fb7498ae
|
refs/heads/master
| 2021-01-21T00:45:14.105061
| 2015-10-12T13:08:50
| 2015-10-12T13:08:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,226
|
r
|
selection.R
|
# Draw one index from a cumulative-probability vector (inverse-CDF
# sampling).
#
# probs: numeric vector of non-decreasing cumulative probabilities
#        (as produced by set.probs()), expected to end at 1.
#
# Returns the index of the first element of `probs` that is >= a
# uniform draw on [0, 1); returns integer(0) when no element
# qualifies (e.g. the cumulative vector does not reach the draw),
# matching the original's empty-match behaviour.
roll <- function(probs)
{
  rand <- runif(1, 0, 1)
  # Take the first qualifying index directly. The original looked the
  # matched value back up with `which(probs == row)`, which returned
  # several indices whenever `probs` contained duplicate values.
  hits <- which(probs >= rand)
  if (length(hits) == 0L) {
    return(integer(0))
  }
  hits[1]
}
# Attach a cumulative-probability column `p` to a data frame.
#
# probs: numeric vector of per-row probabilities; assumed to be
#        aligned with (and the same length as) the rows of `df` —
#        that is how every caller in this file uses it.
# df:    data frame to annotate.
#
# Returns `df` with column `p` set to the running total of `probs`,
# ready for inverse-CDF sampling via roll().
set.probs <- function(probs, df)
{
  # cumsum(probs)[i] == sum(probs[i:1]), so this reproduces the
  # original per-row loop in O(n) instead of O(n^2).
  df[['p']] <- cumsum(probs)
  df
}
# Repeatedly sample breakpoint rows (chr/band pairs) from `bpd` in
# proportion to the probabilities held in column `col`.
#
# bpd: data frame with at least `chr` and `band` columns plus a
#      numeric probability column at position `col`.
# s:   number of random draws to perform.
# col: index of the probability column used for weighting.
#
# Returns a list with:
#   bp         - the rows of `bpd` drawn at least once, with a `count`
#                column, ordered by ascending count.
#   chr.counts - named numeric vector of draw totals per chromosome,
#                zero-count chromosomes dropped.
#
# NOTE(review): depends on the sibling helpers roll() and set.probs()
# defined earlier in this file.
select.bp <- function(bpd, s = 1000, col = 3)
{
  bpd <- bpd[order(bpd[, col]), ]
  bpd <- set.probs(bpd[, col], bpd)

  bp_chr_counts <- vector("numeric", length(unique(bpd[, 'chr'])))
  names(bp_chr_counts) <- unique(bpd[, 'chr'])

  bp_selected <- bpd[, c('chr', 'band')]
  bp_selected$count <- 0

  # seq_len() instead of 1:s so that s = 0 performs no draws rather
  # than iterating over c(1, 0).
  for (i in seq_len(s))
  {
    n <- roll(bpd$p)
    chr <- as.character(bpd[n, 'chr'])
    band <- as.character(bpd[n, 'band'])

    # Hoist the row lookup: the original evaluated the identical
    # which() expression twice per draw.
    hit <- which(bp_selected$chr == chr & bp_selected$band == band)
    bp_selected[hit, 'count'] <- bp_selected[hit, 'count'] + 1
    bp_chr_counts[chr] <- bp_chr_counts[chr] + 1
  }

  bp_selected <- bp_selected[order(bp_selected$count), ]
  bp_selected <- bp_selected[bp_selected$count > 0, ]
  bp_chr_counts <- bp_chr_counts[bp_chr_counts > 0]

  return(list("bp" = bp_selected, "chr.counts" = bp_chr_counts))
}
# Sample chromosomes from `cdd` by probability, then sample one or two
# breakpoints within each selected chromosome via select.bp().
#
# cdd:  data frame with `chr` and `probs` columns giving
#       per-chromosome selection probabilities.
# cbpd: data frame of candidate breakpoints with `chr`, `band` and a
#       probability column at position 3.
# s:    number of chromosome draws (duplicates collapse via unique()).
# plot: unused; kept only for interface compatibility with callers.
#
# Returns a list with:
#   bp         - cbpd rows drawn at least once, with a `count` column,
#                ordered by ascending count.
#   chr.counts - named per-chromosome totals, zero counts dropped.
#
# NOTE(review): depends on the sibling helpers roll(), set.probs() and
# select.bp() defined earlier in this file.
select.chr <- function(cdd, cbpd, s = 100, plot = FALSE)
{
  cdd <- cdd[order(cdd[, 'probs']), ]
  cdd <- set.probs(cdd[, 'probs'], cdd)

  selected <- cbpd[, c('chr', 'band')]
  selected$count <- 0

  # Draw `s` chromosomes with replacement, then deduplicate.
  # NOTE(review): if cdd$chr is a factor, the numeric `chrs` vector
  # receives its integer codes, as in the original — confirm callers
  # pass `chr` in a comparable representation to cbpd$chr.
  chrs <- vector("numeric", s)
  for (jj in seq_along(chrs))
    chrs[jj] <- cdd[roll(cdd$p), 'chr']
  chrs <- unique(chrs)

  # Sample 1-2 breakpoints within each selected chromosome.
  for (chr in chrs)
  {
    sub <- cbpd[cbpd$chr == chr, ]
    sub <- sub[order(sub[, 3]), ]
    bps <- select.bp(sub, s = sample(1:2, 1))$bp

    # seq_len() instead of 1:nrow(bps): when select.bp() returns no
    # rows, the original iterated r = 1, 0 and relied on empty
    # lookups being no-ops.
    for (r in seq_len(nrow(bps)))
    {
      band <- which(selected[, 'chr'] == chr & selected[, 'band'] == bps[r, 'band'])
      selected[band, 'count'] <- selected[band, 'count'] + 1
    }
  }

  selected <- selected[order(selected$count), ]

  chr_counts <- vector("numeric", nrow(cdd))
  names(chr_counts) <- cdd[, 'chr']
  for (chr in cdd[, 'chr'])
    chr_counts[[as.character(chr)]] <- sum(selected[selected[, 'chr'] == chr, 'count'])

  selected <- selected[selected$count > 0, ]
  chr_counts <- chr_counts[chr_counts > 0]

  return(list("bp" = selected, "chr.counts" = chr_counts))
}
|
f4e93b81c22feff26fd6f86b8bf881ccec97e11c
|
cfb444f0995fce5f55e784d1e832852a55d8f744
|
/man/unif2norm.Rd
|
39161e5329c5f1fc1977ed68214bdf903924aa2c
|
[
"MIT"
] |
permissive
|
debruine/faux
|
3a9dfc44da66e245a7b807220dd7e7d4ecfa1317
|
f2be305bdc6e68658207b4ad1cdcd2d4baa1abb4
|
refs/heads/master
| 2023-07-19T18:28:54.258681
| 2023-07-07T16:59:24
| 2023-07-07T16:59:24
| 163,506,566
| 87
| 15
|
NOASSERTION
| 2023-01-30T10:09:37
| 2018-12-29T11:43:04
|
R
|
UTF-8
|
R
| false
| true
| 878
|
rd
|
unif2norm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution_convertors.R
\name{unif2norm}
\alias{unif2norm}
\title{Convert uniform to normal}
\usage{
unif2norm(x, mu = 0, sd = 1, min = NULL, max = NULL)
}
\arguments{
\item{x}{the uniformly distributed vector}
\item{mu}{the mean of the normal distribution to return}
\item{sd}{the SD of the normal distribution to return}
\item{min}{the minimum possible value of x (calculated from x if not given)}
\item{max}{the maximum possible value of x (calculated from x if not given)}
}
\value{
a vector with a gaussian distribution
}
\description{
Convert a uniform distribution to a normal (gaussian) distribution with specified mu and sd
}
\examples{
x <- runif(10000)
y <- unif2norm(x)
g <- ggplot2::ggplot() + ggplot2::geom_point(ggplot2::aes(x, y))
ggExtra::ggMarginal(g, type = "histogram")
}
|
31a14be6d361f4fc217e698a8d9d3b9dc3294a94
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/rlfsm/tests/testthat/test_increms.R
|
046014e26872aa39603af33b0e8998fbdabb985e
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,625
|
r
|
test_increms.R
|
# Unit tests for the increment helpers of the rlfsm package:
# increment() evaluates a single order-k, lag-r increment of a sample
# path at index i; increments() returns the full vector of them.
# NOTE(review): path(), increment() and increments() come from the
# package under test; they are not defined in this file.
context("Increment functions checks")
#### Parameter setup
# m, M: discretization/truncation parameters for the lfsm simulator;
# N: number of observation points (2^12 minus the truncation M).
m<-25; M<-60; N<-2^12-M
# alpha: stability index; H: Hurst exponent; sigma: scale.
alpha<-1.8; H<-0.8; sigma<-1.8
# k: increment order; p, p_prime: moment orders (p, p_prime, t1, t2
# are unused below — presumably a parameter block shared across the
# package's test files).
k<-2; p<-0.3; p_prime<-0.1
t1<-1; t2<-2
###################
# Simulate one low-frequency linear fractional stable motion path;
# X is the sampled path itself.
List<-path(N,m,M,alpha,H,sigma,freq='L',disable_X=FALSE,levy_increments=NULL,seed=NULL)
X<-List$lfsm
###################
test_that("increment(s) should return a numeric", {
expect_is(increment(r=2, i=k+5, k, X), "numeric")
expect_is(increments(2, k, X), "numeric")
})
test_that("increment(s) should return an error here", {
# k = length(X) pushes the increment window outside the path; NULL is
# not a valid path argument.
expect_error(increment(r=2, i=4, k=length(X), X))
expect_error(increments(2, k, NULL))
})
###################
test_that("Computation of increments is consistent", {
# A first-order increment is a plain difference of neighbours.
expect_equal(X[2]-X[1], increment(r=1, i=1, k=1, X))
# increments() must agree with increment() element-wise and when
# increment() is given the whole index sequence at once.
expect_equal(increments(r=3, k=1, X)[1], increment(r=3, i=3, k=1, X))
expect_equal(increments(r=3, k=1, X), increment(r=3, i=seq(1*3, (length(X)-1)), k=1, X))
})
# Deterministic fixed path: cross-check increment() against
# increments() for several (r, k) combinations.
X_1=c(1,4,3,6,8,5,3,5,8,5,1,8,6)
r=1; k=1
n <- length(X_1) - 1
DeltaX = increment(seq(r*k, n), path = X_1, k = k, r = r)
test_that("Computation of increments is consistent", {
expect_equal(DeltaX, increments(k=k,r=r,X_1))
})
r=2; k=1
DeltaX = increment(seq(r*k, n), path = X_1, k = k, r = r)
# NOTE(review): stray top-level expression — its TRUE/FALSE result is
# printed, not asserted; the test_that block below already covers it.
sum(DeltaX == increments(k=k,r=r,X_1)) == length(DeltaX)
test_that("Computation of increments is consistent", {
expect_equal(DeltaX, increments(k=k,r=r,X_1))
})
r=2; k=2
DeltaX = increment(seq(r*k, n), path = X_1, k = k, r = r)
test_that("Computation of increments is consistent", {
expect_equal(DeltaX, increments(k=k,r=r,X_1))
})
###################
###################
###################
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.