blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa1501ce3c3d4665644ab5564e9ebc3d33f4b24a
|
cb7f0408c26721655435a7d14aa0fc9a3cb753a4
|
/filter_20uM_competition.R
|
7b88bf40000460094705b84cfdf8156493c220c9
|
[] |
no_license
|
liuxianghui/FBDDinCell
|
90b1eb2f58a532d4697df16109b636e902b4e7ec
|
fe7d31c0f01e8b90541435db8ea9af7b643d75a7
|
refs/heads/master
| 2021-04-08T10:39:35.739644
| 2016-12-09T22:40:56
| 2016-12-09T22:40:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 750
|
r
|
filter_20uM_competition.R
|
# Filter targets from the 20 uM competition experiment.
# For each compound (coumarin, pipphen, hydrooxoquin), rows whose
# 20uM/293T competition ratio is below the cutoff get their remaining
# per-compound columns blanked with "-"; rows failing all three cutoffs
# (and with zero values in both X20phenpip_w_* columns) are dropped.
data <-
read.csv("./20uMtargets_found_200uMtargets.csv", header = TRUE)
# Minimum competition ratio for a compound's measurements to be kept.
ratio_cutoff <- 5
# grep(...)[-1] selects all of a compound's columns except the first
# (the 20uM_293T ratio column itself), so the ratio stays visible
# after the other columns are masked.
data[data$coumarin_20uM_293T < ratio_cutoff, grep("coumarin", colnames(data), value = TRUE)[-1]] <-
"-"
data[data$pipphen_20uM_293T < ratio_cutoff, grep("pipphen", colnames(data), value = TRUE)[-1]] <-
"-"
data[data$hydrooxoquin_20uM_293T < ratio_cutoff, grep("hydrooxoquin", colnames(data), value = TRUE)[-1]] <-
"-"
# Drop rows that are below the cutoff for every compound AND have zero
# values in both X20phenpip_w_* columns.
data <-
data[!(
data$coumarin_20uM_293T < ratio_cutoff &
data$pipphen_20uM_293T < ratio_cutoff &
data$hydrooxoquin_20uM_293T < ratio_cutoff &
data$X20phenpip_w_B00174059 == 0 & data$X20phenpip_w_B00174040 == 0
), ]
write.csv(data, file = "20uMtargets_found_200uMtargets_filtered.csv", row.names = FALSE)
|
8577844cc6677a9714de53590979c3779e3c3f88
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/comf/R/fctdTNZ.r
|
a0190143b6311433efa02091bdc319c254671c9d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,575
|
r
|
fctdTNZ.r
|
# Functions return:
#
# dTNZ -
# dTNZTa -
# dTNZTs -
#
# File contains 1 function:
# - calcdTNZ(182, 82, 21, 1, .5, .1, 36, 20)
# returns dTNZ, dTNZTa, dTNZTs
#
# v1.0 done by Marcel Schweiker in cooperation with B. Kingma
# Function: dTNZ ################
###########################################
# calcdTNZ ------------------------------------------------------------------
# Compute the distance to the thermoneutral zone (dTNZ).
# v1.0 by Marcel Schweiker in cooperation with B. Kingma.
#
# Args:
#   ht      body height [cm]
#   wt      body weight [kg]
#   age     age [years]
#   gender  1 = female, otherwise male (Harris-Benedict revised coefficients)
#   clo     clothing insulation [clo]
#   vel     air velocity [m/s]
#   tsk     observed skin temperature [degree C]
#   ta      observed air temperature [degree C]
#   met     metabolic rate [met]
#   deltaT  resolution of the skin/air temperature grid [degree C]
#
# Returns a one-row data.frame with dTNZ (absolute distance of the observed
# (ta, tsk) point from the TNZ centroid), dTNZTs / dTNZTa (signed distances
# along the skin- and air-temperature axes) and the centroid coordinates
# tskCentroid / topCentroid.
calcdTNZ <- function(ht, wt, age, gender, clo, vel, tsk, ta, met, deltaT = .1){
  # Body surface area according to Mosteller.
  sa <- (wt ^ .5 * ht ^ .5) * 1 / 60
  # Basal metabolic rate, Harris-Benedict equations revised by
  # Roza and Shizgal (1984).
  if (gender == 1){ # female
    basMet <- 447.593 + (9.247 * wt) + (3.098 * ht) - (4.330 * age)
  } else {
    basMet <- 88.362 + (13.397 * wt) + (4.799 * ht) - (5.677 * age)
  }
  basMet <- 4164 / 86400 * basMet # convert to W (coefficient as in original model)
  basMet <- basMet * 5 / 4 # adjust for basMet = .8 met
  # Candidate skin (seqi) and air (seqj) temperature grids. BUGFIX: deltaT
  # was previously overwritten with .1 here, silently ignoring the
  # caller-supplied resolution; the parameter is now honoured.
  seqi <- seq(deltaT, 20, deltaT)
  seqj <- seq(deltaT, 26, deltaT)
  offseqi <- 19
  offseqj <- 9
  alpha <- 0.08 # []
  w <- 0.06 # skin wettedness fraction []
  gammac <- 0.00750061683 # [mmHg/Pa]
  lambda <- 2.2 # [degree C/mmHg] Lewis relation
  phi <- 0.50 # relative humidity fraction
  Tcmin <- 36 # [degree C] lower realistic core temperature
  Tcmax <- 38 # [degree C] upper realistic core temperature
  tskObs <- tsk
  taObs <- ta
  A <- sa
  icl <- 0.155 * clo # clothing insulation [m2 degree C/W]
  va <- vel * 100 # air velocity [cm/s]
  mmin <- basMet * (met - .1) # [W]
  mmax <- basMet * (met + .1) # [W]
  IBodymax <- 0.112 # [m2 degree C/W] see Veicsteinas et al.
  IBodymin <- 0.031 # [m2 degree C/W]
  Tsmin <- Tcmin - (1 - alpha) * mmax * IBodymax / A # [degree C]
  Tsmax <- Tcmax - (1 - alpha) * mmin * IBodymin / A # [degree C]
  # Matrices of all (air temperature, skin temperature) combinations:
  # rows vary over ta (seqj), columns over Ts (seqi).
  ta <- matrix(rep((seqj + offseqj), length(seqi)), length(seqj), length(seqi))
  Ts <- matrix(rep((seqi + offseqi), each = length(seqj)), length(seqj), length(seqi))
  # Body tissue insulation, linearly interpolated over Ts and clamped
  # to [IBodymin, IBodymax].
  IBody <- IBodymax + ((IBodymax - IBodymin) / (Tsmin - Tsmax)) * (Ts - Tsmin)
  IBody <- pmin(pmax(IBody, IBodymin), IBodymax)
  # Air insulation (converted from clo to m2K/W) and convective heat
  # transfer coefficient.
  Iair <- 1 / ((0.19 * sqrt(va) * (298 / (ta + 273.15))) + (0.61 * ((ta + 273.15) / 298) ^ 3))
  Iair <- Iair * 0.155 # to adjust from clo unit to m2K/w
  hconv <- 1 / Iair - (0.61 * ((ta + 273.15) / 298) ^ 3) / 0.155 # [W/m2 degree C]
  # Saturated vapour pressure at the skin and ambient vapour pressure [mmHg].
  ps <- gammac * 100 * exp(18.965 - 4030 / (Ts + 235))
  pair <- gammac * phi * 100 * exp(18.965 - 4030 / (ta + 235))
  fpcl <- 1 / (1 + hconv * icl)
  Qe <- A * w * lambda * hconv * (ps - pair) * fpcl # evaporative heat loss [W]
  Qrc <- (A / (icl + Iair)) * (Ts - ta) # dry (radiative + convective) heat loss [W]
  # Core temperature and total heat loss for every grid combination.
  Tc <- Ts + (IBody / (1 - alpha)) * ((Ts - ta) / (icl + Iair) + (Qe / A))
  Q <- Qrc + Qe
  colnames(Tc) <- colnames(Q) <- seqi + offseqi # skin temperatures
  rownames(Tc) <- rownames(Q) <- seqj + offseqj # air temperatures
  # Discard combinations with unrealistic core temperature (36 < Tc < 38).
  Q[Tc > Tcmax] <- NA
  Q[Tc < Tcmin] <- NA
  # Keep only combinations whose heat loss balances the metabolic range:
  # these cells form the thermoneutral zone.
  Qtnz <- Q
  Qtnz[Q < (1 - alpha) * mmin] <- NA
  Qtnz[Q > (1 - alpha) * mmax] <- NA
  # Collect the skin (column) and air (row) temperatures of all TNZ cells.
  # Vectorized replacement of the former double index loops.
  inTNZ <- which(!is.na(Qtnz), arr.ind = TRUE)
  listTs <- as.numeric(colnames(Qtnz))[inTNZ[, "col"]]
  listtop <- as.numeric(rownames(Qtnz))[inTNZ[, "row"]]
  tskCentroid <- median(listTs)
  topCentroid <- median(listtop)
  # Distance from the observed (ta, tsk) point to the TNZ centroid.
  dTNZ <- round(sqrt((taObs - topCentroid) ^ 2 + (tskObs - tskCentroid) ^ 2), 2) # abs value of distance
  dTNZTs <- round(tskObs - tskCentroid, 2) # rel value assuming tskin dominant for sensation
  dTNZTa <- round(taObs - topCentroid, 2) # rel value assuming tambient dominant for sensation
  tskCentroid <- round(tskCentroid, 2)
  topCentroid <- round(topCentroid, 2)
  data.frame(dTNZ, dTNZTs, dTNZTa, tskCentroid, topCentroid)
}
|
f826461369e0d8f2ce3ff9f64e282ea542734273
|
aa0d6b917e3fadaec70743d46df8e85b8e3c7d55
|
/src/r/SQ_Wheat_Phenology/Phylsowingdatecorrection.r
|
21b57fdad1b8cb4355d460f691f78484033307b8
|
[
"MIT"
] |
permissive
|
AgriculturalModelExchangeInitiative/SQ_Wheat_Phenology
|
816bc340971c2707a27db1dacc5c286e8df5d3d9
|
9f3426ada2d7913f903701b261e613ac63cbc3d3
|
refs/heads/master
| 2022-02-05T08:25:40.063564
| 2022-02-01T08:45:31
| 2022-02-01T08:45:31
| 159,197,361
| 5
| 5
|
MIT
| 2022-02-01T08:45:32
| 2018-11-26T16:12:34
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 5,343
|
r
|
Phylsowingdatecorrection.r
|
model_phylsowingdatecorrection <- function (sowingDay = 1,
         latitude = 0.0,
         sDsa_sh = 1.0,
         rp = 0.0,
         sDws = 1,
         sDsa_nh = 1.0,
         p = 120.0){
    #'- Name: PhylSowingDateCorrection -Version: 1.0, -Time step: 1
    #'- Description: Correction of the Phyllochron varietal parameter (p)
    #'  according to sowing date, from the SiriusQuality wheat simulation
    #'  model (http://www1.clermont.inra.fr/siriusquality/?page_id=427).
    #'  Author: Loic Manceau, INRA Montpellier.
    #'- inputs:
    #'  * sowingDay : day of year at sowing (INT, 1..365, default 1) [d]
    #'  * latitude  : latitude (DOUBLE, -90..90, default 0.0) [degrees]
    #'  * sDsa_sh   : sowing date at which phyllochron is maximum,
    #'                southern hemisphere (DOUBLE, 1..365, default 1.0) [d]
    #'  * rp        : rate of change of phyllochron with sowing date
    #'                (DOUBLE, 0..365, default 0.0) [d-1]
    #'  * sDws      : sowing date at which phyllochron is minimum
    #'                (INT, 1..365, default 1) [d]
    #'  * sDsa_nh   : sowing date at which phyllochron is maximum,
    #'                northern hemisphere (DOUBLE, 1..365, default 1.0) [d]
    #'  * p         : phyllochron, varietal parameter
    #'                (DOUBLE, 0..1000, default 120.0) [degree C d leaf-1]
    #'- outputs:
    #'  * fixPhyll  : corrected phyllochron varietal parameter
    #'                (DOUBLE, 0..1000) [degree C d leaf-1]

    # Southern hemisphere: p is reduced only when sowing later than sDsa_sh.
    # Northern hemisphere: p is reduced only when sowing earlier than sDsa_nh.
    # In either case the reduction is capped at rp * sDws.
    southern <- latitude < 0.0
    if (southern && sowingDay > as.integer(sDsa_sh))
    {
        correction <- rp * min(sowingDay - sDsa_sh, sDws)
    }
    else if (!southern && sowingDay < as.integer(sDsa_nh))
    {
        correction <- rp * min(sowingDay, sDws)
    }
    else
    {
        correction <- 0.0
    }
    fixPhyll <- p * (1 - correction)
    return (list('fixPhyll' = fixPhyll))
}
|
0b558fe1e25e2bd8ca5bc2ec7565eb2ab14b2b36
|
18ed4435f80350e72aadb4bd2a3f21bbce67fd0e
|
/R/estPlots.R
|
4a27410c96feeebc3fd771a0c948b138a0205359
|
[] |
no_license
|
JohannesFriedrich/api-wrapper.r
|
15e76cea947bbe83802b939930541f0d93af1541
|
e845eece1e3da891dc40f2aa443cf1e37311b008
|
refs/heads/master
| 2021-01-05T05:26:56.059461
| 2019-08-02T13:50:28
| 2019-08-02T13:50:28
| 240,896,621
| 1
| 0
| null | 2020-02-16T13:29:37
| 2020-02-16T13:29:36
| null |
UTF-8
|
R
| false
| false
| 8,264
|
r
|
estPlots.R
|
# // Copyright (C) 2017 Simon Müller
# // This file is part of EventStudy
# //
# // EventStudy is free software: you can redistribute it and/or modify it
# // under the terms of the GNU General Public License as published by
# // the Free Software Foundation, either version 2 of the License, or
# // (at your option) any later version.
# //
# // EventStudy is distributed in the hope that it will be useful, but
# // WITHOUT ANY WARRANTY; without even the implied warranty of
# // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# // GNU General Public License for more details.
# //
# // You should have received a copy of the GNU General Public License
# // along with EventStudy If not, see <http://www.gnu.org/licenses/>.
#' @name arPlot
#'
#' @title Abnormal Return Plot
#'
#' @description Plot abnormal returns in the event window of single or multiple
#' firms.
#'
#' @param ResultParserObj An object of class \code{ResultParser}
#' @param firm set this parameter if just a subset of firms should be plotted
#' @param window filter event time window
#' @param xlab x-axis label of the plot
#' @param ylab y-axis label
#' @param alpha alpha value
#' @param facetVar should each firm get its own plot. You may plot each firm in
#' an own plot or by each group. (Default: NULL, available: Group and Firm)
#' @param ncol number of facet columns
#' @param addAAR add aar line
#' @param xVar x variable name
#' @param yVar y variable name
#'
#' @return a ggplot2 object
#'
#' @examples
#' \dontrun{
#' # plot abnormal returns in one plot
#' arPlot(resultParser)
#'
#' # plot abnormal returns by group
#' arPlot(resultParser, facetVar = "Group")
#' }
#'
#' @export
arPlot <- function(ResultParserObj, firm = NULL, window = NULL,
                   xlab = "", ylab = "Abnormal Returns",
                   alpha = .5,
                   facetVar = NULL, ncol = 4,
                   addAAR = FALSE,
                   xVar = "eventTime", yVar = "ar") {
  # Dummy bindings to silence R CMD check notes about NSE column names.
  Firm <- eventTime <- y <- NULL
  if (!is.null(facetVar))
    facetVar <- match.arg(facetVar, c("Firm", "Group"))

  ar <- ResultParserObj$arResults
  # Optionally restrict the data to a single firm.
  if (!is.null(firm)) {
    ar %>%
      dplyr::filter(Firm == firm) -> ar
  }
  # Default event window: the full observed event-time range.
  if (is.null(window))
    window <- range(ar$eventTime)
  selectedWindow <- seq(from = window[1], to = window[2], by = 1)
  pal <- RColorBrewer::brewer.pal(3, "Blues")
  ar %>%
    dplyr::filter(eventTime %in% selectedWindow) -> ar

  # Base plot: one abnormal-return line per firm.
  ar %>%
    ggplot() +
    geom_hline(yintercept = 0, color = "black", alpha = .5) +
    geom_vline(xintercept = 0, color = "black", linetype = 2, alpha = .5) +
    geom_line(aes_string(x = xVar, y = yVar, group = "Firm"),
              color = pal[3], alpha = alpha) +
    scale_y_continuous(labels = scales::percent) +
    xlab(xlab) +
    ylab(ylab) +
    theme_tq() -> q

  # Overlay the averaged abnormal return. BUGFIX: with the default
  # facetVar = NULL, the former `facetVar != "Firm"` evaluated to
  # logical(0) and made `if` error ("argument is of length zero"); the
  # is.null() guard keeps addAAR = TRUE working without facetting.
  if (addAAR) {
    if (is.null(facetVar) || facetVar != "Firm") {
      data.table::setnames(ar, yVar, "y")
      ar %>%
        dplyr::group_by_(.dots = c(xVar, facetVar)) %>%
        dplyr::summarise(y = mean(y, na.rm = T)) -> mAr
      data.table::setnames(ar, "y", yVar)
      q <- q +
        geom_line(data = mAr, aes_string(x = xVar, y = "y"), color = "black")
    }
  }

  # Facet by firm or group when requested.
  if (!is.null(facetVar)) {
    facetForm <- as.formula(paste0(" ~ ", facetVar))
    q <- q +
      facet_wrap(facetForm, ncol = ncol, scales = "free")
  }
  q
}
#' @name aarPlot
#'
#' @title Averaged Abnormal Return Plot
#'
#' @description Averaged abnormal return plots with confidence intervals
#'
#' For more details see the help vignette:
#' \code{vignette("parameters_eventstudy", package = "EventStudy")}
#'
#' @param ResultParserObj An object of class \code{ResultParser}
#' @param cumSum plot CAAR
#' @param group set this parameter if just one group should be plotted
#' @param window numeric vector of length 2
#' @param ciStatistics Statistic used for confidence intervals
#' @param p p-value
#' @param ciType type of CI band
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @param facet should each firm get its own plot (default = T)
#' @param ncol number of facet columns
#'
#' @return a ggplot2 object
#'
#' @examples
#' \dontrun{
#' # plot averaged abnormal returns in one plot
#' aarPlot(resultParser)
#'
#' # plot averaged abnormal returns with .95-CI
#' arPlot(resultParser, ciStatistics = "Patell Z",p = .95)
#' }
#'
#' @export
aarPlot <- function(ResultParserObj,
                    cumSum = F,
                    group = NULL,
                    window = NULL,
                    ciStatistics = NULL,
                    p = .95,
                    ciType = "band",
                    xlab = "",
                    ylab = "Averaged Abnormal Returns",
                    facet = T,
                    ncol = 4) {
  # Dummy bindings to silence R CMD check notes about NSE column names.
  level <- eventTime <- lower <- upper <- NULL

  aar <- ResultParserObj$aarResults

  # Cumulate the averaged abnormal returns (CAAR) when requested.
  if (cumSum) {
    aar <- ResultParserObj$cumSum(aar,
                                  var = "aar",
                                  timeVar = "eventTime",
                                  cumVar = "level")
  }

  # Confidence intervals are attached only for non-cumulated AAR.
  if (!cumSum && !is.null(ciStatistics)) {
    ciInterval <- ResultParserObj$calcAARCI(statistic = ciStatistics,
                                            p = p)
    aar$lower <- ciInterval$lower
    aar$upper <- ciInterval$upper
  }

  # Optionally restrict to a single group.
  if (!is.null(group)) {
    aar <- dplyr::filter(aar, level == group)
  }

  # Default event window: the full observed event-time range.
  if (is.null(window)) {
    window <- range(aar$eventTime)
  }
  selectedWindow <- seq(from = window[1], to = window[2], by = 1)
  pal <- RColorBrewer::brewer.pal(3, "Blues")
  aar <- dplyr::filter(aar, eventTime %in% selectedWindow)

  # Base plot: averaged abnormal returns over event time.
  plotData <- dplyr::mutate(aar, aar = as.numeric(aar))
  q <- ggplot(plotData) +
    geom_hline(yintercept = 0, color = "black", alpha = .5) +
    geom_vline(xintercept = 0, color = "black", linetype = 2, alpha = .5) +
    geom_line(aes(x = eventTime, y = aar), color = pal[3]) +
    scale_y_continuous(labels = scales::percent) +
    xlab(xlab) +
    ylab(ylab) +
    theme_tq()

  # Add the confidence interval as dashed lines or as a ribbon.
  if (!cumSum && !is.null(ciStatistics)) {
    if (ciType == "band") {
      q <- q +
        geom_line(aes(x = eventTime, y = lower), linetype = 2, color = "gray50", alpha = .5) +
        geom_line(aes(x = eventTime, y = upper), linetype = 2, color = "gray50", alpha = .5)
    } else if (ciType == "ribbon") {
      q <- q +
        geom_ribbon(aes(x = eventTime, ymin = lower, ymax = upper), fill = "gray50", alpha = .25)
    }
  }

  # One panel per level (group).
  if (facet) {
    q <- q +
      facet_wrap( ~ level, ncol = ncol, scales = "free")
  }
  q
}
#' @name pointwiseCARPlot
#'
#' @title Pointwise Cumulative Abnormal Return Plot
#'
#' @description Pointwise cumulative abnormal return plots
#'
#' @param df data.frame with abnormal return in long format;
#' @param firm set this parameter if just one firm should be plotted
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @param facetVar should each firm get its own plot. You may plot each firm in
#' an own plot or by each group. (Default: NULL, available: Group and Firm)
#' @param ncol number of facet columns
#'
#' @return a ggplot2 object
#'
#' @examples
#' \dontrun{
#' # plot abnormal returns in one plot
#' arPlot(resultParser)
#'
#' # plot abnormal returns by group
#' arPlot(resultParser, facetVar = "Group")
#'
#' This function must be revised
#' }
#'
#' @keywords internal
pointwiseCARPlot <- function(df, firm = NULL,
                             xlab = "", ylab = "pointwise Cumulative Abnormal Returns",
                             facetVar = NULL, ncol = 4) {
  # Dummy bindings to silence R CMD check notes about NSE column names.
  Firm <- car <- NULL

  # Validate the facet variable when supplied.
  if (!is.null(facetVar)) {
    facetVar <- match.arg(facetVar, c("Firm", "Group"))
  }

  # Optionally restrict to a single firm.
  if (!is.null(firm)) {
    df <- dplyr::filter(df, Firm == firm)
  }

  # Cumulate abnormal returns over event time within each firm.
  df <- data.table::as.data.table(df)
  data.table::setkeyv(df, c("Firm", "eventTime"))
  df[, car := cumsum(ar), by = Firm]

  # Delegate drawing of the cumulated series to arPlot.
  arPlot(df,
         xlab = xlab,
         ylab = ylab,
         facetVar = facetVar,
         ncol = ncol,
         xVar = "eventTime",
         yVar = "car")
}
|
df7af518e6ab2735ab814c47f0a2235371fcb4a4
|
d859174ad3cb31ab87088437cd1f0411a9d7449b
|
/autonomics.import/man/weights.Rd
|
549f6954dc76fcdfbe91c7e4ec3e344953252a7d
|
[] |
no_license
|
bhagwataditya/autonomics0
|
97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c
|
c7ca7b69161e5181409c6b1ebcbeede4afde9974
|
refs/heads/master
| 2023-02-24T21:33:02.717621
| 2021-01-29T16:30:54
| 2021-01-29T16:30:54
| 133,491,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,402
|
rd
|
weights.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getters_setters.R
\name{weights}
\alias{weights}
\alias{weights,SummarizedExperiment-method}
\alias{weights,ExpressionSet-method}
\alias{weights,EList-method}
\alias{weights<-}
\alias{weights<-,SummarizedExperiment,matrix-method}
\alias{weights<-,SummarizedExperiment,numeric-method}
\alias{weights<-,ExpressionSet,matrix-method}
\alias{weights<-,EList,matrix-method}
\alias{weights<-,EList,numeric-method}
\title{Get/Set weights}
\usage{
weights(object, ...)
\S4method{weights}{SummarizedExperiment}(object)
\S4method{weights}{ExpressionSet}(object)
\S4method{weights}{EList}(object)
weights(object) <- value
\S4method{weights}{SummarizedExperiment,matrix}(object) <- value
\S4method{weights}{SummarizedExperiment,numeric}(object) <- value
\S4method{weights}{ExpressionSet,matrix}(object) <- value
\S4method{weights}{EList,matrix}(object) <- value
\S4method{weights}{EList,numeric}(object) <- value
}
\arguments{
\item{object}{SummarizedExperiment}
\item{...}{additional params}
\item{value}{ratio matrix (features x samples)}
}
\value{
weight matrix (get) or updated object (set)
}
\description{
Get/Set weight matrix
}
\examples{
if (require(autonomics.data)){
require(magrittr)
object <- autonomics.data::stemcomp.proteinratios
weights(object)
weights(object) <- 1; weights(object) \%>\% str()
}
}
|
eca01d653d08377837cf95466353278461892954
|
e71a0417252bd1c7ddd947eca167e34cb4ddb9b4
|
/R/graphics/boxplot.R
|
bd55967591db9b870b999b6881f8b313ffb39de6
|
[] |
no_license
|
stork119/OSigA
|
902c0151cdba8c9f9c3297240d95343fc63e189f
|
3a9cc4d096f5b1744931a0326c1ed18547798fcb
|
refs/heads/master
| 2022-12-27T19:24:18.264417
| 2020-10-11T17:47:18
| 2020-10-11T17:47:18
| 78,741,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,396
|
r
|
boxplot.R
|
### ###
### boxplot
### ###
source("R/graphics/theme_jetka.R")
#### plot_boxplot_group ####
# Draw a grouped plot (by default a boxplot) of column `y` versus column
# `x`, optionally facetted, normalized, y-limited and saved as a PDF.
#
# Args:
#   data                   data.frame containing at least the columns named
#                          by x, y and boxplot_group
#   ...                    forwarded to theme_jetka()
#   output_path, filename  target directory and base name for the saved PDF
#   x, y                   column names plotted on the axes
#   boxplot_group          column used to group boxes (defaults to x)
#   facet_grid_group_y/x   facet_grid formula parts ("" disables facetting)
#   ylab, xlab             axis labels (default to the column names)
#   ylim_min, ylim_max     y-axis limits
#   plot_width/plot_height size of the saved plot
#   plot_title             plot title
#   xlab_angle, xlab_hjust x tick label rotation / justification
#   legend_position        NOTE(review): accepted but not used in the body
#   plot_fun               name of the ggplot2 geom to apply ("geom_boxplot")
#   normalize_data         if truthy, multiply y by normalize_factor
#   normalize_factor       scaling factor used when normalize_data is set
#   ylim_max_const         if falsy, ylim_max becomes 1.2 * max(y)
#   x_factor               convert the x column to a factor
#   save_plot              write the plot to output_path as a PDF
#
# Returns the ggplot object, or NULL (invisible) when required columns
# are missing.
plot_boxplot_group <- function(data,
                               ...,
                               output_path = NULL,
                               filename = NULL,
                               x = "time",
                               y = "intensity",
                               boxplot_group = x,
                               facet_grid_group_y = "",
                               facet_grid_group_x = "",
                               ylab = y,
                               xlab = x,
                               ylim_min = 0,
                               ylim_max = 2000,
                               plot_width = 24,
                               plot_height = 8,
                               plot_title = "",
                               xlab_angle = 90,
                               xlab_hjust = 0,
                               legend_position = "bottom",
                               plot_fun = "geom_boxplot",
                               normalize_data = FALSE,
                               normalize_factor = 65535,
                               ylim_max_const = TRUE,
                               x_factor = TRUE,
                               save_plot = TRUE){
  # Check that all required columns exist in `data`; print the missing
  # names and return FALSE otherwise.
  CheckColumnExistence <- function(data, columns.list = list()){
    wanted <- unlist(columns.list)
    columns_existance <- wanted %in% colnames(data)
    if_exists <- all(columns_existance)
    if(!if_exists){
      # BUGFIX: the former `wanted[-which(columns_existance)]` printed
      # nothing when *every* column was missing, because negative
      # indexing with integer(0) selects no elements; plain logical
      # indexing always lists the missing columns.
      print(wanted[!columns_existance])
    }
    return(if_exists)
  }
  # Arguments may arrive in string/logical form; coerce to integers as the
  # original code did.
  # NOTE(review): as.integer truncates fractional values (e.g. hjust = .5
  # becomes 0) -- confirm only whole numbers are ever passed here.
  ylim_min <- as.integer(ylim_min)
  ylim_max <- as.integer(ylim_max)
  plot_width <- as.integer(plot_width)
  plot_height <- as.integer(plot_height)
  xlab_angle <- as.integer(xlab_angle)
  xlab_hjust <- as.integer(xlab_hjust)
  normalize_data <- as.integer(normalize_data)
  normalize_factor <- as.integer(normalize_factor)
  ylim_max_const <- as.integer(ylim_max_const)
  x_factor <- as.integer(x_factor)
  if(!CheckColumnExistence(data = data, list(x,y,boxplot_group))){
    return()
  }
  # Rescale the y column (e.g. from a normalized range back to raw counts).
  if(normalize_data){
    data[,y] <- normalize_factor*data[,y]
  }
  # Adaptive upper y-limit: 20% headroom above the data maximum.
  if(!ylim_max_const){
    ylim_max <- 1.2*max(data[,y])
  }
  if(x_factor){
    data[,x] <- factor(data[,x])
  }
  gplot <- ggplot(data = data,
                  aes_string(x = x,
                             y = y,
                             group = boxplot_group)
                  ) +
    do.call(plot_fun, args = list(position = position_dodge())) +
    ylim(ylim_min, ylim_max) +
    xlab(xlab) +
    ylab(ylab) +
    ggtitle(plot_title) +
    theme_jetka(...)
  # Optional facetting with a "x ~ y" formula built from the group args.
  if(facet_grid_group_x != "" || facet_grid_group_y != ""){
    gplot <-
      gplot +
      facet_grid(paste(facet_grid_group_x, "~", facet_grid_group_y, sep = " "),
                 scale ="free",
                 space = "free")
  }
  # Saving is best-effort: a failure (e.g. invalid output_path) must not
  # prevent the plot object from being returned.
  try({
    if(save_plot){
      output_path <- normalizePath(output_path, "/")
      dir.create(path = output_path, recursive = TRUE, showWarnings = FALSE)
      ggsave(filename = paste(output_path, "/", filename, ".pdf", sep = ""),
             plot = gplot,
             width = plot_width,
             height = plot_height,
             useDingbats = FALSE)
    }
  })
  return(gplot)
}
|
3f3698a23df1bd312b36ac41191c5c5d1a09bdfd
|
f50fe7066d8d3f5551b01cde49159e136ac12510
|
/R/mln.mean.sd.R
|
545941fd7621e9390220e972b3c76e4950a96eda
|
[] |
no_license
|
stmcg/estmeansd
|
dd1597769db0d60f5c36414965187e0c648eb4d2
|
68328422dfce0b5f2f5ac19e216fb5ea3851d43d
|
refs/heads/master
| 2022-07-25T13:36:23.253555
| 2022-05-16T23:46:15
| 2022-06-17T18:34:58
| 170,224,473
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,815
|
r
|
mln.mean.sd.R
|
#' Method for unknown non-normal distributions (MLN) approach for estimating the sample mean and standard deviation
#'
#' This function applies the Method for Unknown Non-Normal Distributions (MLN) approach to estimate the sample mean and standard deviation from a study that presents one of the following sets of summary statistics: \itemize{
#' \item S1: median, minimum and maximum values, and sample size
#' \item S2: median, first and third quartiles, and sample size
#' \item S3: median, minimum and maximum values, first and third quartiles, and sample size
#' }
#'
#' Like the Box-Cox method of McGrath et al. (2020), the MLN method of Cai et al. (2021) assumes that the underlying distribution is normal after applying a suitable Box-Cox transformation with power parameter \eqn{\lambda}. Specifically, the MLN method consists of the following steps, outlined below.
#'
#' First, a maximum likelihood approach is used to estimate the power parameter \eqn{\lambda}, where the methods of Luo et al. (2016) and Wan et al. (2014) are applied to estimate the mean and standard deviation of the distribution of the transformed data. Then, a second round estimate of the mean and standard deviation of the distribution of the transformed data is obtained by maximum likelihood estimation conditional on the estimated power parameter. Finally, the inverse transformation is applied to estimate the sample mean and standard deviation of the original, untransformed data.
#'
#' @param min.val numeric value giving the sample minimum.
#' @param q1.val numeric value giving the sample first quartile.
#' @param med.val numeric value giving the sample median.
#' @param q3.val numeric value giving the sample third quartile.
#' @param max.val numeric value giving the sample maximum.
#' @param n numeric value giving the sample size.
#'
#' @return A object of class \code{mln.mean.sd}. The object is a list with the following components:
#' \item{est.mean}{Estimated sample mean.}
#' \item{est.sd}{Estimated sample standard deviation.}
#' \item{location}{Estimated mean of the Box-Cox transformed data.}
#' \item{scale}{Estimated standard deviation of the Box-Cox transformed data.}
#' \item{shape}{Estimated transformation parameter \eqn{\lambda}.}
#' \item{bc.norm.rvs}{The random variables generated by the Box-Cox (or, equivalently, power-normal) distribution during the Monte Carlo simulation.}
#' \item{...}{Some additional elements.}
#'
#' The results are printed with the \code{\link{print.mln.mean.sd}} function.
#'
#' @examples
#' ## Generate S2 summary data
#' set.seed(1)
#' n <- 100
#' x <- stats::rlnorm(n, 2.5, 1)
#' quants <- stats::quantile(x, probs = c(0.25, 0.5, 0.75))
#' obs.mean <- mean(x)
#' obs.sd <- stats::sd(x)
#'
#' ## Estimate the sample mean and standard deviation using the MLN method
#' mln.mean.sd(q1.val = quants[1], med.val = quants[2], q3.val = quants[3],
#' n = n)
#'
#' @references Cai S., Zhou J., and Pan J. (2021). Estimating the sample mean and standard deviation from order statistics and sample size in meta-analysis. \emph{Statistical Methods in Medical Research}. \strong{30}(12):2701-2719.
#' @references McGrath S., Zhao X., Steele R., Thombs B.D., Benedetti A., and the DEPRESsion Screening Data (DEPRESSD) Collaboration. (2020). Estimating the sample mean and standard deviation from commonly reported quantiles in meta-analysis. \emph{Statistical Methods in Medical Research}. \strong{29}(9):2520-2537.
#' @references Box G.E.P., and D.R. Cox. (1964). An analysis of transformations. \emph{Journal of the Royal Statistical Society Series B}. \strong{26}(2):211-52.
#' @references Luo D., Wan X., Liu J., and Tong T. (2016). Optimally estimating the sample mean from the sample size, median, mid-range, and/or mid-quartile range. \emph{Statistical Methods in Medical Research}. \strong{27}(6):1785-805
#' @references Wan X., Wang W., Liu J., and Tong T. (2014). Estimating the sample mean and standard deviation from the sample size, median, range and/or interquartile range. \emph{BMC Medical Research Methodology}. \strong{14}:135.
#' @export
mln.mean.sd <- function(min.val, q1.val, med.val, q3.val, max.val, n) {
args <- as.list(environment())
scenario <- get.scenario(min.val = min.val, q1.val = q1.val, med.val = med.val,
q3.val = q3.val, max.val = max.val)
check_errors(min.val = min.val, q1.val = q1.val, med.val = med.val,
q3.val = q3.val, max.val = max.val, n = n, scenario = scenario)
h <- floor(0.25 * n + 1)
j <- floor(0.5 * n + 1)
k <- floor(0.75 * n + 1)
boxcoxtrans <- function(lambda, x) {
if (lambda == 0) {
return(log(x))
} else {
return((x^lambda - 1) / lambda)
}
}
logL.lambda <- function(lambda){
if (scenario == 'S1'){
min.val.lambda <- boxcoxtrans(lambda, min.val)
med.val.lambda <- boxcoxtrans(lambda, med.val)
max.val.lambda <- boxcoxtrans(lambda, max.val)
mu <- metaBLUE::Luo.mean(X = c(min.val.lambda, med.val.lambda, max.val.lambda), n = n, type = scenario)$muhat
sigma <- metaBLUE::Wan.std(X = c(min.val.lambda, med.val.lambda, max.val.lambda), n = n, type = scenario)$sigmahat
logL <- sum(stats::dnorm(c(min.val.lambda, med.val.lambda, max.val.lambda), mu, sigma, log = TRUE)) +
(j - 2) * log(stats::pnorm(med.val.lambda, mu, sigma)-stats::pnorm(min.val.lambda, mu, sigma)) +
(n - j - 1) * log(stats::pnorm(max.val.lambda, mu, sigma) - stats::pnorm(med.val.lambda, mu, sigma))
} else if (scenario == 'S2'){
q1.val.lambda <- boxcoxtrans(lambda, q1.val)
med.val.lambda <- boxcoxtrans(lambda, med.val)
q3.val.lambda <- boxcoxtrans(lambda, q3.val)
mu <- metaBLUE::Luo.mean(X = c(q1.val.lambda, med.val.lambda, q3.val.lambda), n = n, type = scenario)$muhat
sigma <- metaBLUE::Wan.std(X = c(q1.val.lambda, med.val.lambda, q3.val.lambda), n = n, type = scenario)$sigmahat
logL <- sum(stats::dnorm(c(q1.val.lambda, med.val.lambda, q3.val.lambda), mu, sigma, log = TRUE)) +
(h - 1) * stats::pnorm(q1.val.lambda, mu, sigma, log.p = TRUE) + (j - h - 1) * log(stats::pnorm(med.val.lambda, mu, sigma)-stats::pnorm(q1.val.lambda, mu, sigma)) +
(k - j - 1) * log(stats::pnorm(q3.val.lambda, mu, sigma) - stats::pnorm(med.val.lambda, mu, sigma)) + (n - k) * stats::pnorm(q3.val.lambda, mu, sigma, lower.tail = FALSE, log.p = TRUE)
} else if (scenario == 'S3'){
min.val.lambda <- boxcoxtrans(lambda, min.val)
q1.val.lambda <- boxcoxtrans(lambda, q1.val)
med.val.lambda <- boxcoxtrans(lambda, med.val)
q3.val.lambda <- boxcoxtrans(lambda, q3.val)
max.val.lambda <- boxcoxtrans(lambda, max.val)
mu <- metaBLUE::Luo.mean(X = c(min.val.lambda, q1.val.lambda, med.val.lambda, q3.val.lambda, max.val.lambda), n = n, type = scenario)$muhat
sigma <- metaBLUE::Wan.std(X = c(min.val.lambda, q1.val.lambda, med.val.lambda, q3.val.lambda, max.val.lambda), n = n, type = scenario)$sigmahat
logL <- sum(stats::dnorm(c(min.val.lambda, q1.val.lambda, med.val.lambda, q3.val.lambda, max.val.lambda), mu, sigma, log = TRUE)) +
(h - 2) * log(stats::pnorm(q1.val.lambda, mu, sigma) - stats::pnorm(min.val.lambda, mu, sigma)) + (j - h - 1) * log(stats::pnorm(med.val.lambda, mu, sigma) - stats::pnorm(q1.val.lambda, mu, sigma)) +
(k - j - 1)*log(stats::pnorm(q3.val.lambda, mu, sigma) - stats::pnorm(med.val.lambda, mu, sigma)) + (n - k - 1) * log(stats::pnorm(max.val.lambda, mu, sigma) - stats::pnorm(q3.val.lambda, mu, sigma))
}
return(logL)
}
#the MLE of lambda
opt <- tryCatch({suppressWarnings(stats::optimize(f = logL.lambda, interval = c(0, 10),
tol = 0.001, maximum = TRUE))},
error = NULL)
if (is.null(opt)) {
stop("Optimization algorithm for finding lambda did not converge.")
}
lambda.hat <-round.lambda(opt$maximum)
if (scenario == 'S1'){
min.val.lambda <- boxcoxtrans(lambda.hat, min.val)
med.val.lambda <- boxcoxtrans(lambda.hat, med.val)
max.val.lambda <- boxcoxtrans(lambda.hat, max.val)
} else if (scenario == 'S2'){
q1.val.lambda <- boxcoxtrans(lambda.hat, q1.val)
med.val.lambda <- boxcoxtrans(lambda.hat, med.val)
q3.val.lambda <- boxcoxtrans(lambda.hat, q3.val)
} else if (scenario == 'S3'){
min.val.lambda <- boxcoxtrans(lambda.hat, min.val)
q1.val.lambda <- boxcoxtrans(lambda.hat, q1.val)
med.val.lambda <- boxcoxtrans(lambda.hat, med.val)
q3.val.lambda <- boxcoxtrans(lambda.hat, q3.val)
max.val.lambda <- boxcoxtrans(lambda.hat, max.val)
}
#-------------------------------------------------------------------------------
#estimating mu and sigma after Box-Cox transformation
if (scenario == 'S1'){
quants <- c(min.val.lambda, med.val.lambda, max.val.lambda)
} else if (scenario == 'S2'){
quants <- c(q1.val.lambda, med.val.lambda, q3.val.lambda)
} else if (scenario == 'S3'){
quants <- c(min.val.lambda, q1.val.lambda, med.val.lambda, q3.val.lambda, max.val.lambda)
}
mean.LW <- metaBLUE::Luo.mean(X = quants, n = n, type = scenario)$muhat
sd.LW <- metaBLUE::Wan.std(X = quants, n = n, type = scenario)$sigmahat
logL <- function(theta){
mu <- theta[1]
sigma <- theta[2]
if (scenario == 'S1'){
logL <- sum(stats::dnorm(c(min.val.lambda, med.val.lambda, max.val.lambda), mu, sigma, log = TRUE)) +
(j - 2) * log(stats::pnorm(med.val.lambda, mu, sigma)-stats::pnorm(min.val.lambda, mu, sigma)) +
(n - j - 1) * log(stats::pnorm(max.val.lambda, mu, sigma) - stats::pnorm(med.val.lambda, mu, sigma))
} else if (scenario == 'S2'){
logL <- sum(stats::dnorm(c(q1.val.lambda, med.val.lambda, q3.val.lambda), mu, sigma, log = TRUE)) +
(h - 1) * stats::pnorm(q1.val.lambda, mu, sigma, log.p = TRUE) + (j - h - 1) * log(stats::pnorm(med.val.lambda, mu, sigma)-stats::pnorm(q1.val.lambda, mu, sigma)) +
(k - j - 1) * log(stats::pnorm(q3.val.lambda, mu, sigma) - stats::pnorm(med.val.lambda, mu, sigma)) + (n - k) * stats::pnorm(q3.val.lambda, mu, sigma, lower.tail = FALSE, log.p = TRUE)
} else if (scenario == 'S3'){
logL <- sum(stats::dnorm(c(min.val.lambda, q1.val.lambda, med.val.lambda, q3.val.lambda, max.val.lambda), mu, sigma, log = TRUE)) +
(h - 2) * log(stats::pnorm(q1.val.lambda, mu, sigma) - stats::pnorm(min.val.lambda, mu, sigma)) + (j - h - 1) * log(stats::pnorm(med.val.lambda, mu, sigma) - stats::pnorm(q1.val.lambda, mu, sigma)) +
(k - j - 1)*log(stats::pnorm(q3.val.lambda, mu, sigma) - stats::pnorm(med.val.lambda, mu, sigma)) + (n - k - 1) * log(stats::pnorm(max.val.lambda, mu, sigma) - stats::pnorm(q3.val.lambda, mu, sigma))
}
return(-logL)
}
est.MLE <- tryCatch({suppressWarnings(stats::optim(par = c(mean.LW, sd.LW),
fn = logL))},
error = NULL)
if (is.null(est.MLE)) {
stop("Optimization algorithm for finding the MLE of mu and sigma did not converge.")
}
mu.lambda <- est.MLE$par[1]
sigma.lambda <- est.MLE$par[2]
data.lambda <- stats::rnorm(10000, mu.lambda, sigma.lambda)
if (lambda.hat != 0){
data.lambda <- data.lambda[(data.lambda > - 1 / lambda.hat) & (data.lambda < 2 * mu.lambda + 1 / lambda.hat)]
}
if (lambda.hat == 0){
mln.norm.rvs <- exp(data.lambda)
} else {
mln.norm.rvs <- (lambda.hat * data.lambda + 1)^(1 / lambda.hat)
}
output <- list(est.mean = mean(mln.norm.rvs),
est.sd = stats::sd(mln.norm.rvs),
location = mu.lambda, scale = sigma.lambda,
shape = lambda.hat, mln.norm.rvs = mln.norm.rvs,
args = args, scenario = scenario)
class(output) <- "mln.mean.sd"
return(output)
}
|
cb2a95984d013cdd379d2dd942e06f20f2bb92cb
|
8c026eb8ce94d81cdfba0073fb2d8fc767cca4a1
|
/McSwan/man/linearize.Rd
|
ad6ecf0aef2d72e33c4d6e9517077190994050ab
|
[] |
no_license
|
sunyatin/McSwan
|
832018c82b3cecd354f3eb31af63e7de227162c2
|
b86869b56892bbdf4b250b012c808dbeae5becf3
|
refs/heads/master
| 2023-02-08T14:10:05.278919
| 2023-02-03T13:57:23
| 2023-02-03T13:57:23
| 76,260,197
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 440
|
rd
|
linearize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimension_reduction.R
\name{linearize}
\alias{linearize}
\title{Merge model-specific multiSFSs into one big matrix}
\usage{
linearize(sfs, LDA_sizePerModel = NULL)
}
\arguments{
\item{sfs}{a list of model-specific multiSFS matrices}
}
\value{
A list of two elements: a vector of model indices; a big matrix containing all merged multiSFSs.
}
\keyword{internal}
|
c88fc325ee6b3620d9461195610e02442838ff09
|
56f6db6b40c3252398c7c6fa9b1d2681a0032cdd
|
/code/jakobbossek.R
|
586a0cd3648a86a4bdf53c2f9a7d8b64e62f69f8
|
[] |
no_license
|
MattBixley/rogaine_tsp
|
d1cc4321303cfd3040106f987c842d78790e10f8
|
4016adbfa95e59064191a229accf6c94e279011e
|
refs/heads/master
| 2021-01-26T09:03:48.467085
| 2020-02-27T01:20:48
| 2020-02-27T01:20:48
| 243,396,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
r
|
jakobbossek.R
|
# Demo of the 'salesperson' package: solve a clustered TSP instance with a
# nearest-neighbour heuristic, then (optionally) with the exact CONCORDE solver.
library(salesperson)
library(ggplot2)
set.seed(1)
# Random Euclidean network: 200 points arranged in 3 clusters.
x = generateClusteredNetwork(n.points = 200L, n.cluster = 3L)
# Nearest-neighbour heuristic tour.
res = runSolver("nn", x)
print(res)
print(autoplot(x, path = res$tour, close.path = TRUE))
# In order to run the exact CONCORDE TSP solver we first need to download the executable for our operating system. Say,
# the path to the executable is /path/to/concorde (on a linux system) or C:/path/to/concorde (on a windows system). We need to pass this path to runSolver in
# order to call CONCORDE.
res = runSolver("concorde", x, solver.path = "/path/to/concorde")
print(res)
print(autoplot(x, path = res$tour, close.path = TRUE))
# NOTE(review): this install line runs AFTER library(salesperson) above; on a
# machine without the package it must be executed first.
devtools::install_github("jakobbossek/salesperson")
|
7ed249a911670186aad74b5bf796f7d876acd66b
|
6e92ce9aea94772c3e56e9360cc4e97b55c1f12f
|
/statistical_analysis_and_figures/plot_nton_status_by_length.R
|
a5d7c980f5dd227dbf05d3b5a66e5c4385e1e1b4
|
[] |
no_license
|
kosticlab/universe_of_genes_scripts
|
510a937bec424c4df14bead4c35388605f0d8e57
|
c04114b8e6fbcef41c86f44250eca19309f60718
|
refs/heads/master
| 2020-05-06T20:13:24.171948
| 2019-05-23T21:39:18
| 2019-05-23T21:39:18
| 180,227,956
| 11
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,618
|
r
|
plot_nton_status_by_length.R
|
###plot nton status length data
##20190221
#Tierney
library(ggplot2)
library(cowplot)
# NOTE(review): setwd() in a shared script is fragile -- this path only exists
# on the original author's machine.
setwd('~/Dropbox (HMS)/orfletons/revisions/nton_status_by_length')
plotdata_withcoverage<-function(d,name){
  # Plot the frequency of contig coverage values split by gene n-ton status,
  # writing the plot to a PDF in the working directory.
  #
  # d    : data frame with columns FOLD.COVERAGE.CONTIG and GENE.NTON.STATUS
  # name : label used only in the plot title
  #        NOTE(review): the output file name is hard-coded and ignores `name`
  #        -- confirm this is intended.
  #
  # Called for its side effect (a PDF file); no useful return value.
  pdf('./nton_status_contig_coverage_oral_logged.pdf')
  # ggplot objects are NOT auto-printed inside a function body, so the plot
  # must be print()ed explicitly or the PDF comes out empty.
  # NOTE(review): bins is ignored when stat='count' -- presumably a leftover.
  print(
    ggplot(d,aes(x=FOLD.COVERAGE.CONTIG,fill=as.factor(GENE.NTON.STATUS)))+geom_histogram(stat='count',bins = 500)+scale_y_log10()+xlab('Contig coverage') + ylab('Frequency')+ggtitle(paste('Frequency of contig coverage by singleton status:',name))
  )
  dev.off()
}
plotdata<-function(d,name){
  # Plot gene-length and contig-length distributions split by gene n-ton
  # status, writing one PDF each (suffix taken from `name`).
  #
  # d    : data frame with columns GENE.LENGTH, CONTIG.LENGTH, GENE.NTON.STATUS
  # name : suffix for the output file names and label in the plot titles
  #
  # Called for its side effects (two PDF files); no useful return value.
  #plot gene length distribution
  pdf(paste('./nton_status_gene_length_',name,'.pdf',sep=''))
  # ggplot objects are NOT auto-printed inside a function body, so each plot
  # must be print()ed explicitly or the PDFs come out empty.
  print(
    ggplot(d,aes(x=as.numeric(as.character(GENE.LENGTH)),fill=as.factor(GENE.NTON.STATUS)))+geom_histogram(bins = 500)+xlab('Gene length')+scale_y_log10()+ylab('Frequency')+ggtitle(paste('Frequency of gene lengths by singleton status:',name))+theme(legend.position = "none")
  )
  dev.off()
  #plot contig length distribution
  pdf(paste('./nton_status_contig_length_',name,'.pdf',sep=''))
  print(
    ggplot(d,aes(x=as.numeric(as.character(CONTIG.LENGTH)),fill=as.factor(GENE.NTON.STATUS)))+geom_histogram(bins = 500)+xlab('Contig length')+scale_y_log10() +ggtitle(paste('Frequency of contig lengths by singleton status:',name))+ylab('Frequency')+theme(legend.position = "none")
  )
  dev.off()
}
# Generate the coverage plot for the oral data set (with contig coverage) and
# the length plots for the oral and gut data sets.
d=read.csv('./nton_by_gene_contig_length_with_coverage_data.csv')
plotdata_withcoverage(d,'oral_coverage')
d=read.csv('nton_by_gene_contig_length_gut_oral.csv')
plotdata(d,'oral')
d=read.csv('./nton_by_gene_contig_length_gut.csv')
plotdata(d,'gut')
|
b15e0cab172a0f72ec205a0edd9449bb19387669
|
7fd749dc1a52e201dfe433fa0da403414687f5c0
|
/man/esize_m.Rd
|
bc8e27f1cd534c94064397550fb23b26704ef34b
|
[] |
no_license
|
cran/r4lineups
|
a179b83afdc8b8c68ac812cd600e86bc0ece48e8
|
6753d4662d34ea39261878f9f0584788be7c23f7
|
refs/heads/master
| 2020-03-27T03:58:08.061355
| 2018-07-18T12:20:02
| 2018-07-18T12:20:02
| 145,902,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,077
|
rd
|
esize_m.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esize_m.R
\name{esize_m}
\alias{esize_m}
\title{Effective Size}
\usage{
esize_m(lineup_table, k, both = FALSE)
}
\arguments{
\item{lineup_table}{A table of lineup choices}
\item{k}{Number of members in lineup. Must be specified by user (scalar).}
\item{both}{Defaults to FALSE. Returns Tredoux's adjusted effective size estimate.
If TRUE, provides both Malpass's (1981) and Malpass's adjusted (see: Tredoux, 1998)
calculations of effective size.}
}
\value{
Malpass's original & adjusted estimates of effective size
}
\description{
Function for computing Effective Size
}
\details{
Reduces the size of a lineup from a (corrected) nominal starting
value by the degree to which members are, in sum, chosen below
the level of chance expectation.
}
\examples{
#Data:
lineup_vec <- round(runif(100, 1, 6))
#Call:
esize_m(lineup_vec, 6, both = TRUE)
esize_m(lineup_vec, 6)
}
\references{
Malpass, R. S. (1981). Effective size and defendant bias in
eyewitness identification lineups. \emph{Law and Human Behavior, 5}(4), 299-309.
Malpass, R. S., Tredoux, C., & McQuiston-Surrett, D. (2007). Lineup
construction and lineup fairness. In R. Lindsay, D. F. Ross, J. D. Read,
& M. P. Toglia (Eds.), \emph{Handbook of Eyewitness Psychology, Vol. 2: Memory for
people} (pp. 155-178). Mahwah, NJ: Lawrence Erlbaum Associates.
Tredoux, C. G. (1998). Statistical inference on measures of lineup fairness.
\emph{Law and Human Behavior, 22}(2), 217-237.
Tredoux, C. (1999). Statistical considerations when determining measures of
lineup size and lineup bias. \emph{Applied Cognitive Psychology}, 13, S9-S26.
Wells, G. L.,Leippe, M. R., & Ostrom, T. M. (1979). Guidelines for
empirically assessing the fairness of a lineup. \emph{Law and Human Behavior,
3}(4), 285-293.
}
|
96922861bd25fc9215e3a062b747156cf03301f6
|
8910a2a6bfce064ce3ddaf4d76589174dc4e2a1a
|
/src/sports/baseball/get_current_round.R
|
05dd33da49d71127fd369a351bb7cee7dfb8f9ca
|
[] |
no_license
|
zmalosh/Atlantis
|
552d8b3f3fdb67086e68954fd821f4429c829b86
|
bddddcc9f1715b4e826b2e184d9c2db70fd1f649
|
refs/heads/master
| 2021-05-19T14:32:46.142479
| 2020-08-30T15:28:52
| 2020-08-30T15:28:52
| 251,758,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 628
|
r
|
get_current_round.R
|
get_current_round <- function(leagueId){
  # Work out the "current" round of a baseball league's schedule.
  #
  # leagueId : identifier passed through to get_league_games().
  #
  # NOTE(review): sourcing inside the function re-loads the helper on every
  # call; consider sourcing once at script level.
  source('src/sports/baseball/get_league_games.R')
  games <- get_league_games(leagueId)
  # Round labels appear to be underscore-separated dates (e.g. "2020_05_01");
  # convert them to Date values so they can be compared with today.
  gameDates <- games$Round %>% stringr::str_replace_all('_', '-') %>% ymd() %>% unique()
  currentDate <- date(lubridate::now())
  maxDate <- max(gameDates)
  isAllPast <- maxDate < currentDate
  if(isAllPast){
    # Season already finished: return the last round.
    # NOTE(review): this branch returns a Date, whereas the normal path below
    # returns the raw Round string -- callers get inconsistent types. Confirm.
    return(maxDate)
  }
  minDate <- min(gameDates)
  isAllFuture <- minDate > currentDate
  if(isAllFuture){
    # Season not started yet: return the first round (also a Date -- see note).
    return(minDate)
  }
  # Otherwise: the round of the earliest scheduled game with no score yet.
  nextGame <- (games %>% filter(is.na(HomeScore) & is.na(AwayScore)) %>% arrange(GameTime) %>% slice(1))[1,]
  currentRound <- nextGame$Round
  return(currentRound)
}
|
93b0fdfccfbe5da259af295b440fa4bf0fae1a91
|
0d0d1f5189aaa7112a725e9eb06f6b3386b52538
|
/LCPfunction.R
|
dfc63c7f38199c346ab75dc09a8cee46767192ad
|
[] |
no_license
|
adivea/LCP
|
e895e63fd6cde5bbd22336d20b5148e3baaa70f4
|
9a5b9650f62e10fd77fa8e74d32e7b51b5d57ecd
|
refs/heads/main
| 2023-04-21T09:56:26.825596
| 2021-05-15T05:57:35
| 2021-05-15T05:57:35
| 367,552,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,163
|
r
|
LCPfunction.R
|
calc.conductance <- function(raster, filename=NULL){
  # Build a hiking-time conductance surface (Tobler's hiking function) from an
  # elevation raster, plot it into outputs/ and save it into output_data/.
  #
  # raster   : a RasterLayer of elevation (assumed metre units in a projected
  #            CRS -- TODO confirm with caller)
  # filename : optional basename for the output files; defaults to names(raster)
  #
  # Called for its side effects (PDF + RDS files); no useful return value.
  library(gdistance)
  # Elevation difference between adjacent cells. Non-symmetric on purpose:
  # going uphill costs more than going downhill.
  heightDiff <- function(x){x[2] - x[1]}
  hd <- transition(raster, heightDiff, 8, symm = FALSE)
  # Convert elevation differences to slope (rise over run).
  slope <- geoCorrection(hd, type = "r", scl = FALSE)
  # Adjacent cell pairs, queen's case. The original referenced the global `r`
  # here instead of the function argument -- fixed.
  adj <- adjacent(raster, cells = seq_len(ncell(raster)), pairs = TRUE, directions = 8)
  # Tobler's hiking function: walking speed in metres per hour.
  speed <- slope
  speed[adj] <- 6 * 1000 * exp(-3.5 * abs(slope[adj] + 0.05)) # meters per hour as all rasters are in m units
  # Rectify the raster values on the basis of cell center distances.
  conductance <- geoCorrection(speed, type = "r", scl = FALSE)
  # Resolve the output basename once. The original used names(srtm), a global
  # that need not match the raster actually passed in -- fixed.
  base <- if (is.null(filename)) names(raster) else filename
  # Print result to pdf
  if (!dir.exists(file.path(".", "outputs"))) dir.create(file.path(".", "outputs"))
  pdf(paste0("outputs/", base, "conductance.pdf"))
  plot(raster(conductance), main = paste0("Conductivity of ", base, " surface in hours"))
  dev.off()
  # Save the result
  if (!dir.exists(file.path(".", "output_data"))) dir.create(file.path(".", "output_data"))
  saveRDS(conductance, paste0("output_data/", base, "conductance.rds"))
}
# Longitude / latitude grid of SRTM tiles to download and process.
x = c(-2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 40)
y = c(36, 38, 40, 42, 44) # first batch
for(i in x){
  for(e in y){
    #Download a tile
    cat("Longitude",i, "and Latitude",e)
    srtm <- getData('SRTM', lon = i, lat=e)
    ## Aggregate just to test the behavior
    srtm <- aggregate(srtm, fact = 10)
    ## Reclassify subzero values
    rcl <- cbind(-9999, 1, NA)
    r <- reclassify(srtm, rcl = rcl)
    ## Save the raster for future record ?
    # saveRDS(r, paste0("output_data/",names(r),".rds"))
    calc.conductance(r)
  }
}
# Spot-check one of the saved conductance surfaces.
plot(raster(readRDS("output_data/srtm_45_05conductance.rds")))
pop=5000
# NOTE(review): travelcost.totown is defined BELOW this call, and `cities` is
# never created in this file -- this line only works with leftover session
# state. Also `pop` assigned above is unused (5000 is passed literally).
travelcost.totown(cities, 5000)
travelcost.totown <- function(cities, pop){
  # Compute an accumulated travel-cost surface (hours) from every town with
  # population above `pop`, plot it, and save it into output_data/.
  #
  # cities : table with a pop_est column and "Longitude (X)" / "Latitude (Y)"
  #          coordinate columns (WGS84)
  # pop    : population threshold for including a town
  #
  # NOTE(review): relies on the globals `r`, `conductance` and `srtm` created
  # by the surrounding script -- confirm they are in scope when this is called.
  library(sf)
  library(gdistance)
  local_citiesXk <- cities %>%
    filter(pop_est > pop) %>%
    st_as_sf(coords = c("Longitude (X)", "Latitude (Y)"),
             crs = 4326) %>%
    st_transform(crs = crs(r)) %>%
    st_crop(r)
  cost <- accCost(conductance, fromCoords = as(local_citiesXk, "Spatial"))
  # The original plotted `y`, which is undefined inside this function; the
  # surface just computed is `cost` -- fixed.
  plot(cost, main = paste0("cost of travel in hours in ",names(srtm)," between towns of population >",pop)); contour(cost, add =TRUE) # should be in hours?
  if (!dir.exists(file.path(".", "output_data"))) dir.create(file.path(".", "output_data"))
  saveRDS(cost, paste0("output_data/",names(srtm),"costtotown",pop,".rds"))
}
# Check the cities came through well
# NOTE(review): `local_cities5k` was a local variable inside travelcost.totown
# and `conductance` only exists inside calc.conductance -- neither is defined
# at top level, so these lines only work with leftover session state.
plot(raster(conductance)); plot(local_cities5k$geometry, add = TRUE)
crs(conductance)
|
7c72c340b2f6a818ade4b104c3c192f7420eac23
|
eb15ab937869ba62e2fd828fe65218efc2390840
|
/R/data_faker.R
|
a94f630af01b1a308c076ca9235a2d98f0db4e1c
|
[] |
no_license
|
madrury/r-data-faker
|
3a2c7b7c1c2976a2c12a566c49914601ed799bb7
|
8e2d7ca68bca6c6f5a7a54406a4cd435d3dbf33a
|
refs/heads/master
| 2021-01-10T07:45:12.767013
| 2015-12-21T17:55:39
| 2015-12-21T17:55:39
| 48,385,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,589
|
r
|
data_faker.R
|
.make_binary_matrix <- function(n_rows, n_columns) {
    # Draw an n_rows x n_columns matrix of independent fair 0/1 coin flips.
    # (The sample() call is kept verbatim so the RNG stream is unchanged.)
    draws <- sample(c(0, 1), n_rows * n_columns, replace = TRUE, prob = c(.5, .5))
    matrix(draws, nrow = n_rows, ncol = n_columns)
}
make_linmod_data <- function(n_rows, n_columns,
                             n_columns_with_large_effects=0,
                             n_nuisance_columns=0,
                             n_hidden_columns=0,
                             beta_sd=1,
                             beta_inflate_factor=2.5,
                             noise_sd=2.5) {
    # Simulate data for a linear model with binary (0/1) predictors.
    #
    # The design has three kinds of columns:
    #   common   -- included in X and in the response
    #   nuisance -- included in X but NOT in the response
    #   hidden   -- included in the response but NOT in X
    # The first n_columns_with_large_effects common betas are inflated by
    # beta_inflate_factor.
    #
    # Returns a list with X, Y, beta_common, beta_nuisance, beta_hidden.
    n_common_columns <- n_columns - n_nuisance_columns
    if(n_common_columns < 0) {
        stop("Illogical arguments: n_nuisance_columns > n_columns.")
    }
    if(n_columns_with_large_effects > n_common_columns) {
        stop("Illogical arguments: n_columns_with_large_effects > number of effects.")
    }
    # The helper is named .make_binary_matrix; the original called the
    # non-existent name make_binary_matrix -- fixed.
    X_common <- .make_binary_matrix(n_rows, n_common_columns)
    X_nuisance <- .make_binary_matrix(n_rows, n_nuisance_columns)
    X_hidden <- .make_binary_matrix(n_rows, n_hidden_columns)
    beta_common <- rnorm(n_common_columns, mean=0, sd=beta_sd)
    # Guarded with seq_len: the original used 1:n_columns_with_large_effects,
    # and 1:0 selects the first element, wrongly inflating one beta when the
    # default of zero large effects was requested -- fixed.
    if (n_columns_with_large_effects > 0) {
        large_idx <- seq_len(n_columns_with_large_effects)
        beta_common[large_idx] <- beta_inflate_factor * beta_common[large_idx]
    }
    beta_nuisance <- rnorm(n_nuisance_columns, mean=0, sd=beta_sd)
    beta_hidden <- rnorm(n_hidden_columns, mean=0, sd=beta_sd)
    # Response uses common + hidden effects plus Gaussian noise; the observed
    # design X exposes common + nuisance columns only.
    Y <- (
        cbind(X_common, X_hidden) %*% c(beta_common, beta_hidden)
        + rnorm(n_rows, mean=0, sd=noise_sd))
    X <- cbind(X_common, X_nuisance)
    list(X=X,
         Y=Y,
         beta_common=beta_common,
         beta_nuisance=beta_nuisance,
         beta_hidden=beta_hidden)
}
|
63d994f7ba7944000c54ea1017756950f1840fee
|
f43bf697b66f51d3807e39843911cf7ebd5c6f4d
|
/R/3.raster-based_analysis.R
|
9a76da8eccc1a6fac652b512e40e2512e5665f54
|
[
"CC0-1.0"
] |
permissive
|
dongmeic/suppression
|
06a9479861ded64e87b6764b96bc0bd89a7b9fcf
|
db97e818f62ef8bf42533882101fea6c309dc0e0
|
refs/heads/master
| 2021-01-23T05:20:13.957321
| 2020-12-20T05:11:13
| 2020-12-20T05:11:13
| 92,959,614
| 0
| 0
| null | 2020-04-03T16:54:55
| 2017-05-31T15:16:45
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,122
|
r
|
3.raster-based_analysis.R
|
## Dongmei CHEN
# objectives: raster-based analysis
# reference: 1. /Users/dongmeichen/Documents/scripts/beetle/r/fire_suppression/raster_scaling.R;
# 2. /Users/dongmeichen/GitHub/suppression/figures_maps_regression.R;
# 3. /Users/dongmeichen/GitHub/suppression/regression.R
# input: /Volumes/dongmeic/beetle/data/vector/spatial_join/fpa_sit_dist.shp;
# output: data frames or spatial data frames
# libraries
library(rgdal)
library(raster)
library(corrplot)
# data needed: fpa.sit.join, mpb.pts, fwfod_westus
# Raster-based scaling analysis: rasterize fire and beetle point data at five
# cell sizes, log-transform the cell values and accumulate them into one
# data frame for later regression work.
# Requires fpa.sit.join, mpb.pts, fwfod_westus, mpb10km and crs from the
# saved workspace loaded below.
load("~/Documents/writing/fire suppression/scripts/data_analysis.RData")
cell.size <- c(10000,25000,50000,100000,200000)
cellsize.lab <- c("10km", "25km", "50km", "100km", "200km")
# Extent of the study area (map units).
xmin <- -1006739; xmax <- 1050000.0; ymin <- -1722656; ymax <- 539131.6
# Accumulator for per-resolution results.
# NOTE(review): the template names here ("MPBAcre", "logBA") do not match the
# varlist names used below ("MPBAcres", "LogBA"), and only 8 of these 14
# columns are ever produced -- confirm rbind() below behaves as intended.
ndf <- data.frame(MPBAcre = numeric(0), LogMPB = numeric(0), Cost = numeric(0), LogCost = numeric(0),
                  BurnedArea = numeric(0), logBA = numeric(0), Duration = numeric(0), FireSize = numeric(0),
                  LogSize = numeric(0), FireDens = numeric(0), LogDens = numeric(0), Distance = numeric(0),
                  LogDist = numeric(0), CellSize = numeric(0))
varlist <- c("MPBAcres", "LogMPB", "Cost", "LogCost", "BurnedArea", "LogBA", "Duration", "FireSize", "LogSize", "FireDens", "LogDens", "Distance", "LogDist")
varlist.n <- c("LogMPB", "LogCost", "LogSize", "LogBA", "Duration", "LogDens", "LogDist")
for (i in 1:length(cell.size)){
  # Build an empty raster at this resolution over the study extent.
  ncols <- (xmax - xmin)/cell.size[i]; nrows <- (ymax - ymin)/cell.size[i]
  r <- raster(nrows=nrows, ncols=ncols, ext=extent(mpb10km),crs = crs)
  # Rasterize fire density, burned area, size, cost, duration, distance and
  # beetle-affected acres onto the common grid.
  fire.dens <- rasterize(fwfod_westus, r, "FIREID", fun='count', na.rm=TRUE)
  fire.acre <- rasterize(fpa.sit.join, r, "Acres", fun=sum, na.rm=TRUE)
  fire.size <- rasterize(fpa.sit.join, r, "FIRE_SIZE", fun=mean, na.rm=TRUE)
  cost <- rasterize(fpa.sit.join, r, "Costs", fun=sum, na.rm=TRUE)
  duration <- rasterize(fpa.sit.join, r, "Duration", fun=mean, na.rm=TRUE)
  distance <- rasterize(fpa.sit.join, r, "dist", fun=mean, na.rm=TRUE)
  btl.acre <- rasterize(mpb.pts, r, "ORIG_FID", fun='count', na.rm=TRUE)
  # One row per grid cell.
  df <- as.data.frame(cbind(getValues(cost), getValues(fire.acre), getValues(duration), getValues(fire.size), getValues(fire.dens), getValues(distance), getValues(btl.acre)))
  colnames(df) <- c("Cost", "BurnedArea","Duration", "FireSize", "FireDens", "Distance", "MPBAcres")
  df$LogCost <- log(df$Cost)
  df$LogBA <- log(df$BurnedArea)
  df$LogMPB <- log(df$MPBAcres)
  df$LogSize <- log(df$FireSize)
  df$LogDens <- log(df$FireDens)
  df$LogDist <- log(df$Distance)
  df<- df[varlist]
  # Drop cells where any log is -Inf (zero counts) or NA.
  df.n <- df[df$LogMPB != -Inf &
             df$LogCost != -Inf &
             df$LogBA != -Inf &
             df$LogSize != -Inf &
             df$LogDens != -Inf &
             df$LogDist != -Inf,]
  df.n <- na.omit(df.n)
  df.n <- df.n[varlist.n]
  # png(paste("raster_", cellsize.lab[i], ".png", sep = ""), width=12, height=8, units="in", res=300)
  # par(mfrow=c(1,1),xpd=FALSE,mar=c(4,4,2,2.5))
  # plot(df.n, cex=0.2, main=paste("Spatial resolution:", cellsize.lab[i]))
  # dev.off()
  df.n$CellSize <- cell.size[i]
  ndf <- rbind(ndf, df.n)
  print(paste(cell.size[i], "Done!"))
  # print(paste("Results for spatial resolution:", cellsize.lab[i]))
  # M <- cor(df.n)
  # corrplot(M, order="AOE", cl.pos="b", tl.pos="d", tl.srt=60)
  # res <- cor.mtest(df.n,0.95)
  # png(paste("corr_", cellsize.lab[i], ".png", sep = ""), width=8, height=6, units="in", res=300)
  # par(mfrow=c(1,1),xpd=FALSE,mar=c(1,1,1,1))
  # corrplot.mixed(M, p.mat = res[[1]], sig.level=0.05)
  # dev.off()
  # mlr <- lm(LogMPB ~ ., data=df.n)
  # #mlr <- lm(LogMPB ~ LogCost + Duration + LogSize + LogDens + LogDist, data=df.n)
  # summary(mlr)
  # layout(matrix(c(1,2,3,4),2,2))
  # plot(mlr, cex=0.2)
  # gvmodel <- gvlma(mlr)
  # summary(gvmodel)
  # step <- stepAIC(mlr, direction="both")
  # print(step$anova)
}
write.csv(ndf, "/Users/dongmeichen/Documents/writing/fire suppression/output/v4/df/raster_scaling_point_df.csv", row.names = FALSE)
save.image("~/Documents/writing/fire suppression/scripts/data_analysis.RData")
|
4880764c055fdb48f29083dabc598d0f09f06d80
|
6e1144258a10d87fde18712f8817e39dd2b3a604
|
/mycarto.R
|
60a089b41ccae9acb7e3e3ff01719bed7b6b2d0b
|
[] |
no_license
|
jameswoodcock/main-sort-analysis
|
d07ad00ad4d993d36733ad30767c74fc1f8048dc
|
532c3faff06a4fcbda3fb6cf731f5f0eafa5976f
|
refs/heads/master
| 2021-01-01T05:59:54.937120
| 2015-07-08T20:23:27
| 2015-07-08T20:23:27
| 28,271,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,213
|
r
|
mycarto.R
|
# External preference mapping for sensory data (appears adapted from
# FactoMineR::carto -- TODO confirm provenance).
#
# Mat  : product coordinates; columns `coord` give the 2-D sensory map.
# MatH : data frame of hedonic scores, products in rows, panelists in columns.
# Clusters panelists (Ward), fits a per-panelist response surface over the
# map (regmod selects the model: 1 quadratic, 2 linear, 3 circular,
# 4 elliptic), and draws dendrogram / correlation circle / preference map
# depending on the graph.* flags.
# Returns a list: clusters, nb.depasse (counts of panelists above `level`),
# grid f1/f2, per-panelist optimum coordinates abscis/ordon, and matrice.
mycarto <- function (Mat, MatH, level = 0, regmod = 1, coord = c(1, 2),
    asp = 1, cex = 1.3, col = "steelblue4", font = 2, clabel = 0.8,
    label.j = FALSE, resolution = 200, nb.clusters = 0, graph.tree = TRUE,
    graph.corr = TRUE, graph.carto = TRUE, main = NULL, col.min = 7.5,
    col.max = 0)
{
    # Two-hue diverging palette (col.min / col.max pick the hues).
    cm.colors2 = function(n, alpha = 1) {
        if ((n <- as.integer(n[1L])) > 0) {
            even.n <- n%%2 == 0
            k <- n%/%2
            l1 <- k + 1 - even.n
            l2 <- n - k + even.n
            c(if (l1 > 0) hsv(h = col.min/12, s = seq.int(0.8,
                ifelse(even.n, 0.5/k, 0), length.out = l1), v = 1,
                alpha = alpha), if (l2 > 1) hsv(h = col.max/12,
                s = seq.int(0, 0.8, length.out = l2)[-1L], v = 1,
                alpha = alpha))
        }
        else character(0L)
    }
    # Evaluate the fitted quadratic surface at grid point (n1, n2).
    predire <- function(n1, n2, coeff) {
        coeff[1] + coeff[2] * n1 + coeff[3] * n2 + coeff[4] *
            n1 * n1 + coeff[5] * n2 * n2 + coeff[6] * n1 * n2
    }
    if (!is.data.frame(MatH))
        stop("Non convenient selection for MatH")
    # Impute missing hedonic scores by row mean + column mean - grand mean.
    if (any(is.na(MatH))) {
        missing <- which(is.na(MatH))
        MatH[missing] <- (matrix(rep(apply(MatH, 1, mean, na.rm = T),
            ncol(MatH)), ncol = ncol(MatH)) + matrix(rep(apply(MatH,
            2, mean, na.rm = T), each = nrow(MatH)), ncol = ncol(MatH)) -
            matrix(rep(mean(MatH, na.rm = TRUE), ncol(MatH) *
                nrow(MatH)), ncol = ncol(MatH)))[missing]
    }
    matrice <- cbind(row.names(MatH), Mat[rownames(MatH), ],
        MatH)
    # Ward clustering of panelists on their score profiles.
    classif <- cluster::agnes(dist(t(MatH)), method = "ward")
    if (graph.tree) {
        dev.new()
        plot(classif, main = "Cluster Dendrogram", xlab = "Panelists",
            which.plots = 2)
    }
    # If not given, pick the number of clusters at the largest height jump.
    if (nb.clusters == 0) {
        classif2 <- as.hclust(classif)
        nb.clusters = which.max(rev(diff(classif2$height))) +
            1
    }
    aux = kmeans(t(MatH), centers = nb.clusters)$cluster
    # Mean score profile of each cluster.
    mat <- matrix(0, nb.clusters, nrow(MatH))
    dimnames(mat) <- list(1:nb.clusters, rownames(MatH))
    for (i in 1:nb.clusters) {
        mat[i, ] <- apply(t(MatH[, aux == i]), 2, mean)
        rownames(mat)[i] <- paste("cluster", i)
    }
    # Correlations of clusters (ab) and panelists (aa) with the two map axes.
    ab = cor(t(mat), matrice[, 2:3], use = "pairwise.complete.obs")
    aa = cor(matrice[, 4:ncol(matrice)], matrice[, 2:3], use = "pairwise.complete.obs")
    if (graph.corr) {
        dev.new()
        plot(0, 0, xlab = paste("Dim", coord[1]), ylab = paste("Dim",
            coord[2]), xlim = c(-1, 1), ylim = c(-1, 1), col = "white",
            asp = 1, main = "Correlation circle")
        x.cercle <- seq(-1, 1, by = 0.01)
        y.cercle <- sqrt(1 - x.cercle^2)
        lines(x.cercle, y = y.cercle)
        lines(x.cercle, y = -y.cercle)
        abline(v = 0, lty = 2, cex = cex)
        abline(h = 0, lty = 2, cex = cex)
        for (v in 1:nrow(aa)) {
            arrows(0, 0, aa[v, 1], aa[v, 2], length = 0.1, angle = 15,
                code = 2, lty = 2)
            if (label.j) {
                if (aa[v, 1] >= 0)
                  pos <- 4
                else pos <- 2
                text(aa[v, 1], y = aa[v, 2], labels = rownames(aa)[v],
                  pos = pos, offset = 0.2)
            }
        }
        for (v in 1:nrow(ab)) {
            arrows(0, 0, ab[v, 1], ab[v, 2], length = 0.1, angle = 15,
                code = 2, col = "blue")
            if (ab[v, 1] >= 0)
                pos <- 4
            else pos <- 2
            text(ab[v, 1], y = ab[v, 2], labels = rownames(ab)[v],
                pos = pos, offset = 0.2, col = "blue")
        }
    }
    # Center the hedonic scores, then fit one response surface per panelist
    # over a regular grid covering the map (5% margin each side).
    matrice[, 4:ncol(matrice)] <- scale(matrice[, 4:ncol(matrice)],
        center = TRUE, scale = FALSE)[, ]
    nbconso <- ncol(matrice) - 3
    x1 <- matrice[, 2]
    x2 <- matrice[, 3]
    x12 <- scale(x1, center = TRUE, scale = FALSE)[, ]^2
    x22 <- scale(x2, center = TRUE, scale = FALSE)[, ]^2
    x12plusx22 <- x12 + x22
    x3 <- scale(x1, center = TRUE, scale = FALSE)[, ] * scale(x2,
        center = TRUE, scale = FALSE)[, ]
    XX <- cbind(x1, x2, x12, x22, x3)
    etendue.x1 <- diff(range(x1))
    etendue.x2 <- diff(range(x2))
    pas <- max(etendue.x1, etendue.x2)/resolution
    f1 <- seq((min(x1) - etendue.x1 * 0.05), (max(x1) + etendue.x1 *
        0.05), pas)
    f2 <- seq((min(x2) - etendue.x2 * 0.05), (max(x2) + etendue.x2 *
        0.05), pas)
    depasse <- matrix(0, nrow = length(f1), ncol = length(f2))
    abscis <- NULL
    ordon <- NULL
    for (i in 1:nbconso) {
        if (regmod == 1)
            coeff <- lm(matrice[, i + 3] ~ XX[, 1] + XX[, 2] +
                XX[, 3] + XX[, 4] + XX[, 5], na.action = na.omit)$coef
        if (regmod == 2) {
            coeff <- lm(matrice[, i + 3] ~ XX[, 1] + XX[, 2],
                na.action = na.omit)$coef
            coeff <- c(coeff, 0, 0, 0)
        }
        if (regmod == 3) {
            coeff <- lm(matrice[, i + 3] ~ x1 + x2 + x12plusx22,
                na.action = na.omit)$coef
            coeff <- c(coeff, coeff[4], 0)
        }
        if (regmod == 4) {
            coeff <- lm(matrice[, i + 3] ~ XX[, 1] + XX[, 2] +
                XX[, 3] + XX[, 4], na.action = na.omit)$coef
            coeff <- c(coeff, 0)
        }
        # Standardize the predicted surface, count grid cells above `level`,
        # and record the location of this panelist's predicted optimum.
        predites <- outer(f1, f2, predire, coeff)
        if (sd(as.vector(predites), na.rm = TRUE) != 0)
            predites <- (predites - mean(predites, na.rm = TRUE))/sd(as.vector(predites),
                na.rm = TRUE)
        depasse <- depasse + matrix(as.numeric(predites > level),
            nrow = length(f1), ncol = length(f2))
        abscis <- c(abscis, f1[rev(order(predites))[1] - length(f1) *
            as.integer((rev(order(predites))[1] - 0.5)/length(f1))])
        ordon <- c(ordon, f2[as.integer(1 + (rev(order(predites))[1] -
            0.5)/length(f1))])
    }
    nb.depasse <- depasse
    # Convert counts to the percentage of panelists above `level`.
    depasse <- round(depasse/nbconso * 100)
    dimnames(depasse) <- list(as.character(f1), as.character(f2))
    if (graph.carto) {
        dev.new()
        col = cm.colors2(100)
        if (is.null(main))
            main = "Preference mapping"
        # NOTE(review): the stray ", ," below passes an empty (missing)
        # argument into image() via ... -- almost certainly a typo; confirm
        # before removing.
        image(f1, f2, depasse, col = col, xlab = paste("Dim",
            coord[1]), ylab = paste("Dim", coord[2]), main = main,
            font.main = font, , cex.main = cex, asp = asp)
        contour(f1, f2, depasse, nlevels = 9, levels = c(20,
            30, 40, 50, 60, 70, 80, 90, 95), add = TRUE, labex = 0)
        for (i in 1:nrow(matrice)) {
            points(matrice[i, 2], matrice[i, 3], pch = 15)
            #text(matrice[i, 2], matrice[i, 3], matrice[i, 1],
            #pos = 4, offset = 0.2, )
        }
        points(abscis, ordon, pch = 20)
    }
    don <- cbind.data.frame(as.factor(aux), t(MatH))
    colnames(don) <- c("clusters", paste("Prod", rownames(MatH),
        sep = "."))
    # The decat()-based per-cluster description was disabled by the author;
    # left commented out so res$prod.clusters is never populated.
    #resdecat <- decat(don, formul = "~clusters", firstvar = 2,
    #proba = 0.05, graph = FALSE)
    res <- list()
    res$clusters <- aux
    #res$prod.clusters <- resdecat$resT
    res$nb.depasse <- nb.depasse
    res$f1 <- f1
    res$f2 <- f2
    res$abscis <- abscis
    res$ordon <- ordon
    res$matrice <- matrice
    return(res)
}
|
e62c62946e37f36c11c921194fa5fa2cf2186d09
|
3b417786e77e03bef575db9eb4558fd52f026e1b
|
/plot2.R
|
03f625b1c5400229f9edab6650809292ee8f2d02
|
[] |
no_license
|
r-datascience/exploratory_data_analysis
|
4984106ac9613468611a816b14923e4c01de3951
|
a291b839f7e892f00a2e06dbe9bee4ba3011f088
|
refs/heads/master
| 2021-05-28T07:51:41.343480
| 2015-02-08T23:40:09
| 2015-02-08T23:40:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
plot2.R
|
# Plot global active power over 2007-02-01..2007-02-02 and save it as a
# 480x480 PNG (Exploratory Data Analysis course plot 2).
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Build a full timestamp column, then a Date column for the window filter.
power$as.Date <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date, "%d/%m/%Y")
in_window <- power$Date >= "2007-02-01" & power$Date <= "2007-02-02"
power <- power[in_window, ]
# Line plot on screen, then copy the screen device to the PNG file.
plot(power$as.Date, power$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
|
5bab5a18395c11c9122a05b63b0dfa079f42e122
|
24edba72b6483d25a0c4167fc70c25e27c76a86f
|
/R/GenMatching.R
|
536f8dd881e420610083d71ed4974d09d5c18315
|
[] |
no_license
|
JasjeetSekhon/Matching
|
aaea3c27bc863f07bfc93ee10bfd4ac066b59d02
|
55226a53ea324b428b3d6090d865c025b0e7bb35
|
refs/heads/master
| 2022-11-15T17:32:38.337362
| 2022-11-08T00:58:04
| 2022-11-08T00:58:04
| 132,082,360
| 19
| 5
| null | 2020-02-05T23:00:03
| 2018-05-04T03:39:44
|
C
|
UTF-8
|
R
| false
| false
| 38,031
|
r
|
GenMatching.R
|
FastMatchC <- function(N, xvars, All, M, cdd, ww, Tr, Xmod, weights)
{
  # Thin wrapper around the compiled "FastMatchC" routine in the Matching
  # package. Coerces every argument to the storage type the C code expects
  # and returns whatever the C routine produces unchanged.
  out <- .Call("FastMatchC",
               as.integer(N), as.integer(xvars), as.integer(All), as.integer(M),
               as.double(cdd), as.double(ww), as.double(Tr),
               as.double(Xmod), as.double(weights),
               PACKAGE = "Matching")
  out
}
MatchGenoudStage1 <- function(Tr=Tr, X=X, All=All, M=M, weights=weights,
                              tolerance)
{
  # Preprocessing stage for GenMatch: optionally flips the treatment
  # indicator (SATC), caps the number of matches M, and standardizes each
  # column of X to weighted mean zero / unit scale.
  # Returns list(Tr, X, All, M, N).
  n.obs <- nrow(X)
  n.vars <- ncol(X)
  # If SATC is to be estimated, the treatment indicator is reversed.
  if (All == 2) {
    Tr <- 1 - Tr
  }
  # Keep M feasible given the group sizes.
  if (All == 1) {
    M <- min(M, sum(Tr), sum(1 - Tr))
  } else {
    M <- min(M, sum(1 - Tr))
  }
  # Standardize each regressor; a constant column collapses onto `tolerance`
  # rather than dividing by zero.
  Mu.X <- matrix(0, n.vars, 1)
  Sig.X <- matrix(0, n.vars, 1)
  total.weight <- sum(weights)
  for (col.idx in seq_len(n.vars)) {
    Mu.X[col.idx, 1] <- sum(X[, col.idx] * weights) / total.weight
    centered <- X[, col.idx] - Mu.X[col.idx, 1]
    # NOTE(review): parenthesisation reproduced exactly from the original --
    # max(tolerance, sum(x^2 w)) / sum(w) - mu^2, i.e. the floor is applied
    # to the raw sum of squares, not the variance; confirm intent upstream.
    Sig.X[col.idx, 1] <- sqrt(max(tolerance, sum(X[, col.idx] * X[, col.idx] * weights)) / total.weight - Mu.X[col.idx, 1]^2)
    # Bessel-style small-sample correction.
    Sig.X[col.idx, 1] <- Sig.X[col.idx, 1] * sqrt(n.obs / (n.obs - 1))
    if (Sig.X[col.idx, 1] < tolerance) {
      Sig.X[col.idx, 1] <- tolerance
    }
    X[, col.idx] <- centered / Sig.X[col.idx, 1]
  }
  list(Tr = Tr, X = X, All = All, M = M, N = n.obs)
}
###############################################################################
## For Caliper!
##
###############################################################################
# Preprocessing stage for GenMatch when exact-match and/or caliper options
# are supplied. Validates/expands `exact` and `caliper`, converts calipers
# into absolute units (ecaliper), optionally flips Tr for SATC, caps M, and
# standardizes X (same normalization as MatchGenoudStage1).
# Returns list(Tr, X, All, M, N, ecaliper).
MatchGenoudStage1caliper <- function(Tr=Tr, X=X, All=All, M=M, weights=weights,
                                     exact=exact, caliper=caliper,
                                     distance.tolerance, tolerance)
  {
    N <- nrow(X)
    xvars <- ncol(X)
    weights.orig <- as.matrix(weights)
    # Recycle a scalar `exact` to all columns; drop it with a warning when
    # its length matches neither 1 nor ncol(X).
    if (!is.null(exact))
      {
        exact = as.vector(exact)
        nexacts = length(exact)
        if ( (nexacts > 1) & (nexacts != xvars) )
          {
            warning("length of exact != ncol(X). Ignoring exact option")
            exact <- NULL
          } else if (nexacts==1 & (xvars > 1) ){
            exact <- rep(exact, xvars)
          }
      }
    # Same recycling/validation for `caliper`.
    if (!is.null(caliper))
      {
        caliper = as.vector(caliper)
        ncalipers = length(caliper)
        if ( (ncalipers > 1) & (ncalipers != xvars) )
          {
            warning("length of caliper != ncol(X). Ignoring caliper option")
            caliper <- NULL
          } else if (ncalipers==1 & (xvars > 1) ){
            caliper <- rep(caliper, xvars)
          }
      }
    # Calipers are specified in standard deviations; convert to absolute
    # units using the weighted sd of each column (computed on the original,
    # un-normalized X).
    if (!is.null(caliper))
      {
        ecaliper <- vector(mode="numeric", length=xvars)
        sweights <- sum(weights.orig)
        for (i in 1:xvars)
          {
            meanX <- sum( X[,i]*weights.orig )/sweights
            sdX <- sqrt(sum( (X[,i]-meanX)^2 )/sweights)
            ecaliper[i] <- caliper[i]*sdX
          }
      } else {
        ecaliper <- NULL
      }
    # Exact matching is implemented as an (effectively zero-width) caliper of
    # distance.tolerance; non-exact columns get a caliper wide enough never
    # to bind when no caliper was requested.
    if (!is.null(exact))
      {
        if(is.null(caliper))
          {
            max.diff <- abs(max(X)-min(X) + distance.tolerance * 100)
            ecaliper <- matrix(max.diff, nrow=xvars, ncol=1)
          }
        for (i in 1:xvars)
          {
            if (exact[i])
              ecaliper[i] <- distance.tolerance;
          }
      }
    # if SATC is to be estimated the treatment indicator is reversed
    if (All==2)
      Tr <- 1-Tr
    # check on the number of matches, to make sure the number is within the limits
    # feasible given the number of observations in both groups.
    if (All==1)
      {
        M <- min(M,min(sum(Tr),sum(1-Tr)));
      } else {
        M <- min(M,sum(1-Tr));
      }
    # I.c. normalize regressors to have mean zero and unit variance.
    # If the standard deviation of a variable is zero, its normalization
    # leads to a variable with all zeros.
    # NOTE(review): the parenthesisation below floors the raw weighted sum of
    # squares, not the variance -- identical to MatchGenoudStage1; confirm.
    Mu.X <- matrix(0, xvars, 1)
    Sig.X <- matrix(0, xvars, 1)
    weights.sum <- sum(weights)
    for (k in 1:xvars)
      {
        Mu.X[k,1] <- sum(X[,k]*weights)/weights.sum;
        eps <- X[,k]-Mu.X[k,1]
        Sig.X[k,1] <- sqrt(max(tolerance, sum(X[,k]*X[,k]*weights))/weights.sum-Mu.X[k,1]^2)
        Sig.X[k,1] <- Sig.X[k,1]*sqrt(N/(N-1))
        if(Sig.X[k,1] < tolerance)
          Sig.X[k,1] <- tolerance
        X[,k]=eps/Sig.X[k,1]
      } #end of k loop
    ret <- list(Tr=Tr, X=X, All=All, M=M, N=N, ecaliper=ecaliper)
    return(ret)
  } #end of MatchGenoudStage1caliper
###############################################################################
## GenMatch
##
###############################################################################
GenMatch <- function(Tr, X, BalanceMatrix=X, estimand="ATT", M=1,
weights=NULL,
pop.size = 100, max.generations=100,
wait.generations=4, hard.generation.limit=FALSE,
starting.values=rep(1,ncol(X)),
fit.func="pvals",
MemoryMatrix=TRUE,
exact=NULL, caliper=NULL, replace=TRUE, ties=TRUE,
CommonSupport=FALSE,nboots=0, ks=TRUE, verbose=FALSE,
distance.tolerance=0.00001,
tolerance=sqrt(.Machine$double.eps),
min.weight=0,
max.weight=1000,
Domains=NULL,
print.level=2,
project.path=NULL,
paired=TRUE,
loss=1,
data.type.integer=FALSE,
restrict=NULL,
cluster=FALSE,
balance=TRUE, ...)
{
# Genetic search (via rgenoud::genoud) over the diagonal of a Mahalanobis
# weight matrix, choosing weights that maximize covariate balance between
# matched treated and control units. Returns a 'GenMatch' object holding
# the achieved fit value(s), the optimal weights, the weight matrix, the
# matched pairs, and (when calipers are used) the effective caliper vector.
requireNamespace("rgenoud")
# ---- coercion and basic input validation ----
Tr <- as.double(Tr)
X <- as.matrix(X)
BalanceMatrix <- as.matrix(BalanceMatrix)
if(length(Tr) != nrow(X))
{
stop("length(Tr) != nrow(X)")
}
# when fit.func is a user-supplied function, BalanceMatrix may have a
# different number of rows; only check conformity for the built-ins
if(!is.function(fit.func))
{
if(nrow(BalanceMatrix) != length(Tr))
{
stop("nrow(BalanceMatrix) != length(Tr)")
}
}
if (is.null(weights))
{
weights <- rep(1,length(Tr))
weights.flag <- FALSE
} else {
weights.flag <- TRUE
weights <- as.double(weights)
if( length(Tr) != length(weights))
{
stop("length(Tr) != length(weights)")
}
}
isna <- sum(is.na(Tr)) + sum(is.na(X)) + sum(is.na(weights)) + sum(is.na(BalanceMatrix))
if (isna!=0)
{
stop("GenMatch(): input includes NAs")
return(invisible(NULL))
}
#check inputs
if (sum(Tr !=1 & Tr !=0) > 0) {
stop("Treatment indicator must be a logical variable---i.e., TRUE (1) or FALSE (0)")
}
if (var(Tr)==0) {
stop("Treatment indicator ('Tr') must contain both treatment and control observations")
}
if (distance.tolerance < 0)
{
warning("User set 'distance.tolerance' to less than 0. Resetting to the default which is 0.00001.")
distance.tolerance <- 0.00001
}
#CommonSupport: drop observations outside the overlap of the first column
#of X (typically the propensity score) across treatment groups
if (CommonSupport !=1 & CommonSupport !=0) {
stop("'CommonSupport' must be a logical variable---i.e., TRUE (1) or FALSE (0)")
}
if(CommonSupport==TRUE)
{
tr.min <- min(X[Tr==1,1])
tr.max <- max(X[Tr==1,1])
co.min <- min(X[Tr==0,1])
co.max <- max(X[Tr==0,1])
if(tr.min >= co.min)
{
indx1 <- X[,1] < (tr.min-distance.tolerance)
} else {
indx1 <- X[,1] < (co.min-distance.tolerance)
}
if(co.max <= tr.max)
{
indx2 <- X[,1] > (co.max+distance.tolerance)
} else {
indx2 <- X[,1] > (tr.max+distance.tolerance)
}
indx3 <- indx1==0 & indx2==0
Tr <- as.double(Tr[indx3])
X <- as.matrix(X[indx3,])
BalanceMatrix <- as.matrix(BalanceMatrix[indx3,])
weights <- as.double(weights[indx3])
}#end of CommonSupport
# ---- sanity checks on tuning parameters (reset to defaults with a warning) ----
if (pop.size < 0 | pop.size!=round(pop.size) )
{
warning("User set 'pop.size' to an illegal value. Resetting to the default which is 100.")
pop.size <- 100
}
if (max.generations < 0 | max.generations!=round(max.generations) )
{
warning("User set 'max.generations' to an illegal value. Resetting to the default which is 100.")
max.generations <-100
}
if (wait.generations < 0 | wait.generations!=round(wait.generations) )
{
warning("User set 'wait.generations' to an illegal value. Resetting to the default which is 4.")
wait.generations <- 4
}
if (hard.generation.limit != 0 & hard.generation.limit !=1 )
{
warning("User set 'hard.generation.limit' to an illegal value. Resetting to the default which is FALSE.")
hard.generation.limit <- FALSE
}
# NOTE(review): the warning text says the default is TRUE and the reset
# value is TRUE, but the declared default in the signature is FALSE.
if (data.type.integer != 0 & data.type.integer !=1 )
{
warning("User set 'data.type.integer' to an illegal value. Resetting to the default which is TRUE.")
data.type.integer <- TRUE
}
if (MemoryMatrix != 0 & MemoryMatrix !=1 )
{
warning("User set 'MemoryMatrix' to an illegal value. Resetting to the default which is TRUE.")
MemoryMatrix <- TRUE
}
if (nboots < 0 | nboots!=round(nboots) )
{
warning("User set 'nboots' to an illegal value. Resetting to the default which is 0.")
nboots <- 0
}
if (ks != 0 & ks !=1 )
{
warning("User set 'ks' to an illegal value. Resetting to the default which is TRUE.")
ks <- TRUE
}
if (verbose != 0 & verbose !=1 )
{
warning("User set 'verbose' to an illegal value. Resetting to the default which is FALSE.")
verbose <- FALSE
}
if (min.weight < 0)
{
warning("User set 'min.weight' to an illegal value. Resetting to the default which is 0.")
min.weight <- 0
}
if (max.weight < 0)
{
warning("User set 'max.weight' to an illegal value. Resetting to the default which is 1000.")
max.weight <- 1000
}
if (print.level != 0 & print.level !=1 & print.level !=2 & print.level !=3)
{
warning("User set 'print.level' to an illegal value. Resetting to the default which is 2.")
print.level <- 2
}
if (paired != 0 & paired !=1 )
{
warning("User set 'paired' to an illegal value. Resetting to the default which is TRUE.")
paired <- FALSE
}
# NOTE(review): the reset above assigns FALSE although the warning and the
# signature default say TRUE.
##from Match()
if (tolerance < 0)
{
warning("User set 'tolerance' to less than 0. Resetting to the default which is 0.00001.")
tolerance <- 0.00001
}
if (M < 1)
{
warning("User set 'M' to less than 1. Resetting to the default which is 1.")
M <- 1
}
if ( M!=round(M) )
{
warning("User set 'M' to an illegal value. Resetting to the default which is 1.")
M <- 1
}
if (replace!=FALSE & replace!=TRUE)
{
warning("'replace' must be TRUE or FALSE. Setting to TRUE")
replace <- TRUE
}
# matching without replacement cannot produce ties
if(replace==FALSE)
ties <- FALSE
if (ties!=FALSE & ties!=TRUE)
{
warning("'ties' must be TRUE or FALSE. Setting to TRUE")
ties <- TRUE
}
#print warning if pop.size, max.generations and wait.generations are all set to their original values
if(pop.size==100 & max.generations==100 & wait.generations==4)
{
warning("The key tuning parameters for optimization were are all left at their default values. The 'pop.size' option in particular should probably be increased for optimal results. For details please see the help page and http://sekhon.berkeley.edu/papers/MatchingJSS.pdf")
}
#loss function: 1 = lexical optimization over all sorted p-values,
#2 = maximize the single worst (minimum) p-value; or a user function
if (is.double(loss))
{
if (loss==1) {
loss.func=sort
lexical=ncol(BalanceMatrix)
if(ks)
lexical=lexical+lexical
} else if(loss==2) {
loss.func=min
lexical=0
} else{
stop("unknown loss function")
}
} else if (is.function(loss)) {
loss.func=loss
lexical=1
} else {
stop("unknown loss function")
}
#set lexical for fit.func
if (is.function(fit.func))
{
lexical = 1
} else if (fit.func=="qqmean.max" | fit.func=="qqmedian.max" | fit.func=="qqmax.max") {
lexical=ncol(BalanceMatrix)
} else if (fit.func!="qqmean.mean" & fit.func!="qqmean.max" &
fit.func!="qqmedian.median" & fit.func!="qqmedian.max"
& fit.func!="pvals") {
stop("invalid 'fit.func' argument")
} else if (!fit.func=="pvals") {
lexical = 0
}
# NOTE(review): "qqmax.mean" is dispatched inside genoudfunc below but is
# rejected by this validation list — confirm whether it should be allowed.
if(replace==FALSE)
{
#replace==FALSE: matching without replacement needs enough observations
#on the other side; warn when some units cannot be matched
#ATT
orig.weighted.control.nobs <- sum(weights[Tr!=1])
orig.weighted.treated.nobs <- sum(weights[Tr==1])
if(estimand=="ATC")
{
if (orig.weighted.treated.nobs < orig.weighted.control.nobs)
{
warning("replace==FALSE, but there are more (weighted) control obs than treated obs. Some obs will be dropped. You may want to estimate ATC instead")
}
} else if(estimand=="ATE")
{
#ATE
if (orig.weighted.treated.nobs > orig.weighted.control.nobs)
{
warning("replace==FALSE, but there are more (weighted) treated obs than control obs. Some treated obs will not be matched. You may want to estimate ATC instead.")
}
if (orig.weighted.treated.nobs < orig.weighted.control.nobs)
{
warning("replace==FALSE, but there are more (weighted) control obs than treated obs. Some control obs will not be matched. You may want to estimate ATT instead.")
}
} else {
#ATT
if (orig.weighted.treated.nobs > orig.weighted.control.nobs)
{
warning("replace==FALSE, but there are more (weighted) treated obs than control obs. Some treated obs will not be matched. You may want to estimate ATC instead.")
}
}
#we need a restrict matrix if we are going to not do replacement
if(is.null(restrict))
{
restrict <- t(as.matrix(c(0,0,0)))
}
}#end of replace==FALSE
#check the restrict matrix input
if(!is.null(restrict))
{
if(!is.matrix(restrict))
stop("'restrict' must be a matrix of restricted observations rows and three columns: c(i,j restriction)")
if(ncol(restrict)!=3 )
stop("'restrict' must be a matrix of restricted observations rows and three columns: c(i,j restriction)")
restrict.trigger <- TRUE
} else {
restrict.trigger <- FALSE
}
# choose the slower caliper-aware C routine only when one of these
# features is actually requested
if(!is.null(caliper) | !is.null(exact) | restrict.trigger | !ties)
{
GenMatchCaliper.trigger <- TRUE
} else {
GenMatchCaliper.trigger <- FALSE
}
isunix <- .Platform$OS.type=="unix"
if (is.null(project.path))
{
if (print.level < 3 & isunix)
{
project.path="/dev/null"
} else {
project.path=paste(tempdir(),"/genoud.pro",sep="")
#work around for rgenoud bug
#if (print.level==3)
#print.level <- 2
}
}
nvars <- ncol(X)
balancevars <- ncol(BalanceMatrix)
# default search domain for each weight is [min.weight, max.weight];
# if the user supplied Domains, clamp out-of-range starting values
if (is.null(Domains))
{
Domains <- matrix(min.weight, nrow=nvars, ncol=2)
Domains[,2] <- max.weight
} else {
indx <- (starting.values < Domains[,1]) | (starting.values > Domains[,2])
starting.values[indx] <- round( (Domains[indx,1]+Domains[indx,2])/2 )
}
# create All: integer estimand flag passed to the C code
# (ATT=0, ATE=1, ATC=2)
if (estimand=="ATT")
{
All <- 0
} else if(estimand=="ATE") {
All <- 1
} else if(estimand=="ATC") {
All <- 2
} else {
All <- 0
warning("User set 'estimand' to an illegal value. Resetting to the default which is 'ATT'")
}
#stage 1 Match, only needs to be called once: standardizes X and
#precomputes quantities reused at every fitness evaluation
if(!GenMatchCaliper.trigger)
{
s1 <- MatchGenoudStage1(Tr=Tr, X=X, All=All, M=M, weights=weights,
tolerance=tolerance);
s1.Tr <- s1$Tr
s1.X <- s1$X
s1.All <- s1$All
s1.M <- s1$M
s1.N <- s1$N
rm(s1)
} else {
s1 <- MatchGenoudStage1caliper(Tr=Tr, X=X, All=All, M=M, weights=weights,
exact=exact, caliper=caliper,
distance.tolerance=distance.tolerance,
tolerance=tolerance)
s1.Tr <- s1$Tr
s1.X <- s1$X
s1.All <- s1$All
s1.M <- s1$M
s1.N <- s1$N
s1.ecaliper <- s1$ecaliper
if (is.null(s1.ecaliper))
{
caliperFlag <- 0
Xorig <- 0
CaliperVec <- 0
} else {
caliperFlag <- 1
Xorig <- X
CaliperVec <- s1$ecaliper
}
rm(s1)
} #GenMatchCaliper.trigger
# fitness function evaluated by genoud for each candidate weight vector x:
# match with those weights, then score the balance of the resulting pairs
genoudfunc <- function(x)
{
wmatrix <- diag(x, nrow=nvars)
# ensure the weight matrix is positive definite before Cholesky
if ( min(eigen(wmatrix, symmetric=TRUE, only.values=TRUE)$values) < tolerance )
wmatrix <- wmatrix + diag(nvars)*tolerance
ww <- chol(wmatrix)
if(!GenMatchCaliper.trigger)
{
if (weights.flag==TRUE)
{
FastMatchC.internal <- function(N, xvars, All, M, cdd, ww, Tr, Xmod, weights)
{
ret <- .Call("FastMatchC", as.integer(N), as.integer(xvars), as.integer(All), as.integer(M),
as.double(cdd), as.double(ww), as.double(Tr),
as.double(Xmod), as.double(weights),
PACKAGE="Matching")
return(ret)
}
rr <- FastMatchC.internal(N=s1.N, xvars=nvars, All=s1.All, M=s1.M,
cdd=distance.tolerance, ww=ww, Tr=s1.Tr, Xmod=s1.X,
weights=weights)
} else {
FasterMatchC.internal <- function(N, xvars, All, M, cdd, ww, Tr, Xmod, weights)
{
ret <- .Call("FasterMatchC", as.integer(N), as.integer(xvars), as.integer(All), as.integer(M),
as.double(cdd), as.double(ww), as.double(Tr),
as.double(Xmod),
PACKAGE="Matching")
return(ret)
}
rr <- FasterMatchC.internal(N=s1.N, xvars=nvars, All=s1.All, M=s1.M,
cdd=distance.tolerance, ww=ww, Tr=s1.Tr, Xmod=s1.X)
} #end of weights.flag
} else {
if (weights.flag==TRUE)
{
MatchLoopC.internal <- function(N, xvars, All, M, cdd, caliperflag, replace, ties, ww, Tr, Xmod, weights, CaliperVec,
Xorig, restrict.trigger, restrict)
{
if(restrict.trigger)
{
restrict.nrow <- nrow(restrict)
} else {
restrict.nrow <- 0
}
ret <- .Call("MatchLoopC", as.integer(N), as.integer(xvars), as.integer(All), as.integer(M),
as.double(cdd), as.integer(caliperflag), as.integer(replace), as.integer(ties), as.double(ww), as.double(Tr),
as.double(Xmod), as.double(weights), as.double(CaliperVec), as.double(Xorig),
as.integer(restrict.trigger), as.integer(restrict.nrow), as.double(restrict),
#next line is sets the DiagWeightMatrixFlag
as.double(1),
PACKAGE="Matching")
return(ret)
} #end of MatchLoopC.internal
rr <- MatchLoopC.internal(N=s1.N, xvars=nvars, All=s1.All, M=s1.M,
cdd=distance.tolerance,
caliperflag=caliperFlag,
replace=replace, ties=ties,
ww=ww, Tr=s1.Tr, Xmod=s1.X, weights=weights,
CaliperVec=CaliperVec, Xorig=Xorig,
restrict.trigger=restrict.trigger, restrict=restrict)
} else {
MatchLoopCfast.internal <- function(N, xvars, All, M, cdd, caliperflag, replace, ties, ww, Tr, Xmod, CaliperVec, Xorig,
restrict.trigger, restrict)
{
if(restrict.trigger)
{
restrict.nrow <- nrow(restrict)
} else {
restrict.nrow <- 0
}
ret <- .Call("MatchLoopCfast", as.integer(N), as.integer(xvars), as.integer(All), as.integer(M),
as.double(cdd), as.integer(caliperflag), as.integer(replace), as.integer(ties), as.double(ww), as.double(Tr),
as.double(Xmod), as.double(CaliperVec), as.double(Xorig),
as.integer(restrict.trigger), as.integer(restrict.nrow), as.double(restrict),
#next line is the DiagWeightMatrixFlag
as.double(1),
PACKAGE="Matching")
return(ret)
} #end of MatchLoopCfast.internal
rr <- MatchLoopCfast.internal(N=s1.N, xvars=nvars, All=s1.All, M=s1.M,
cdd=distance.tolerance,
caliperflag=caliperFlag,
replace=replace, ties=ties,
ww=ww, Tr=s1.Tr, Xmod=s1.X,
CaliperVec=CaliperVec, Xorig=Xorig,
restrict.trigger=restrict.trigger, restrict=restrict)
} #end of weights.flag
#no matches: return a uniformly terrible fitness vector
if(rr[1,1]==0) {
warning("no valid matches found in GenMatch evaluation")
return(rep(-9999, balancevars*2))
}
#keep only (treated index, control index, weight) columns
rr <- rr[,c(4,5,3)]
} #Caliper.Trigger
#should be the same as GenBalance() in GenBalance.R but we need to include it here because of
#cluster scoping issues.
GenBalance.internal <-
function(rr, X, nvars=ncol(X), nboots = 0, ks=TRUE, verbose = FALSE, paired=TRUE)
{
#CUT-AND-PASTE from GenBalance.R, the functions before GenBalance. but get rid of warn *switch*
#wrapper around pt() that retries once and warns if pt() returns NaN
MATCHpt <- function(q, df, ...)
{
#don't know how general it is so let's try to work around it.
ret=pt(q,df, ...)
if (is.na(ret)) {
ret <- pt(q, df, ...)
if(is.na(ret))
warning("pt() generated NaN. q:",q," df:",df,"\n",date())
}
return(ret)
} #end of MATCHpt
#weighted paired t-test p-value on the matched differences
Mt.test.pvalue <- function(Tr, Co, weights)
{
v1 <- Tr-Co
estimate <- sum(v1*weights)/sum(weights)
var1 <- sum( ((v1-estimate)^2)*weights )/( sum(weights)*sum(weights) )
if (estimate==0 & var1==0)
{
return(1)
}
statistic <- estimate/sqrt(var1)
# p.value <- (1-pnorm(abs(statistic)))*2
p.value <- (1-MATCHpt(abs(statistic), df=sum(weights)-1))*2
return(p.value)
} #end of Mt.test.pvalue
#weighted unpaired (Welch-style) t-test p-value
Mt.test.unpaired.pvalue <- function(Tr, Co, weights)
{
obs <- sum(weights)
mean.Tr <- sum(Tr*weights)/obs
mean.Co <- sum(Co*weights)/obs
estimate <- mean.Tr-mean.Co
var.Tr <- sum( ( (Tr - mean.Tr)^2 )*weights)/(obs-1)
var.Co <- sum( ( (Co - mean.Co)^2 )*weights)/(obs-1)
dim <- sqrt(var.Tr/obs + var.Co/obs)
if (estimate==0 & dim==0)
{
return(1)
}
statistic <- estimate/dim
a1 <- var.Tr/obs
a2 <- var.Co/obs
dof <- ((a1 + a2)^2)/( (a1^2)/(obs - 1) + (a2^2)/(obs - 1) )
p.value <- (1-MATCHpt(abs(statistic), df=dof))*2
return(p.value)
} #end of Mt.test.unpaired.pvalue
#Kolmogorov-Smirnov statistic (max ECDF gap) without the full ks.test machinery
ks.fast <- function(x, y, n.x, n.y, n)
{
w <- c(x, y)
z <- cumsum(ifelse(order(w) <= n.x, 1/n.x, -1/n.y))
z <- z[c(which(diff(sort(w)) != 0), n.x + n.y)]
return( max(abs(z)) )
} #ks.fast
index.treated <- rr[,1]
index.control <- rr[,2]
weights <- rr[,3]
tol <- .Machine$double.eps*100
#9 is a sentinel meaning "no value computed yet"
storage.t <- c(rep(9,nvars))
storage.k <- c(rep(9,nvars))
fs.ks <- matrix(nrow=nvars, ncol=1)
s.ks <- matrix(nrow=nvars, ncol=1)
bbcount <- matrix(0, nrow=nvars, ncol=1)
dummy.indx <- matrix(0, nrow=nvars, ncol=1)
w <- c(X[,1][index.treated], X[,1][index.control])
obs <- length(w)
n.x <- length(X[,1][index.treated])
n.y <- length(X[,1][index.control])
cutp <- round(obs/2)
w <- matrix(nrow=obs, ncol=nvars)
for (i in 1:nvars)
{
w[,i] <- c(X[,i][index.treated], X[,i][index.control])
if(paired)
{
t.out <- Mt.test.pvalue(X[,i][index.treated],
X[,i][index.control],
weights = weights)
} else {
t.out <- Mt.test.unpaired.pvalue(X[,i][index.treated],
X[,i][index.control],
weights = weights)
}
storage.t[i] <- t.out
#treat variables with fewer than 3 unique values as dummies: skip KS
dummy.indx[i] <- length(unique(X[,i])) < 3
if (!dummy.indx[i] & ks & nboots > 9)
{
fs.ks[i] <- ks.fast(X[,i][index.treated], X[,i][index.control],
n.x=n.x, n.y=n.y, n=obs)
} else if(!dummy.indx[i] & ks)
{
storage.k[i] <- Mks.test(X[,i][index.treated], X[,i][index.control])$p.value
}
}#end of i loop
#bootstrap KS p-values when nboots > 9
if (ks & nboots > 9)
{
n.x <- cutp
n.y <- obs-cutp
for (b in 1:nboots)
{
sindx <- sample(1:obs, obs, replace = TRUE)
for (i in 1:nvars)
{
if (dummy.indx[i])
next;
X1tmp <- w[sindx[1:cutp],i ]
X2tmp <- w[sindx[(cutp + 1):obs], i]
s.ks[i] <- ks.fast(X1tmp, X2tmp, n.x=n.x, n.y=n.y, n=obs)
if (s.ks[i] >= (fs.ks[i] - tol) )
bbcount[i] <- bbcount[i] + 1
}#end of i loop
} #end of b loop
for (i in 1:nvars)
{
if (dummy.indx[i])
{
storage.k[i] <- 9
next;
}
storage.k[i] <- bbcount[i]/nboots
}
#for dummies (sentinel 9), fall back to the t-test p-value
storage.k[storage.k==9]=storage.t[storage.k==9]
output <- c(storage.t, storage.k)
} else if(ks){
storage.k[storage.k==9]=storage.t[storage.k==9]
output <- c(storage.t, storage.k)
} else {
output <- storage.t
}
#NaNs are replaced with 2 (worse than any real p-value can be... it is
#above 1, so sorts last under maximization)
if(sum(is.na(output)) > 0) {
output[is.na(output)] = 2
warning("output has NaNs")
}
if (verbose == TRUE)
{
cat("\n")
for (i in 1:nvars)
{
cat("\n", i, " t-test p-val =", storage.t[i], "\n" )
if(ks)
cat(" ", i, " ks-test p-val = ", storage.k[i], " \n",sep="")
}
cat("\nsorted return vector:\n", sort(output), "\n")
cat("number of return values:", length(output), "\n")
}
return(output)
} #end of GenBalance.internal
#summarize standardized QQ-plot discrepancies per balance variable,
#then aggregate across variables
GenBalanceQQ.internal <- function(rr, X, summarystat="mean", summaryfunc="mean")
{
index.treated <- rr[,1]
index.control <- rr[,2]
nvars <- ncol(X)
qqsummary <- c(rep(NA,nvars))
for (i in 1:nvars)
{
qqfoo <- qqstats(X[,i][index.treated], X[,i][index.control], standardize=TRUE)
if (summarystat=="median")
{
qqsummary[i] <- qqfoo$mediandiff
} else if (summarystat=="max") {
qqsummary[i] <- qqfoo$maxdiff
} else {
qqsummary[i] <- qqfoo$meandiff
}
} #end of for loop
# NOTE(review): the "max" and "sort" branches below return identical
# results (both sort decreasing); confirm "max" was meant to be max().
if (summaryfunc=="median")
{
return(median(qqsummary))
} else if (summaryfunc=="max") {
return(sort(qqsummary, decreasing=TRUE))
} else if (summaryfunc=="sort") {
return(sort(qqsummary, decreasing=TRUE))
} else {
return(mean(qqsummary))
}
} #end of GenBalanceQQ.internal
#dispatch on fit.func to produce the fitness value(s) for genoud
if (is.function(fit.func)) {
a <- fit.func(rr, BalanceMatrix)
return(a)
} else if (fit.func=="pvals")
{
a <- GenBalance.internal(rr=rr, X=BalanceMatrix, nvars=balancevars, nboots=nboots,
ks=ks, verbose=verbose, paired=paired)
a <- loss.func(a)
return(a)
} else if (fit.func=="qqmean.mean") {
a <- GenBalanceQQ.internal(rr=rr, X=BalanceMatrix, summarystat="mean", summaryfunc="mean")
return(a)
} else if (fit.func=="qqmean.max") {
a <- GenBalanceQQ.internal(rr=rr, X=BalanceMatrix, summarystat="mean", summaryfunc="max")
return(a)
} else if (fit.func=="qqmax.mean") {
a <- GenBalanceQQ.internal(rr=rr, X=BalanceMatrix, summarystat="max", summaryfunc="mean")
return(a)
} else if (fit.func=="qqmax.max") {
a <- GenBalanceQQ.internal(rr=rr, X=BalanceMatrix, summarystat="max", summaryfunc="max")
return(a)
} else if (fit.func=="qqmedian.median") {
a <- GenBalanceQQ.internal(rr=rr, X=BalanceMatrix, summarystat="median", summaryfunc="median")
return(a)
} else if (fit.func=="qqmedian.max") {
a <- GenBalanceQQ.internal(rr=rr, X=BalanceMatrix, summarystat="median", summaryfunc="max")
return(a)
}
} #end genoudfunc
#cluster info: cluster may be FALSE, a 'parallel' cluster object, or a
#vector of machine names (in which case we create/destroy the cluster)
clustertrigger=1
if (is.logical(cluster))
{
if (cluster==FALSE) {
clustertrigger=0
} else {
stop("cluster option must be either FALSE, an object of the 'cluster' class (from the 'parallel' package) or a list of machines so 'genoud' can create such an object")
}
}
if(clustertrigger) {
parallel.exists = requireNamespace("parallel")
if (!parallel.exists) {
stop("The 'cluster' feature cannot be used unless the package 'parallel' can be loaded.")
}
}
if(clustertrigger)
{
#copy objects by name into each worker's global environment
GENclusterExport <- function (cl, list, envir = .GlobalEnv)
{
gets <- function(n, v) {
assign(n, v, envir = envir)
NULL
}
for (name in list) {
parallel::clusterCall(cl, gets, name, get(name))
}
}
if (class(cluster)[1]=="SOCKcluster" | class(cluster)[1]=="PVMcluster" | class(cluster)[1]=="spawnedMPIcluster" | class(cluster)[1]=="MPIcluster") {
clustertrigger=1
cl <- cluster
cl.genoud <- cl
} else {
clustertrigger=2
cluster <- as.vector(cluster)
cat("Initializing Cluster\n")
cl <- parallel::makePSOCKcluster(cluster)
cl.genoud <- cl
}
} else {
cl.genoud <- FALSE
}#end of clustertrigger
if (clustertrigger > 0)
{
#create restrict.summary, because passing the entire restrict matrix is too much
parallel::clusterEvalQ(cl, library("Matching"))
GENclusterExport(cl, c("s1.N", "s1.All", "s1.M", "s1.Tr", "s1.X", "nvars",
"tolerance", "distance.tolerance", "weights",
"BalanceMatrix", "balancevars", "nboots", "ks", "verbose", "paired", "loss.func",
"fit.func"))
if(GenMatchCaliper.trigger) {
GENclusterExport(cl, c("caliperFlag", "CaliperVec", "Xorig", "restrict.trigger", "restrict","replace"))
}
GENclusterExport(cl, "genoudfunc")
}
#maximize only for the "pvals" fitness (p-values: bigger is better);
#QQ-based fitnesses are discrepancies and are minimized
do.max <- FALSE
if(!is.function(fit.func))
{
if (fit.func=="pvals")
do.max <- TRUE
}
#run the genetic optimization over the weight vector
rr <- rgenoud::genoud(genoudfunc, nvars=nvars, starting.values=starting.values,
pop.size=pop.size, max.generations=max.generations,
wait.generations=wait.generations, hard.generation.limit=hard.generation.limit,
Domains=Domains,
MemoryMatrix=MemoryMatrix,
max=do.max, gradient.check=FALSE, data.type.int=data.type.integer,
hessian=FALSE,
BFGS=FALSE, project.path=project.path, print.level=print.level,
lexical=lexical,
cluster=cl.genoud,
balance=balance,
...)
#re-run the match once with the optimal weights to return the final pairs
wmatrix <- diag(rr$par, nrow=nvars)
if ( min(eigen(wmatrix, symmetric=TRUE, only.values=TRUE)$values) < tolerance )
wmatrix <- wmatrix + diag(nvars)*tolerance
ww <- chol(wmatrix)
if(!GenMatchCaliper.trigger)
{
mout <- FastMatchC(N=s1.N, xvars=nvars, All=s1.All, M=s1.M,
cdd=distance.tolerance, ww=ww, Tr=s1.Tr, Xmod=s1.X,
weights=weights)
rr2 <- list(value=rr$value, par=rr$par, Weight.matrix=wmatrix, matches=mout, ecaliper=NULL)
} else {
mout <- MatchLoopC(N=s1.N, xvars=nvars, All=s1.All, M=s1.M,
cdd=distance.tolerance,
caliperflag=caliperFlag,
replace=replace, ties=ties,
ww=ww, Tr=s1.Tr, Xmod=s1.X, weights=weights,
CaliperVec=CaliperVec, Xorig=Xorig,
restrict.trigger=restrict.trigger, restrict=restrict,
DiagWeightMatrixFlag=1)
#no matches
if(mout[1,1]==0) {
warning("no valid matches found by GenMatch")
}
rr2 <- list(value=rr$value, par=rr$par, Weight.matrix=wmatrix, matches=mout, ecaliper=CaliperVec)
}
if (clustertrigger==2)
parallel::stopCluster(cl)
class(rr2) <- "GenMatch"
return(rr2)
} #end of GenMatch
|
348143b2b44b3db225adc5cbba16e5588d54e85e
|
861d85c0a3d8dc4be9caee89df4cacea16b461df
|
/UniversityCleanCode.R
|
a490b8ee4779d8fbce456bfe9d8e19d83dd08800
|
[] |
no_license
|
s-mcknight/Sports-Education-Ranking
|
70f5bf7ee5736ba412f51ca5693f597f0f87f205
|
f54a5483fd578ffbf07cd891bf50010c4f2aecf8
|
refs/heads/master
| 2020-03-13T22:16:49.267524
| 2018-04-27T15:35:40
| 2018-04-27T15:35:40
| 131,313,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,674
|
r
|
UniversityCleanCode.R
|
# UniversityCleanCode.R
# Builds a latent-variable "overall school quality" score for colleges from
# academic and athletic indicators, using SEM (lavaan) with multiple
# imputation (mice), and writes out a ranking CSV.
#read in data (drops first column; assumes Colleges.csv sits in the working dir)
colleges <- read.csv("Colleges.csv", header=TRUE)[,-1]
rownames(colleges) <- colleges$SchoolName
#smaller version of dataset with only important variables (selected by position)
colleges_small <- colleges[, c(2, 4:6, 11, 13, 15, 17, 21:22, 24:34)]
#manipulating variables
colleges_small$Div <- as.factor(colleges_small$Div)
colleges_small$Accredited <- as.factor(colleges_small$Accredited)
#removing colleges that aren't accredited (1 college)
colleges_small <- colleges_small[-c(which(colleges_small$Accredited == 0)),]
#removing accredited variable (constant after the filter above)
colleges_small <- colleges_small[,-12]
#missingness without sports
require(Amelia)
missmap(colleges_small[,-c(5:8)]) #PT Ret and ACT/SAT have most missing
M <- cor(colleges_small[,-1], use="pairwise.complete.obs")
corrplot::corrplot.mixed(M)
# exploratory plots: collinearity and missingness diagnostics
par(mfrow=c(2,2), xpd=FALSE)
library(beanplot)
plot(colleges_small$AvgSAT~colleges_small$MedACT, xlab="Median ACT", ylab="Average SAT", main="a) SAT vs. ACT")
abline(lm(colleges_small$AvgSAT~colleges_small$MedACT),col="red")
text(x=15, y=1400, "r=0.99", col="red")
plot(colleges_small$Comp150~colleges_small$FTRet, xlab="Full-Time Retention Rate", ylab="6-year Completion Rate", main="b) Completion Rate vs. Retention Rate")
abline(lm(colleges_small$Comp150~colleges_small$FTRet), col="red")
text(x=0.4, y=0.9, "r=0.88", col="red")
beanplot(colleges_small$PctPT ~ is.na(colleges_small$PTRet), main="c) Percent Part-Time by Missingness of Part-Time Retention", xlab="Part-Time Retention Missing?", ylab="Percent Part-Time", what=c(1,1,1,0))
boxplot(colleges_small$MedEarn10Yr, main="d) Median Earnings After 10 Years", horizontal=TRUE, xlab="Earnings ($)")
cor(colleges_small$AvgSAT,colleges_small$PctAdmit, use="pairwise.complete.obs")
par(mfrow=c(1,2))
plot(colleges_small$Comp150~colleges_small$PctPT, main="a) Completion Rate vs. Percent Part-Time", xlab="Percent Part-Time", ylab="6-year Completion Rate")
abline(lm(colleges_small$Comp150~colleges_small$PctPT), col="red")
text(x=0.6, y=0.9, "r=-0.52", col="red")
plot(colleges_small$AvgSAT~colleges_small$PctAdmit, xlab="Percent Admitted", ylab="Average SAT", main="b) SAT vs. Admission")
abline(lm(colleges_small$AvgSAT~colleges_small$PctAdmit), col="red")
text(x=0.9, y=1400, "r=-0.45", col="red")
boxplot(colleges_small$PctPT ~ is.na(colleges_small$PTRet), main="Percent Part-Time by Missingness of Part-Time Retention", xlab="Part-Time Retention Missing?", ylab="Percent Part-Time")
#association between Percent part-time and missingness of part-time retention(delete part-time retention)
#deleting variables above (by position: PTRet plus two collinear columns)
colleges_small <- colleges_small[,-c(9,13,14)]
#variable manipulation: recode Div to a 0/1 indicator for Division I
colleges_small$Div <- ifelse(colleges_small$Div==1, 1, 0)
colleges_small <- plyr::rename(colleges_small, c(PCTFLOAN = "PctFLoan"))
# NOTE(review): if PctFLoan is a factor here, as.numeric() yields level
# indices, not the underlying percentages — confirm the column's class.
colleges_small$PctFLoan <- as.numeric(colleges_small$PctFLoan)
pairs(colleges_small[,c(2:8, 14)]) #sports and completion rate
pairs(colleges_small[,9:17]) #student/school qualities
#median earnings looks skewed
#univariate plots of median earnings
boxplot(colleges_small$MedEarn10Yr, main="Median Earnings After 10 Years", horizontal=TRUE)
boxplot(log(colleges_small$MedEarn10Yr), main="Log Median Earnings After 10 Years", horizontal=TRUE)
#take log of median earnings to reduce right skew
colleges_small$logMedEarn10Yr <- log(colleges_small$MedEarn10Yr)
pairs(colleges_small[,c(9:12, 14:18)]) #plot above using log median earnings
#football win percent has about 50% missingness; this is a variable for the existence of a football team
colleges_small$Football <- ifelse(is.na(colleges_small$FBWinPct), 0, 1)
library(GGally)
library(ggplot2)
ggpairs(colleges_small[,c(1,3,5,6,8,19,9,10,18,14,2,17)], columnLabels = c("Division", "# Sports", "MBB Win %", "WBB Win %", "WVB Win %", "Football", "Avg. SAT", "% Part-Time", "log(Earnings)", "6-Yr Completion", "GSR", "% Admit"))
ggpairs(colleges_small[,c(9,18,14,2,3)], columnLabels=c("Avg. Sat", "log(Earnings)", "6-Yr Completion", "GSR", "# Sports"))
# SEM with two first-order latent traits (Sports, Student) loading on a
# single second-order trait (overall)
model4 <- '
#Measurement model
Sports=~Div+NoSports+MBBWinPct+WBBWinPct+WVBWinPct+Football
Student=~AvgSAT+PctPT+logMedEarn10Yr+Comp150+GSR+PctAdmit
#Make a latent trait from two latent traits
overall=~Sports+Student
'
lavaan_sem2 <- lavaan::sem(model4, data=colleges_small, std.lv=TRUE)
lavaan::summary(lavaan_sem2, fit.measures=TRUE) #fit assessed via fit.measures output
semPlot::semPaths(lavaan_sem2, "par", mar=c(10,10,10,10), fade=F, layout="tree", nCharNodes=8, label.cex=1.5)
#Robust SEs:
lavaan_sem_r2 <- lavaan::sem(model4, data=colleges_small, std.lv=TRUE,se="robust.huber.white")
lavaan::summary(lavaan_sem_r2, fit.measures=TRUE, standardized=TRUE)
lavaan::parameterEstimates(lavaan_sem_r2)
plot(colleges_small$PctPT, colleges_small$Comp150)
plot(colleges_small$PctAdmit, colleges_small$AvgSAT)
# simplified single-factor model used for the final ranking
model5 <- 'School=~AvgSAT+logMedEarn10Yr+Comp150+GSR+NoSports'
lavaan_sem3 <- lavaan::sem(model5, data=colleges_small, std.lv=TRUE, se="robust.huber.white")
lavaan::summary(lavaan_sem3, fit.measures=TRUE, standardized=TRUE) #fit assessed via fit.measures output
semPlot::semPaths(lavaan_sem3, "par", mar=c(10,10,10,10), fade=F, layout="tree", nCharNodes=8, label.cex=1.5)
require(mice)
set.seed(062117)
# generate 5 multiple complete datasets
out <- mice(colleges_small, m=5)
D1 <- complete(out, 1)
D2 <- complete(out, 2)
D3 <- complete(out, 3)
D4 <- complete(out, 4)
D5 <- complete(out, 5)
# fit model for each complete dataset
require(lavaan)
fit1 <- sem(model5, data=D1, std.lv=TRUE, se="robust.huber.white")
fit2 <- sem(model5, data=D2, std.lv=TRUE, se="robust.huber.white")
fit3 <- sem(model5, data=D3, std.lv=TRUE, se="robust.huber.white")
fit4 <- sem(model5, data=D4, std.lv=TRUE, se="robust.huber.white")
fit5 <- sem(model5, data=D5, std.lv=TRUE, se="robust.huber.white")
# predict scores for all models (factor scores per college)
p1 <- predict(fit1)
p2 <- predict(fit2)
p3 <- predict(fit3)
p4 <- predict(fit4)
p5 <- predict(fit5)
# compute average across 5 sets of scores:
scores_mice <- (p1 + p2 + p3 + p4 + p5)/5
par(mfrow=c(1,2))
qqnorm(scores_mice, main="a) Q-Q Plot of Scores")
qqline(scores_mice, col="red")
scores_micedf <- scores_mice[order(scores_mice)]
require(beanplot)
beanplot(scores_micedf, what=c(0,1,0,0), col="white", main="b) Distribution of Scores", ylab="Highest Scores Lowest Scores")
beanplot(scores_micedf[-321], col=c(0,8,8,8), what=c(0,0,0,1), method="stack", add=TRUE)
scores_micedf <- as.data.frame(scores_micedf)
rownames(scores_micedf) <- rownames(colleges_small[order(scores_mice),])
# NOTE(review): 646 is the hard-coded row count of the filtered data —
# this breaks silently if the input CSV changes size.
scores_micedf$rank <- 1:646
write.csv(scores_micedf, "rankings.csv")
|
40c1bbb48bc3a23bf635c74ac6d66147c33cea21
|
9d0500397db28edeba4ab6d3b801cb5edcdc9371
|
/man/decathlon_s2p.Rd
|
adb5bf154233a60a17d6c8cc133bb335e82c331b
|
[] |
no_license
|
VictorNautica/multievents
|
8e7dff642127baa34c0882a3bcfd3c865b00f3db
|
9e0bc4665446e0a66e9f06915fc8c262b047a13c
|
refs/heads/master
| 2021-07-14T12:20:43.589043
| 2020-06-07T16:14:55
| 2020-06-07T16:14:55
| 162,026,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 980
|
rd
|
decathlon_s2p.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decathlon_s2p.R
\name{decathlon_s2p}
\alias{decathlon_s2p}
\title{Convert decathlon scores into points}
\usage{
decathlon_s2p(
X100m = 9999,
LJ = 0,
SP = 0,
HJ = 0,
X400m = 9999,
X110mh = 9999,
DT = 0,
PV = 0,
JT = 0,
X1500m = 9999
)
}
\arguments{
\item{X100m}{A 100m time, in seconds}
\item{LJ}{A long jump measurement, in m}
\item{SP}{A shot put measurement, in m}
\item{HJ}{A high jump measurement, in m}
\item{X400m}{A 400m time, in seconds}
\item{X110mh}{A 110m hurdles time, in seconds}
\item{DT}{A discus throw measurement, in m}
\item{PV}{A pole vault measurement, in m}
\item{JT}{A javelin throw measurement, in m}
\item{X1500m}{A 1500m time, in seconds, or in minutes:seconds (m:ss) as a character vector}
}
\description{
\code{decathlon_s2p} calculates scores for performances in the decathlon, as
well as providing useful descriptive and summary statistics.
}
|
5a0ae955046e63ee9a4d5839ed47cc834f60cd48
|
23d1c6c910bb2cf19164c934242efbce7653f9ef
|
/R/newtons_method.R
|
cff832b05e44a17b9a2fda62e1dc489ff385ffac
|
[] |
no_license
|
ooelrich/OBBP
|
c12c9d96cf400c54e6e86081e1650f5ef4f18ad2
|
6c0679ae11438f3635400610617fed0de03b1b68
|
refs/heads/master
| 2022-04-28T20:42:01.651349
| 2020-04-30T12:28:27
| 2020-04-30T12:28:27
| 257,657,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
r
|
newtons_method.R
|
#' Newton's method for finding optima
#'
#' A simple implementation of Newton's method to find the posterior mode.
#' For use with the Laplace approximation in exercise 2, lab1. Iterates the
#' Newton update for a GP prior (with a small diagonal jitter added to
#' \code{k_xx}) until successive \code{f} vectors are close.
#'
#' @param y Vector of outcome variables.
#' @param k_xx Covariance matrix of the GP; must be
#'   \code{length(y)} x \code{length(y)}.
#' @param precision Measured by the squared distance between two consecutive
#'   f vectors. Defaults to 0.05.
#' @return A \code{length(y)} x 1 matrix containing the posterior mode.
#'
#' @export
newtons_method <- function(y, k_xx, precision = 0.05) {
  n <- length(y)
  f <- rep(0, n)
  f_old <- rep(1, n)
  # Jitter dimension follows length(y); previously hard-coded to 16, which
  # failed for any input of a different size. The inverse is loop-invariant,
  # so compute it once instead of on every iteration.
  k_inv <- solve(k_xx + diag(0.01, n))
  while ((t(f - f_old) %*% (f - f_old)) > precision) {
    # W = diag(exp(f)): negative Hessian of the (Poisson-style) log-likelihood
    w <- diag(exp(as.vector(f)), nrow = n)
    ch <- chol(k_inv + w)
    f_old <- f
    # (K^-1 + W)^-1 (W f + y - exp(f)) via the Cholesky factor
    f <- solve(ch) %*% solve(t(ch)) %*% (w %*% f + y - exp(f))
  }
  return(f)
}
|
0f923d1e037d1e38e0a2c76a44a12a911dfbaa07
|
387afc2394b4ea9857019236b991ba01428a5dd5
|
/geom_density_ridges/geom_density_ridges - limited axis - TidyTuesday 30-7-2019.R
|
a3a849de7135a47cb3aed0b43a44e347ec2ae09c
|
[] |
no_license
|
JuanmaMN/tidyverse-ggplot2
|
0a757bdafa0520fa5b68d72aaf3260693632a482
|
1a103a1cc879bfa389922f4ac33fb2c951cd3234
|
refs/heads/master
| 2023-06-08T08:29:06.833615
| 2023-06-07T17:44:41
| 2023-06-07T17:44:41
| 202,595,817
| 14
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,560
|
r
|
geom_density_ridges - limited axis - TidyTuesday 30-7-2019.R
|
# TidyTuesday 2019-07-30: video games dataset.
# Builds a ridgeline plot of mean average playtime per year, grouped by
# ownership band (only bands containing "000,000", i.e. millions of owners).
# Upload the data ---------------------------------------------------------
video_games <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-07-30/video_games.csv")
View(video_games)
str(video_games)
colnames(video_games)
# unique(video_games$owners)
# Upload the necessary packages -------------------------------------------
library(lubridate)
library(tidyverse)
library(hrbrthemes)
library(ggridges)
# Prepare the data --------------------------------------------------------
# parse "Month Day, Year" release dates into Date objects
video_games$release_date<-mdy(video_games$release_date)
# keep owner bands in the millions, drop the 200M+ outlier band, and
# compute mean average playtime per (owners, Year) cell
test2<-video_games%>%
mutate(Year=year(release_date))%>%
group_by(owners, Year) %>% filter(str_detect(owners, "000,000")) %>%
filter(!str_detect(owners, "200,000,000"))%>%
summarize (mean=round(mean(average_playtime,na.rm=TRUE),2))
View(test2)
# ggridges ----------------------------------------------------------------
# one ridge per ownership band (largest bands at the bottom via desc order)
ggplot(test2, aes(x=Year,y = reorder(owners,desc(owners)), fill = owners, group = owners)) +
geom_density_ridges2(scale =1) +
theme_ft_rc(grid="X")+
labs(
title = "Video Games Dataset 2004-2018",
subtitle = "TidyTuesday 30.7.2019",
caption = "\n Source: TidyTuesday
Visualization: JuanmaMN (Twitter @Juanma_MN)",
x = "Average playtime in minutes",
y = "") +
scale_fill_brewer(palette = "Spectral") + theme(legend.position = "",
legend.box = "") +
scale_x_continuous(
breaks = c(2004:2018), limits = c(2000, 2025),
expand = c(0, 0)
)
|
25e79c478825e5a8c7f985ef21683f87f9d20ff6
|
f50655f401237b4a30623f2f0e95ee47cbc204bf
|
/provided/simulator-synthesis.R
|
b85d9445378295c81a29c68b9b50b3c0aac9a1c6
|
[] |
no_license
|
ascheppach/Consulting_MBO
|
1d0a60de959ba5dfc4941507c95664ebf2bd3be3
|
a08e758f96ea199cebb9107c002a6b09c28926c3
|
refs/heads/master
| 2020-12-19T06:02:18.387552
| 2020-04-10T19:37:31
| 2020-04-10T19:37:31
| 235,640,695
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,598
|
r
|
simulator-synthesis.R
|
#!/usr/bin/env Rscript
# Model-based (Bayesian) optimization over a synthesis dataset:
# a random forest fit to all observed points serves as the objective, and
# mlrMBO searches its 6-dimensional parameter space for the maximum.
library(mlr)
library(mlrMBO)
library(smoof)
# data
data_synthesis <- read.csv("provided/synthesis.csv")
data_synthesis$X <- NULL
# model from all data points (random-forest surrogate of the true objective)
model = train(makeLearner("regr.randomForest"), makeRegrTask(data = data_synthesis, target = "target"))
# Bayesian Optimization
# objective: predicted target of the random forest at configuration x
fun = function(x) {
df = as.data.frame(x)
return(getPredictionResponse(predict(model, newdata = df)))
}
# search space: four numeric reaction parameters, two integer coordinates
ps = makeParamSet(
makeNumericParam("f", lower = 0, upper = 0.25),
makeNumericParam("k", lower = 0, upper = 0.1),
makeNumericParam("du", lower = 0, upper = 2e-5),
makeNumericParam("dv", lower = 0, upper = 2e-5),
makeIntegerParam("x", lower = 0, upper = 200),
makeIntegerParam("y", lower = 0, upper = 200)
)
objfun = makeSingleObjectiveFunction(
name = "Synthesis",
fn = fun,
par.set = ps,
has.simple.signature = FALSE,
minimize = FALSE
)
# sample 10 points for the initial surrogate model
# NOTE(review): no set.seed() — the initial design (and thus the run) is
# not reproducible.
initial.data = data_synthesis[sample(1:nrow(data_synthesis), 10), ]
cat(paste("Best training fitness: ", max(initial.data$target), "\n", sep = ""))
ctrl = makeMBOControl(y.name = "target")
# expected improvement infill, optimized by focus search; 50 MBO iterations
ctrl = setMBOControlInfill(ctrl, opt = "focussearch", opt.focussearch.maxit = 20, opt.focussearch.points = 1000, crit = makeMBOInfillCritEI())
ctrl = setMBOControlTermination(ctrl, iters = 50)
res = mbo(objfun, design = initial.data, control = ctrl, show.info = TRUE)
cat("Best configuration:\n")
cat(paste(paste(lapply(names(res$x), function(n) { paste(n, res$x[n], sep = ": ") }), collapse = ", "), "; fitness: ", res$y, "\n", sep = ""))
|
d76d99a3a6b959f71ebc1438458a103c8ed48ddd
|
109b2a458d516e49be1ec092f29a97a60cb272ad
|
/plot1.R
|
79d4e644aa02c21188d8c8f31128ebb4a0b99ed6
|
[] |
no_license
|
cheedep/ExData_Plotting1
|
d9a5036cb394b7a7384c6d415983ecb5125a9a2a
|
e69e5c1bfab61457b1a3a6480a8bc3399e542714
|
refs/heads/master
| 2021-01-17T22:55:14.602287
| 2015-02-08T19:47:15
| 2015-02-08T19:47:15
| 30,469,493
| 0
| 0
| null | 2015-02-07T20:41:10
| 2015-02-07T20:41:09
| null |
UTF-8
|
R
| false
| false
| 696
|
r
|
plot1.R
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02,
## written to ./plot1.png (480x480 px, transparent background).
makePlot1 <- function(){
  # "?" marks missing readings in the source file.
  mydf <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", stringsAsFactors = FALSE)
  mydf$Date <- as.Date(mydf$Date, format = "%d/%m/%Y")
  # Keep only the two target days.  The strptime() conversion of Time was
  # dropped: the column is unused here, and strptime() would have stored a
  # POSIXlt list-column in the data frame.
  febData <- subset(mydf, Date == "2007-02-01" | Date == "2007-02-02")
  # Bug fix: the original drew the histogram to the screen device first and
  # then a second time into the PNG.  Render once, straight into the PNG
  # device, and guarantee the device is closed even if hist() errors.
  png(file = "./plot1.png", width = 480, height = 480, units = "px",
      type = "cairo", bg = "transparent")
  on.exit(dev.off(), add = TRUE)
  hist(febData$Global_active_power, col = "red", main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)")
  invisible(NULL)
}
|
4a700d9cb43cea1528c7e142fc62231b486183c4
|
615d2dd18afed5427c70649afd3e5085f17ba289
|
/cachematrix.R
|
f78030033e6c7cf0e866e0739824a8c33ce5def2
|
[] |
no_license
|
sheltonmath/ProgrammingAssignment2
|
8fef9d01856c102227023038c15682983f743e43
|
857353cf01bd2e3290bc3e22394069e8d8bf0ecf
|
refs/heads/master
| 2021-01-18T15:47:48.079104
| 2015-12-27T22:27:13
| 2015-12-27T22:27:13
| 48,660,536
| 0
| 0
| null | 2015-12-27T21:09:35
| 2015-12-27T21:09:34
| null |
UTF-8
|
R
| false
| false
| 870
|
r
|
cachematrix.R
|
## makeCacheMatrix: create a special "matrix" object that can cache its
## inverse, so the inversion need not be recomputed repeatedly.
## Returns a list of accessors:
##   set(y)      - replace the stored matrix and invalidate the cache
##   get()       - return the stored matrix
##   setinv(inv) - store a precomputed inverse in the cache
##   getinv()    - return the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # the cached inverse no longer matches the new matrix
  }
  get <- function() x
  # Bug fix: the original setinv ignored its argument and recomputed
  # solve(x), so cacheSolve() ended up inverting the matrix twice and the
  # "cache" never held a caller-supplied value.  Store the argument.
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: compute -- or fetch from the cache -- the inverse of the
## special "matrix" object produced by makeCacheMatrix().  Extra arguments
## are forwarded to solve().  Returns the inverse of 'x'.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: invert the underlying matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
2d5e42483007254b39ae6c90090a86ff042e8c8e
|
b2ba89e08e7535a77e6e9d94d6e8d4720a338a4c
|
/processing_scripts.R
|
f5a491b7a663da7b7336714e153f06f6e575630e
|
[] |
no_license
|
kinxiel/state_of_js_GalacticEdition
|
a5a91a8849f1397ad539899ca76a050a36d88fb5
|
9318baca4f31ef09cb4997379b4c6c4300eb6740
|
refs/heads/master
| 2020-04-11T22:43:56.130134
| 2018-12-17T15:12:13
| 2018-12-17T15:12:13
| 162,146,091
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,154
|
r
|
processing_scripts.R
|
# Packages
library(tidyverse)
# Some random scripts for data extraction
#
# Each section below takes a group of survey columns, tabulates the answer
# counts of every column with table(), and column-binds the per-column
# counts into a single <group>_table data frame.  As a side effect each
# per-column count table is also assign()ed to a global named after the
# column (later retrieved with get()).
# Data Import
raw <- read.csv("data/sojs18.csv")
##################################################################
## JavaScript Flavors ##
##################################################################
# Tabulate the JavaScript-flavor columns
javascript_flavors <- raw[,2:7]
javascript_flavors_table <- data.frame()
for (i in seq_len(ncol(javascript_flavors))){
data_ <- javascript_flavors[,i]
data_ <- table(data_)
assign(paste(names(javascript_flavors[i])), data_)
# Bug fix: the original tested nrow(javascript_flavors_table > 0) -- the
# row count of a comparison result.  The intended test is whether the
# accumulator already has rows: cbind if so, otherwise seed it.
if (nrow(javascript_flavors_table) > 0){
javascript_flavors_table <- cbind(javascript_flavors_table, get(paste(names(javascript_flavors[i]))))
} else {
javascript_flavors_table <- get(paste(names(javascript_flavors[i])))
}
}
# Convert to data frame
javascript_flavors_table <- as.data.frame(javascript_flavors_table)
names(javascript_flavors_table) <- names(javascript_flavors)
##################################################################
## Front End Frameworks ##
##################################################################
# Tabulate the front-end framework columns
javascript_front_end <- raw[,9:14]
javascript_front_end_table <- data.frame()
for (i in seq_len(ncol(javascript_front_end))){
data_ <- javascript_front_end[,i]
data_ <- table(data_)
assign(paste(names(javascript_front_end[i])), data_)
# Bug fix: parenthesis placement (see JavaScript Flavors section).
if (nrow(javascript_front_end_table) > 0){
javascript_front_end_table <- cbind(javascript_front_end_table, get(paste(names(javascript_front_end[i]))))
} else {
javascript_front_end_table <- get(paste(names(javascript_front_end[i])))
}
}
# Convert to data frame
javascript_front_end_table <- as.data.frame(javascript_front_end_table)
names(javascript_front_end_table) <- names(javascript_front_end)
##################################################################
## Data Layer ##
##################################################################
# Tabulate the data-layer columns
data_layer <- raw[,16:20]
data_layer_table <- data.frame()
for (i in seq_len(ncol(data_layer))){
data_ <- data_layer[,i]
data_ <- table(data_)
assign(paste(names(data_layer[i])), data_)
# Bug fix: parenthesis placement (see JavaScript Flavors section).
if (nrow(data_layer_table) > 0){
data_layer_table <- cbind(data_layer_table, get(paste(names(data_layer[i]))))
} else {
data_layer_table <- get(paste(names(data_layer[i])))
}
}
# Convert to data frame
data_layer_table <- as.data.frame(data_layer_table)
names(data_layer_table) <- names(data_layer)
##################################################################
## Backend Frameworks ##
##################################################################
# Tabulate the back-end framework columns
javascript_back_end <- raw[,22:27]
javascript_back_end_table <- data.frame()
for (i in seq_len(ncol(javascript_back_end))){
data_ <- javascript_back_end[,i]
data_ <- table(data_)
assign(paste(names(javascript_back_end[i])), data_)
# Bug fix: parenthesis placement (see JavaScript Flavors section).
if (nrow(javascript_back_end_table) > 0){
javascript_back_end_table <- cbind(javascript_back_end_table, get(paste(names(javascript_back_end[i]))))
} else {
javascript_back_end_table <- get(paste(names(javascript_back_end[i])))
}
}
# Convert to data frame
javascript_back_end_table <- as.data.frame(javascript_back_end_table)
names(javascript_back_end_table) <- names(javascript_back_end)
##################################################################
## Testing Frameworks ##
##################################################################
# Tabulate the testing-framework columns
testing <- raw[,29:35]
testing_table <- data.frame()
for (i in seq_len(ncol(testing))){
data_ <- testing[,i]
data_ <- table(data_)
assign(paste(names(testing[i])), data_)
# Bug fix: parenthesis placement (see JavaScript Flavors section).
if (nrow(testing_table) > 0){
testing_table <- cbind(testing_table, get(paste(names(testing[i]))))
} else {
testing_table <- get(paste(names(testing[i])))
}
}
# Convert to data frame
testing_table <- as.data.frame(testing_table)
names(testing_table) <- names(testing)
|
8ba5060ed35fa5c8e6775ae9c02ce46417a1a2ba
|
c2b6d7b0f0ce47fac4a0bc52cbfc575921a54599
|
/man/getNBGaussianLikelihood.Rd
|
6189d1d5cd00fa319b75441c2cff88e604d1fc16
|
[
"MIT"
] |
permissive
|
de-Boer-Lab/MAUDE
|
ba32c080a3175522269fa807955ffbfabd085ef8
|
7aa20cc9b28c06d2772fec23c20ef2ec56a7c026
|
refs/heads/master
| 2023-04-17T12:29:00.794114
| 2022-02-26T22:53:31
| 2022-02-26T22:53:31
| 135,627,989
| 4
| 2
|
MIT
| 2022-02-26T22:39:47
| 2018-05-31T19:43:46
|
HTML
|
UTF-8
|
R
| false
| true
| 1,553
|
rd
|
getNBGaussianLikelihood.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MAUDE.R
\name{getNBGaussianLikelihood}
\alias{getNBGaussianLikelihood}
\title{Calculate the log likelihood of observed read counts}
\usage{
getNBGaussianLikelihood(x, mu, k, sigma = 1, nullModel, libFract)
}
\arguments{
\item{x}{a vector of guide counts per bin}
\item{mu}{the mean for the normal expression distribution}
\item{k}{the vector of total counts per bin}
\item{sigma}{for the normal expression distribution (defaults to 1)}
\item{nullModel}{the bin bounds for the null model (for no change in expression)}
\item{libFract}{the fraction of the unsorted library this guide comprises (e.g. from unsorted cells, or sequencing the vector)}
}
\value{
the log likelihood
}
\description{
Uses a normal distribution (N(mu,sigma)) to estimate how many reads are expected per bin under nullModel, and calculates the log likelihood under a negative binomial model. This function is usually not used directly.
}
\examples{
#usually not used directly
#make a bin sorting model with 6 10\% bins
curSortBins = makeBinModel(data.frame(Bin = c("A","B","C","D","E","F"), fraction = rep(0.1,6)))
readsForGuideX =c(10,20,30,100,200,100); #the reads for this guide
getNBGaussianLikelihood(x=readsForGuideX, mu=1, k=rep(1E6,6), sigma=1, nullModel=curSortBins,
libFract = 50/1E6)
getNBGaussianLikelihood(x=readsForGuideX, mu=-1, k=rep(1E6,6), sigma=1, nullModel=curSortBins,
libFract = 50/1E6)
#mu=1 is far more likely (closer to 0) than mu=-1 for this distribution of reads
}
|
8edc6c379095583552d5288311a9b0cfab200074
|
42886f7b175ea5f5f7c40c5c9cf1ee8d91625598
|
/Lab1/Question4.R
|
c3943d8ba927ba0d962138fbba1476e551c49a8a
|
[] |
no_license
|
janish-parikh/CS-605-Data-Analytics-in-R
|
7d8655ea4a08b2f696c89a832c63b659010ff91a
|
c17be6edf9a1da9dae80fefeb07c85e4a1021942
|
refs/heads/master
| 2022-12-19T11:45:54.412299
| 2020-09-05T03:25:42
| 2020-09-05T03:25:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 916
|
r
|
Question4.R
|
# Question 4
dataset <- c(43, 37, 50, 51, 58, 105, 52, 45, 45, 10)
# Summarise a numeric vector: mean, median, standard deviation and the
# quantile() five-number summary, returned as one named vector.
explore <- function(values) {
  center <- mean(values)
  middle <- median(values)
  spread <- sd(values)
  quarts <- quantile(values)
  c("Mean" = center, "Median" = middle,
    "Standard Deviation" = spread, "Quartile-" = quarts)
}
explore(dataset)
## Tukey's IQR rule: a value is an outlier when it lies below
## Q1 - 1.5*IQR or above Q3 + 1.5*IQR, where Q1 and Q3 are the first and
## third quartiles of the distribution and IQR = Q3 - Q1 is the
## interquartile range.
## Returns the outlying values themselves: low outliers first, then high
## outliers, each in input order.
IQR_outliers <- function(x) {
  Q1 <- quantile(x, 0.25)
  Q3 <- quantile(x, 0.75)
  # Renamed from IQR: the original local variable masked stats::IQR()
  # inside this function -- harmless here, but confusing.
  iqr <- Q3 - Q1
  lower <- Q1 - 1.5 * iqr
  upper <- Q3 + 1.5 * iqr
  c(x[x < lower], x[x > upper])
}
# Detect outliers in the sample data with the IQR rule defined above.
outliers<-IQR_outliers(dataset)
## Recompute the explore() summary for the dataset without any outliers.
# NOTE(review): %in% removes every element equal to an outlier value, so a
# duplicated outlier value would be dropped everywhere it occurs.
reconstructed_dataset<-dataset[!dataset%in%outliers]
print(reconstructed_dataset)
explore(reconstructed_dataset)
|
830ca30a45b39511d17f1c664ca8f063f24f2215
|
d3a4449541e6778cd0f55f2322fe74de3fd78110
|
/R/tableGUI_main_layout.R
|
6fb58516b3b090e0aba6a4093f80e26f82a0da8f
|
[] |
no_license
|
cran/tabplotGTK
|
b828e93c3b568296b1fe9b96d9453d4ca6381968
|
efcea81fdb1f9ef1f310907636d2eac447d08e3c
|
refs/heads/master
| 2021-01-02T22:31:49.166851
| 2012-07-11T00:00:00
| 2012-07-11T00:00:00
| 17,719,480
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,988
|
r
|
tableGUI_main_layout.R
|
# Build the main Tableplot GUI window (gWidgets).
#
# `e` carries the shared GUI state: its fields datlist, sorted, from, to,
# filter and nBins are read below, and the widgets created here (wdw, cmb,
# tbl1, tbl2, the buttons and spin controls, ...) are assigned inside
# with(e, {...}).
# NOTE(review): if `e` is an environment, those assignments land in `e`
# itself, which is presumably how the other tableGUI_* handlers reach the
# widgets -- confirm against the callers.
tableGUI_main_layout <- function(e) {
# browser()
with(e, {
######################################################
## create GUI
######################################################
## create window
wdw <- gwindow("Tableplot",visible=FALSE)
sbr <- gstatusbar("Preparing...", cont=wdw)
g <- gpanedgroup(cont=wdw)
## create source frame
ggg <- ggroup(horizontal = TRUE, cont = g, expand=TRUE)
frm2 <- gframe(text="Source",horizontal = FALSE, cont = ggg)
size(frm2) <- c(350,400)
grp4 <- ggroup(horizontal = FALSE, cont = frm2, expand=TRUE)
grp9 <- ggroup(horizontal = TRUE, cont = grp4, expand=FALSE)
lbl3 <- glabel("Data.frame:", cont=grp9)
# Combobox of candidate data.frames; pre-select the current one.
cmb <- gcombobox(datlist, cont=grp9)
svalue(cmb) <- tableGUI_getCurrentDFname(e)
# Flag presumably used to suppress the combobox change handler while the
# GUI is being (re)built -- the handler is defined elsewhere; confirm.
blockCmbHandler <- FALSE
#addSpring(grp9)
btnReload <- gbutton("Reload", cont=grp9, expand=FALSE)
######## temp
# btnTemp <- gbutton("varTbl", cont=grp9, expand=FALSE)
#
# addHandlerClicked(btnTemp, function(h,...) {
# print(e$varTbl)
# })
########
## fill table 1
tbl1 <- gtable(tableGUI_getTbl1(e=e), multiple=TRUE, cont=grp4, expand=TRUE)
grp10 <- ggroup(horizontal = TRUE, cont = grp4, expand=FALSE)
lbl4 <- glabel("Number of Objects:", cont=grp10)
# Row count of the selected data.frame, fetched from the global env.
lbl5 <- glabel(nrow(get(svalue(cmb), envir=.GlobalEnv)), cont=grp10)
## create transfer button
grp8 <- ggroup(horizontal = FALSE, cont = ggg, anchor=c(-1, -1),expand=TRUE)
addSpring(grp8)
btnTransfer <- gbutton(">", cont=grp8, expand=TRUE); enabled(btnTransfer) <- FALSE
addSpace(grp8, 100, horizontal=FALSE)
## create config frame
frm <- gframe(text="Tableplot Configuration",horizontal = FALSE, cont = g)
size(frm) <- c(350,400)
grp6 <- ggroup(horizontal = FALSE, cont = frm, expand=TRUE)
#lbl3 <- glabel("Columns", cont=grp6)
# Column-configuration table; rows reordered according to e's `sorted`.
table2content <- tableGUI_getTbl2(e=e)
table2content <- table2content[sorted, ]
tbl2 <- gtable(table2content, multiple=TRUE, cont=grp6, expand=TRUE)
grp7 <- ggroup(horizontal = TRUE, cont = grp6, expand=FALSE)
# Per-column action buttons; disabled until a row is selected (handlers
# are attached elsewhere).
btnUp <- gbutton("Up", cont=grp7, expand=TRUE); enabled(btnUp) <- FALSE
btnDown <- gbutton("Down", cont=grp7, expand=TRUE); enabled(btnDown) <- FALSE
btnScale <- gbutton("Scale", cont=grp7, expand=TRUE); enabled(btnScale) <- FALSE
btnSort <- gbutton("Sort", cont=grp7, expand=TRUE); enabled(btnSort) <- FALSE
btnAsCategory <- gbutton("As Categorical", cont=grp7, expand=TRUE); enabled(btnAsCategory) <- FALSE
btnPal <- gbutton("Palette", cont=grp7, expand=TRUE); enabled(btnPal) <- FALSE
# `ready`: TRUE once at least one column is configured; gates most of the
# remaining controls.
ready <- nrow(table2content)!=0
grp2 <- ggroup(horizontal = TRUE, cont = grp6)
# Show the zoom controls only when a sub-range (from/to) is already set.
showZoom <- (from!=0 || to!=100) & ready
cbx <- gcheckbox(text="Zoom in", checked = showZoom, cont= grp2)
lbl7 <- glabel("from", cont=grp2)
spbBinsFrom <- gspinbutton(0, 100, by = 10, cont=grp2, expand=FALSE)
svalue(spbBinsFrom) <- from
lbl8 <- glabel("percent to", cont=grp2)
spbBinsTo <- gspinbutton(0, 100, by = 10, cont=grp2, expand=FALSE)
svalue(spbBinsTo) <- to
lbl9 <- glabel("percent", cont=grp2)
enabled(cbx) <- ready
enabled(lbl7) <- showZoom
enabled(spbBinsFrom) <- showZoom
enabled(lbl8) <- showZoom
enabled(spbBinsTo) <- showZoom
enabled(lbl9) <- showZoom
# Validate the stored filter expression against the current data.
correctFilter <- tableGUI_filter(filter, e)
grp3 <- ggroup(horizontal = TRUE, cont = grp6)
lbl10 <- glabel("Filter:", cont=grp3)
gtxtFilter <- gedit(text= filter, cont=grp3)
enabled(lbl10) <- enabled(gtxtFilter) <- ready
grp1 <- ggroup(horizontal = TRUE, cont = grp6)
lbl1 <- glabel("Number of Row Bins:", cont=grp1)
spbBins <- gspinbutton(0, 1000, by = 10, cont=grp1, expand=TRUE)
svalue(spbBins) <- nBins
enabled(lbl1) <- enabled(spbBins) <- ready
btnSave <- gbutton("Save", cont=grp1, expand=TRUE); enabled(btnSave) <- FALSE
btnRun <- gbutton("Run", cont=grp1, expand=TRUE); enabled(btnRun) <- ready
})
}
|
ac842b1038aee66e521467c697d0f1df0ce5b197
|
76e47464f4313b79f95fecf01067aa3a6b713d8b
|
/R/rISIMIP-package.R
|
3b4f8ed915ebf684777ecd6a97cf6696c2cecfa3
|
[
"MIT"
] |
permissive
|
zejiang-unsw/rISIMIP
|
460116d1f6e23826bb9de57e8ee731d29f379730
|
9f9af06dd51d936932795c4cf2a99216fbfcea23
|
refs/heads/master
| 2021-01-04T09:46:43.105805
| 2019-12-20T11:36:35
| 2019-12-20T11:36:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,582
|
r
|
rISIMIP-package.R
|
#' R Package for handling ISIMIP data
#'
#' Reading and processing ISIMIP NetCDF files
#'
#' @name rISIMIP-package
#' @aliases rISIMIPpackage
#' @docType package
#' @title R Package for handling ISIMIP data
#' @author RS-eco
#' @import raster ncdf4
#' @keywords package
#'
NULL
#'
#' @docType data
#' @name landuse-totals_2005soc
#' @title Total landuse of 2005
#' @description data.frame with percentage cover of each totals landuse class for 2005
#' @details This data.frame depicts the total landuse coverage for 2005 under the 2005soc scenario.
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_ewembi_1995_landonly
#' @title Global bioclimatic data for 30-yr period centered around 1995 acquired from EWEMBI data
#' @description data.frame with global bioclimatic for 1995 according to EWEMBI observed data
#' @details This data.frame includes the global bioclimatic data for 1995 calculated from 30-yr daily EWEMBI data
#' centered around 1995.
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_gfdl-esm2m_rcp26_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from GFDL-ESM2M model data under RCP2.6
#' @description data.frame with global bioclimatic for 2080 according to GFDL-ESM2M model data under RCP2.6
#' @details This data.frame includes the global bioclimatic data for 1995 calculated from 30-yr daily simulated
#' GFDL-ESM2M data centered around 1995 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_gfdl-esm2m_rcp60_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from GFDL-ESM2M model data under RCP6.0
#' @description data.frame with global bioclimatic for 2080 according to GFDL-ESM2M model data under RCP6.0
#' @details This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
#' GFDL-ESM2M data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_hadgem2-es_rcp26_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from HadGEM2-ES model data under RCP2.6
#' @description data.frame with global bioclimatic for 2080 according to HadGEM2-ES model data under RCP2.6
#' @details This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
#' HadGEM2-ES data centered around 2080 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_hadgem2-es_rcp60_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from HadGEM2-ES model data under RCP6.0
#' @description data.frame with global bioclimatic for 2080 according to HadGEM2-ES model data under RCP6.0
#' @details This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
#' HadGEM2-ES data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_ipsl-cm5a-lr_rcp26_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from IPSL-CM5A-LR model data under RCP2.6
#' @description data.frame with global bioclimatic for 2080 according to IPSL-CM5A-LR model data under RCP2.6
#' @details This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
#' IPSL-CM5A-LR data centered around 2080 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_ipsl-cm5a-lr_rcp60_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from IPSL-CM5A-LR model data under RCP6.0
#' @description data.frame with global bioclimatic for 2080 according to IPSL-CM5A-LR model data under RCP6.0
#' @details This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
#' IPSL-CM5A-LR data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_miroc5_rcp26_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from MIROC5 model data under RCP2.6
#' @description data.frame with global bioclimatic for 2080 according to MIROC5 model data under RCP2.6
#' @details This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
#' MIROC5 data centered around 2080 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name bioclim_miroc5_rcp60_2080_landonly
#' @title Global bioclimatic data for 30-yr period centered around 2080 acquired from MIROC5 model data under RCP6.0
#' @description data.frame with global bioclimatic for 2080 according to MIROC5 model data under RCP6.0
#' @details This data.frame includes the global bioclimatic data for 2080 calculated from 30-yr daily simulated
#' MIROC5 data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name delta_runmean31_tas
#' @title Years of global mean temperature thresholds
#' @description data.frame with years of global mean temperature thresholds according to ISIMIP2b climate models
#' @details This data.frame depicts the expert year in which the 31-year running mean of
#' global mean temperature crosses the given thresholds for different models and rcps.
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landseamask_generic
#' @title Land sea mask of ISIMIP data
#' @description RasterLayer with land sea mask of ISIMIP data
#' @details This RasterLayer depicts the global land sea mask used for gridded ISIMIP data at a resolution of 0.5 degree.
#' @format \code{RasterLayer}
NULL
#'
#' @docType data
#' @name protectedareas_annual_1819_2018_landonly
#' @title Annual protected areas data at 0.5 degree
#' @description data.frame with protected areas coverage per year at 0.5 degree resolution
#' @details This data.frame depicts the annual percentage coverage of protected areas for each grid cell
#' at a spatial resolution of 0.5 degree for the years 1819 to 2018.
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name protectedareas_iucn_cat_2018_landonly
#' @title Protected area coverage by IUCN category at 0.5 degree
#' @description data.frame with protected area coverage for each IUCN category in 2018 at 0.5 degree resolution
#' @details This data.frame depicts the percentage coverage of protected areas per IUCN category for each grid cell
#' at a spatial resolution of 0.5 degree for the year 2018.
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_gfdl-esm2m_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from GFDL-ESM2M model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 1995
#' according to GFDL-ESM2M model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' GFDL-ESM2M data centered around 1995 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_gfdl-esm2m_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from GFDL-ESM2M model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to GFDL-ESM2M model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' GFDL-ESM2M data centered around 2080 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_hadgem2-es_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from HadGEM2-ES model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 1995
#' according to HadGEM2-ES model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' HadGEM2-ES data centered around 1995 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_hadgem2-es_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from HadGEM2-ES model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to HadGEM2-ES model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' HadGEM2-ES data centered around 2080 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_ipsl-cm5a-lr_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from IPSL-CM5A-LR model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 1995
#' according to IPSL-CM5A-LR model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' IPSL-CM5A-LR data centered around 1995 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_ipsl-cm5a-lr_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from IPSL-CM5A-LR model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to IPSL-CM5A-LR model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' IPSL-CM5A-LR data centered around 2080 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_miroc5_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from MIROC5 model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 1995
#' according to MIROC5 model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' MIROC5 data centered around 1995 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp26_miroc5_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from MIROC5 model data under RCP2.6
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to MIROC5 model data under RCP2.6
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' MIROC5 data centered around 2080 based on the representative concentration pathway RCP2.6
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_gfdl-esm2m_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from GFDL-ESM2M model data under RCP6.0
#' @description data.frame with percentage cover of each totals landuse class for 1995
#' according to GFDL-ESM2M model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' GFDL-ESM2M data centered around 1995 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_gfdl-esm2m_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from GFDL-ESM2M model data under RCP6.0
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to GFDL-ESM2M model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' GFDL-ESM2M data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_hadgem2-es_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from HadGEM2-ES model data under RCP6.0
#' @description data.frame with percentage cover of each totals landuse class for 1995
#' according to HadGEM2-ES model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' HadGEM2-ES data centered around 1995 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_hadgem2-es_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from HadGEM2-ES model data under RCP6.0
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to HadGEM2-ES model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' HadGEM2-ES data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_ipsl-cm5a-lr_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from IPSL-CM5A-LR model data under RCP6.0
#' @description data.frame with percentage cover of each totals landuse class for 1995
#' according to IPSL-CM5A-LR model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' IPSL-CM5A-LR data centered around 1995 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_ipsl-cm5a-lr_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from IPSL-CM5A-LR model data under RCP6.0
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to IPSL-CM5A-LR model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' IPSL-CM5A-LR data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_miroc5_1995
#' @title Global total landuse for 30-yr period centered around 1995 derived from MIROC5 model data under RCP6.0
#' @description data.frame with percentage cover of each totals landuse class for 1995
#' according to MIROC5 model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 1995 calculated from 30-yr yearly simulated
#' MIROC5 data centered around 1995 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name landuse-totals_rcp60_miroc5_2080
#' @title Global total landuse for 30-yr period centered around 2080 derived from MIROC5 model data under RCP6.0
#' @description data.framewith percentage cover of each totals landuse class for 2080
#' according to MIROC5 model data under RCP6.0
#' @details This data.frame includes total landuse coverage for 2080 calculated from 30-yr yearly simulated
#' MIROC5 data centered around 2080 based on the representative concentration pathway RCP6.0
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name runmean31_tas
#' @title 31-year runing mean of global mean temperature
#' @description Dataframe with 31-year running mean past and future simulated global mean temperature
#' @details This data.frame depicts the 31-year running mean of global mean temperature
#' for different models and rcps.
#' @format \code{data.frame}
NULL
#'
#' @docType data
#' @name yearmean_tas
#' @title Annual global mean temperature
#' @description Dataframe with past and future simulated global annual mean temperature
#' @details This data.frame depicts the annual global mean temperature
#' for past and future time periods based on different model algorthims and rcps.
#' @format \code{data.frame}
NULL
|
30728d60155f82bad6e49799be353e542cd4fcda
|
36f4c8d36cac5b5816b9d431aa235a070751c509
|
/Calsedel 05 de Marzo del 2018 Dump y Source.R
|
359c4ae42fbe39a240636a73657db08e7d18c0a9
|
[] |
no_license
|
Laugcba/Software_Actuarial_III
|
d93a82dabe452e2c713f2e981aca8f93ec88672e
|
1c95cddc8beda42b767c7637c1cdf623fcbc8cec
|
refs/heads/master
| 2021-05-05T00:03:18.967895
| 2018-05-22T02:44:48
| 2018-05-22T02:44:48
| 119,462,774
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 4,006
|
r
|
Calsedel 05 de Marzo del 2018 Dump y Source.R
|
# Class notes script: dump()/source(), connections, subsetting and control
# structures (comments translated from Spanish).
# Dump and source: dump() writes the R instructions needed to rebuild objects
# to a file; source() re-executes that file, recreating the objects.
setwd("~/GitHub/Software_Actuarial_III")
x <-"Software Actuarial III"
y <- data.frame(a=1,b="a")
dump(c("x","y"),file="data.R")
rm(x,y)
source("data.R")
# dump() and source() work with the code instructions that rebuild an object,
# instead of retrieving the object itself from a saved location.
# CONNECTIONS
# Data are read through connection interfaces; usually a connection wraps a file.
# file() opens a connection to a file.
# NOTE(review): `con` is used on the next line but is only created two lines
# below -- as written this line fails unless `con` already exists; verify order.
writeLines(readLines(con), "FCFM.txt")
con <- url("https://www.fcfm.buap.mx/")
x <- readLines(con,7)
# writeLines(): read the page, edit line 7, then write it back out as HTML
con <- url("https://www.fcfm.buap.mx/")
x <- readLines(con)
x[7] <- "t<title>FCFM: Estariamos mejor con otro director </title>"
writeLines(x,"FCFM.html")
x
writeLines(x,"FCFM.html")
# SUBSETTING
# [  returns an object of the same class as the original; can extract several elements.
# [[ extracts a single element from a list or data frame.
# $  extracts elements from a list or data frame by name.
# Create a vector
x <- c("a","b","c","c","d","e")
# Inspect the vector
x
# Extract a single element with [ ]
x[1]
x[2]
# A sequence of elements can also be extracted
x[1:4]
# Elements satisfying a condition can be extracted
x[x>"b"]
# Equivalently, a logical vector can be built first
u<- x =="c"
u
x[u]
x[5-3]
x[1:2]
# Create a list
x <- list(foo=1:4, bar=0.6)
x
x[1]
# The result is a list containing the sequence vector
x[[1]]
# The result is the sequence vector itself
# (with double brackets [[ ]] a bare vector is returned)
x$foo
x["bar"]
x
x[["bar"]]
# SUBSETTING LISTS
x <- list(foo=1:4, bar=0.6, baz="Hola")
# Extract non-consecutive elements: 1 yes, 2 no, 3 yes
x[c(1,3)]
# With [[ ]] the indices are applied recursively:
# first extract element 1, then position 3 within it
x[[c(1,3)]]
name <- "baz"
# Brackets accept computed indices
x[name]
x[[name]]
x$name
# To extract with $ the literal element name must be written;
# x$name looks for an element literally called "name", not the value of `name`.
# Nested list subsetting:
# elements can be extracted from already-extracted elements
x <- list(a=list(10,12,14), b= list(3.14,2.81))
x[[c(1,3)]]
x[[1]][[3]]
x[[c(2,1)]]
# SUBSETTING A MATRIX
# Matrix extraction works in the usual way,
# with (i, j) positions of the elements
x <- matrix(1:6,2,3)
x
x[1,2]
x[1,]
# With drop = FALSE the dimensions are kept and
# the result remains a matrix
x[1,2, drop= FALSE]
x[1, , drop=FALSE]
# R can partially match an object's name with $
x<- list(aasdfafk=1:5)
x$a
x[["a"]]
x[["a", exact=FALSE]]
# Removing missing values
airquality[1:6,]
complete.cases(airquality[1:6,])
complete.cases(airquality[1:10,])
completos <- complete.cases(airquality)
airquality[completos,]
data <- airquality[completos,]
data[1:6,]
# Subsetting with a logical vector keeps only the complete rows.
# Some operations
x<- 1:4; y <- 6:9
x+y # element by element
x<2
x>2
x>=2
y==8
x*y
x/y
# On matrices (* and / are elementwise; %*% is matrix multiplication)
x <- matrix(1:4,2,2); y <- matrix(rep(10,4),2,2)
x
y
x*y
x/y
x;y;x%*%y
y%*%x
x%*%y
# CONTROL STRUCTURES
# They manage the flow of execution of a program depending on
# run-time conditions:
# if, else: test a condition
# for: run a loop a fixed number of times
# while: run a loop while a condition holds
# repeat: run an infinite loop
# break: end the execution of a loop
# next: skip one iteration of a loop
# return: exit a function
# if, else (template -- `condición` is a placeholder, not a defined object)
if(condición){
# some instructions
} else {
## some other instructions
}
x=3
if (x>5){
  print("Mayor")
}else {
  print("Menor")
}
x=7
if(x<5){
  print("menor a 5")
}else if(x<=10){
  print("entre 5 y 10")
}else {
  print("Mayor a 10")
}
x<-4
y<- 3
if (x>3){
  y<- 10
}else {
  y<- 0
}
y <- if(x>3){
  10
}else {
  0
}
|
041e88e335e8da97eddc1cbb9ee393af87e7ccd2
|
3997adde33a37af6136686bd6acb9b4e2d9964b4
|
/install_glasp.R
|
46a329abe2accefbc45bf0b423e427fbb37961a5
|
[] |
no_license
|
Spain-AI/glasp-code
|
f2aa4d56ccd825e22d7305241910df0165ad613f
|
1936f2910d2eb0112fff2a97432489c178f1b2fc
|
refs/heads/master
| 2022-09-08T14:38:41.641771
| 2020-05-26T09:10:18
| 2020-05-26T09:10:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
install_glasp.R
|
# Install the glasp package from GitHub (repo jlaria/glasp) along with all of
# its dependencies.  Requires the devtools package to already be installed.
devtools::install_github("jlaria/glasp", dependencies = TRUE)
|
1f7fdc9c8c54824febf36d754444e7637aae9706
|
07b5f7c7e8e990d5d4742ed0cac51d4f1bd020cf
|
/man/hc_polygon-dispatch.rd
|
7463145b075503ce4b71319fb932956513a5d2ab
|
[
"MIT"
] |
permissive
|
jokergoo/HilbertCurve
|
2e94b30d883407b44e60ace6e2655decfca261cd
|
f8143a54354e732f2353fb6e098bb3b189478270
|
refs/heads/master
| 2023-04-13T10:27:27.361062
| 2023-03-22T13:38:45
| 2023-03-22T13:38:45
| 38,319,620
| 39
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
rd
|
hc_polygon-dispatch.rd
|
\name{hc_polygon-dispatch}
\alias{hc_polygon}
\title{
Method dispatch page for hc_polygon
}
\description{
Method dispatch page for \code{hc_polygon}.
}
\section{Dispatch}{
\code{hc_polygon} can be dispatched on following classes:
\itemize{
\item \code{\link{hc_polygon,GenomicHilbertCurve-method}}, \code{\link{GenomicHilbertCurve-class}} class method
\item \code{\link{hc_polygon,HilbertCurve-method}}, \code{\link{HilbertCurve-class}} class method
}
}
\examples{
# no example
NULL
}
|
7b2212b0d77f32d35d3318f0723a9e2de1a8666b
|
8bd5557b6d1662c31f96aafe729d2137c90d4edf
|
/explore/mtcarsPlotEx02.r
|
dcf497874f75feabea6d155ea1573f24387fb510
|
[] |
no_license
|
Wenbo87/reg_models01
|
ca85339c1aa3dffc57ee7dc1316c96735476b16e
|
513a0b75c824e6fc3dbc8e936c09fd46e4449fe0
|
refs/heads/master
| 2020-12-24T17:08:00.362909
| 2014-11-23T21:47:27
| 2014-11-23T21:47:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 979
|
r
|
mtcarsPlotEx02.r
|
## Exploratory scatter plot with a linear model for the 'mtcars' data.
## Regresses mpg on transmission type (am) and annotates the plot with the
## fitted intercept/slope estimates and their p-values.
mtcarsPlotEx02 <- function() {
  # Load libraries
  library(datasets)

  # Response and predictor taken from mtcars
  mpg <- mtcars$mpg
  am <- mtcars$am

  # Simple linear regression of mpg on transmission type
  fit <- lm(mpg ~ am)
  coefs <- summary(fit)$coefficients

  # Annotation lines: estimates and p-values for intercept and slope
  annot <- c(
    paste("Intercept:", round(coefs[1, 1], digits = 4)),
    paste("Intercept P-Value:", signif(coefs[1, 4], digits = 4)),
    paste("Slope:", round(coefs[2, 1], digits = 4)),
    paste("Slope P-Value:", signif(coefs[2, 4], digits = 4))
  )

  # Scatter plot of mpg vs transmission with the regression line overlaid
  plot(am, mpg, xlab = "Transmission (0 = Auto, 1 = Manual)", ylab = "MPG")
  abline(fit, col = "red", lwd = 2)
  # Right-aligned annotation text placed down the right side of the panel
  text(x = 0.65, y = seq(from = 32, to = 26, by = -2), adj = c(1, 1), labels = annot)
  title("MPG Grouped by Transmission Type")
}
|
ec4e3a001ddc6f3f8af433c35569c88ed1d9262f
|
a0830531052bd2330932c3a2c9750326cf8304fc
|
/vmstools/man/old/clipPolygons.Rd
|
67fa74992a485599d5d3c3706e6c3e7bb1bdbab6
|
[] |
no_license
|
mcruf/vmstools
|
17d9c8f0c875c2a107cfd21ada94977d532c882d
|
093bf8666cdab26d74da229f1412e93716173970
|
refs/heads/master
| 2021-05-29T20:57:18.053843
| 2015-06-11T09:49:20
| 2015-06-11T09:49:20
| 139,850,057
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,429
|
rd
|
clipPolygons.Rd
|
\name{clipPolygons}
\alias{clipPolygons}
\title{
Function to clip Polygons together
}
\description{
This function removes from a set of polygons A the areas overlapping with another set of polygons B.
}
\usage{
clipPolygons(shapeAll, europa)
}
\arguments{
\item{shapeAll}{These are the polygons A (as shapefiles) that need to be clipped}
\item{europa}{These are the polygons B (as a polyset format) used for the clipping}
}
\details{
This function is based on the joinPolys function of the PBSmapping package.
It is used in the DCF indicator 7 which calculates the total surface within a given area that is not impacted by fisheries.
A shapefile is the input to define this area (a MPA, for example). This function will remove the areas from this shapefile that might overlap with lands (stored in europa.rda), otherwise the indicator will be overestimated as land is, by definition, not impacted by fisheries.
}
\value{
A clipped resulting polyset is returned.
}
\references{EU lot 2 project}
\author{Fabrizio Manco}
\seealso{\code{indicators()}}
\examples{
\dontrun{
# Load the set of polygons used for the clipping, in our case, lands
data(europa)
shapeAll <- lonLat2SpatialPolygons(SI_LONG=c(0,1,1,0),SI_LATI=c(54,54,54.5,54.5))
# Do the clipping
clipShapeFromLand<-clipPolygons (shapeAll, europa)
# Plot the resulting clipped polygons to check
plotPolys(clipShapeFromLand)
}
}
|
72c6e7588e3569855a8baf597b668db104062999
|
b99a692b325e6d2e6419172fc37fd3107ffb79c2
|
/tests/testthat/test_hodge.R
|
5dd698a7621bd6c02208359afd940f1d6c4715e7
|
[] |
no_license
|
RobinHankin/stokes
|
64a464d291cc1d53aa6478fe9986dd69cf65ad1e
|
0a90b91b6492911328bad63f88084d7e865f70a9
|
refs/heads/master
| 2023-08-17T07:46:50.818411
| 2023-08-15T01:56:56
| 2023-08-15T01:56:56
| 177,894,352
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 877
|
r
|
test_hodge.R
|
## Some tests of hodge() ... see also test_misc.R which looks at issue #61
# Escalate warning handling while the tests run (package test convention).
options(warn=999)
# NOTE(review): hodge(), index(), issmall(), coeffs(), %^% and rform() are
# helpers from the enclosing package (not visible in this file) -- presumably
# the Hodge star, index extractor, numerical-zero test, coefficient accessor,
# wedge product and random-form generator; confirm against the package docs.
test_that("Function hodge() behaves itself", {
  expect_true(TRUE)
  # foo1(): applying the Hodge star four times should recover the original
  # form (up to numerical error), and x wedge *x should have non-negative
  # coefficients.
  foo1 <- function(x){ # checks that ***x == x, also positivity
    n <- max(index(x))
    discrepancy <- x |> hodge(n) |> hodge(n) |> hodge(n) |> hodge(n) - x
    expect_true(issmall(discrepancy),info=x)
    expect_true(all(coeffs(x %^% hodge(x,n)) >= 0))
  } # foo1() closes
  # foo2(): symmetry of the Hodge pairing, (*x)^y == (*y)^x, with the star
  # taken in the common ambient dimension n.
  foo2 <- function(x,y){ # checks that *x^y == *y^x
    n <- max(c(index(x),index(y)))
    expect_true(issmall(hodge(x,n) ^ y - hodge(y,n) ^ x),info=list(x,y,n))
  } # foo2() closes
  # Ten random forms for the round-trip/positivity check
  for(i in 1:10){
    jj <- rform()
    foo1(jj)
  }
  # Ten random pairs, both at default size and at a larger (10,5,11) size
  for(i in 1:10){
    x <- rform()
    y <- rform()
    foo2(x,y)
    x <- rform(10,5,11)
    y <- rform(10,5,11)
    foo2(x,y)
  }
})
|
00456d394e5ec4b9c4aec6ba5d18c653331e3e09
|
54634bec205f0d321851b978b32bcf2accb74dd7
|
/확률을 높이는 방법을 구색.R
|
47e25a2463ffa49dd16acf7f3cc964e804503118
|
[] |
no_license
|
kso8868/workR
|
40df0100c951d3d1e46a8f17114f31c1925711a8
|
6f07e87c6fc0cb815a7da50718dcbd3a5f06d910
|
refs/heads/master
| 2020-09-21T16:14:54.878232
| 2019-12-19T10:37:38
| 2019-12-19T10:37:38
| 224,845,101
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
확률을 높이는 방법을 구색.R
|
# Q3) Load the UCLA graduate-admissions data into `mydata` and answer the
#     questions below.  (Comments translated from Korean.)
mydata <- read.csv( "https://stats.idre.ucla.edu/stat/data/binary.csv" )
head(mydata)
str(mydata)
# (1) Build a logistic regression model predicting admission (admit) from
#     gre, gpa and rank (0 = rejected, 1 = admitted).
#     BUG FIX: the original call omitted `family = binomial`, which makes
#     glm() silently fit an ordinary least-squares (gaussian) model instead
#     of the logistic regression the exercise asks for.
mydata_model <- glm(admit~., data = mydata, family = binomial)
mydata_model
# (2) Feed the predictors (everything except admit) back into the model,
#     predict admission, and show predicted vs. observed values.
#     type = "response" returns probabilities on the [0, 1] scale; rounding
#     then classifies with a 0.5 threshold.
pred <- predict(mydata_model, mydata[,2:4], type = "response")
pred
pred <- round(pred, 0)
pred
A <- mydata$admit
A
head(pred,20)
# (3) Report the model's classification accuracy (share of matching labels).
ass <- mean(pred == A)
ass
|
0d5b93a3075a28f3adafea8cb3d46da7cd305d6b
|
01c977c292984050d40baf29b1095a6efaeda8c7
|
/plot1.R
|
3ac9dffbdda7e1ffded4420b32a8b2eaf2e6a227
|
[] |
no_license
|
cwmiller21/ExData_Plotting1
|
536e61f35aa917c7c6015ab5681f8d50bfe0f160
|
8d3c232321306e0c5294bbbab179898c0a823d42
|
refs/heads/master
| 2021-01-17T22:43:22.884930
| 2014-07-13T14:01:17
| 2014-07-13T14:01:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 868
|
r
|
plot1.R
|
# This R script loads the household power consumption data and then creates
# plot 1 (a histogram of Global Active Power) as a PNG file.

# options
options(stringsAsFactors=FALSE, show.signif.stars = FALSE)

# Load data; "?" encodes missing values in this file
hpc <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
                  na.string="?")

# Change Date to Date class, then subset to 2007-02-01 and 2007-02-02.
# BUG FIX: the original selected unique(hpc$Date)[48:49], i.e. the dates by
# *position* in the file -- fragile if rows are reordered or days are missing.
# Selecting by the actual date values matches the stated intent directly.
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
doc <- as.Date(c("2007-02-01", "2007-02-02"))
hpc1 <- subset(hpc, Date %in% doc)

# remove large data
rm(hpc)

# create plot 1 on the screen device
with(hpc1, hist(Global_active_power, col="red",
                main="Global Active Power",
                xlab="Global Active Power (kilowatts)"))

# copy plot from screen display to png file format
dev.copy(png, file="plot1.png", width=480, height=480)
# BUG FIX: dev.off(4) hardcoded device number 4, which is wrong whenever other
# devices are open.  Plain dev.off() closes the current device (the PNG just
# opened by dev.copy) and reactivates the previous (screen) device.
dev.off()
|
11e0833daebe2fcf4d0d3a91bd98543389dd91b5
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.compute/man/serverlessapplicationrepository_put_application_policy.Rd
|
8dd605987ad7bb7690acb2a623c20d64d00c115b
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,509
|
rd
|
serverlessapplicationrepository_put_application_policy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/serverlessapplicationrepository_operations.R
\name{serverlessapplicationrepository_put_application_policy}
\alias{serverlessapplicationrepository_put_application_policy}
\title{Sets the permission policy for an application}
\usage{
serverlessapplicationrepository_put_application_policy(ApplicationId,
Statements)
}
\arguments{
\item{ApplicationId}{[required] The Amazon Resource Name (ARN) of the application.}
\item{Statements}{[required] An array of policy statements applied to the application.}
}
\value{
A list with the following syntax:\preformatted{list(
Statements = list(
list(
Actions = list(
"string"
),
PrincipalOrgIDs = list(
"string"
),
Principals = list(
"string"
),
StatementId = "string"
)
)
)
}
}
\description{
Sets the permission policy for an application. For the list of actions
supported for this operation, see \href{https://docs.aws.amazon.com/serverlessrepo/latest/devguide/security_iam_resource-based-policy-examples.html#application-permissions}{Application Permissions}
.
}
\section{Request syntax}{
\preformatted{svc$put_application_policy(
ApplicationId = "string",
Statements = list(
list(
Actions = list(
"string"
),
PrincipalOrgIDs = list(
"string"
),
Principals = list(
"string"
),
StatementId = "string"
)
)
)
}
}
\keyword{internal}
|
4a0f6002c47bdc59ef4b455e68e892e9c75fd5e0
|
70b237f6c2f62b26d07250470653ec5cb913631c
|
/R/direct.R
|
11f51758318cd83406c0acf75524ba50e2a6fd09
|
[] |
no_license
|
cran/sae
|
078469aaf95e06e59eae054fc366137fc72583ba
|
c5904ed07bd6bfb5219e7f209d63fa0937443cbb
|
refs/heads/master
| 2021-06-05T21:02:14.261262
| 2020-03-01T10:40:02
| 2020-03-01T10:40:02
| 17,699,442
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,076
|
r
|
direct.R
|
# direct(): Horvitz-Thompson direct estimators of domain means.
#
# Arguments:
#   y       numeric vector of the target variable (sample values)
#   dom     vector of domain indicators, same length as y
#   sweight optional sampling weights, same length as y
#   domsize two-column data frame/matrix: domain identifier, population size;
#           required unless replace=TRUE without weights
#   data    optional data frame in which y, dom and sweight are looked up
#   replace logical: sampling with (TRUE) or without (FALSE) replacement
#
# Returns a data.frame with one row per domain (including non-sampled domains
# when domsize is given): Domain, SampSize, Direct, SD and percent CV.
# Non-sampled domains get NA estimates and sample size 0.
direct <-
function(y,dom,sweight,domsize, data, replace=FALSE) {
   result <- data.frame(Domain=0,SampSize=0,Direct=0,SD=0,CV=0)
   missingsweight <- missing(sweight)
   missingdomsize <- missing(domsize)
# direct estimator case: the combination of replace/sweight/domsize selects
# one of four estimator formulas used in the main loop below.
# type 1: sampling without replacement
# type 2: sampling without replacement under simple random sampling (SRS)
# type 3: sampling with replacement
# type 4: sampling with replacement under SRS
   if (replace==FALSE)
   {
      if (!missingdomsize)
      {
         if (!missingsweight)
            type <- 1
         else
            type <- 2
      } else
         stop("domsize is required when replace=FALSE.")
   } else if (replace==TRUE)
   {
      if (!missingsweight)
      {
         if (!missingdomsize)
            type <- 3
         else
            stop("domsize is required when replace=TRUE and sweight is used.")
      } else
         type <- 4
   } else
      stop("replace=",replace," must be TRUE or FALSE.")
#   classdata <- class(data)
#   if (classdata=="data.frame")
# When a data frame is supplied, resolve the y/dom/sweight arguments as
# column names inside it (non-standard evaluation via substitute()).
   if (!missing(data))
   {
      y <- data[,deparse(substitute(y))]
      dom <- data[,deparse(substitute(dom))]
      if (!missingsweight)
         sweight <- data[,deparse(substitute(sweight))]
   }
# Validate inputs: no NA domain sizes, and all vectors the same length.
   if(!missingdomsize)
      if (any(is.na(domsize)))
         stop(" domsize with NA values.")
   A<-length(y)
   B<-length(dom)
   if (!missingsweight)
   {
      C<-length(sweight)
      if((A!=B ) | (A!=C))
         stop(" y [",A,"], dom [",B,"] and sweight [",C,"] must be the same length.")
   } else
      if(A!=B)
         stop(" y [",A,"] and dom [",B,"] must be the same length.")
# Delete rows with NA values in any of y, dom or sweight.
   rowNA <- c(which(is.na(y)),which(is.na(dom)))
   if (!missingsweight)
      rowNA <- c(rowNA,which(is.na(sweight)))
   if (length(rowNA)>0)
   {
      y <- y[-rowNA]
      dom <- dom[-rowNA]
      if (!missingsweight)
         sweight <- sweight[-rowNA]
   }
# Test the domains are the same: every sampled domain must appear exactly
# once among the population domain identifiers in domsize.
   did <- unique(dom) # unique identifiers of domains
   Dsample <- length(did) # number of domains in sample
   if (!missingdomsize)
   {
      for (d in 1:Dsample)
      {
         ntimesdomi <- sum(did[d]==domsize[,1])
         if (ntimesdomi!=1)
            stop("Some sample domain indicators (dom) are not defined in population domain indicators.")
      }
   }
# Calculate HT direct estimator for sampled domains, using the estimator
# selected by `type` above.
   nds <-rep(0,Dsample) # domain sample sizes
   dirds <-rep(0,Dsample) # domain direct estimators
   vardirds <-rep(0,Dsample) # variances of direct estimators
   for (d in 1:Dsample)
   {
      yd <- y[dom==did[d]]
      nds[d] <- length(yd)
      if (type==1)
      {
         sweightd <- sweight[dom==did[d]]
         domsized <- domsize[(domsize[,1]==did[d]),2]
         dirds[d] <- sum(yd*sweightd)/domsized
# Approximated unbiased estimator of variance of HT direct estimator
         vardirds[d]<-sum(sweightd*(sweightd-1)*(yd^2))/(domsized^2)
      } else
      if (type==2)
      {
# SRS without replacement: sample mean with finite-population correction (1-fd)
         domsized <- domsize[(domsize[,1]==did[d]),2]
         fd <- nds[d]/domsized
         Sd2 <- var(yd)
         dirds[d] <- sum(yd)/nds[d]
         vardirds[d] <- (1-fd)*Sd2/nds[d]
      } else
      if (type==3)
      {
# Weighted estimator under with-replacement sampling
         sweightd <- sweight[dom==did[d]]
         domsized <- domsize[(domsize[,1]==did[d]),2]
         fd <- nds[d]/domsized
         dirds[d] <- sum(yd*sweightd)/domsized
         vardirds[d]<- sum((fd*sweightd*yd-dirds[d])^2)/nds[d]
      } else #type=4
      {
# SRS with replacement: plain sample mean and its variance
         Sd2 <- var(yd)
         dirds[d] <- sum(yd)/nds[d]
         vardirds[d] <- Sd2/nds[d]
      }
   }
# Direct estimator for non-sampled domains: when domsize lists more domains
# than were sampled, append them with NA estimates and sample size 0.
   if (!missingdomsize)
   {
      D <- nrow(domsize)
      if (D>Dsample)
      {
         missing <- rep(NA,D-Dsample)
         nd <- c(nds,rep(0,D-Dsample)) # Domain sample sizes (0 when not sampled)
         dird <- c(dirds,missing) # Domain direct estimators
         vardird <- c(vardirds,missing) # Variances of direct estimators
         domns <- rep(0,D-Dsample)
         j<-1
# Collect identifiers of the population domains absent from the sample
         for (i in 1:D)
         {
            domi <- domsize[i,1]
            if (sum(domi==did)==0)
            {
               domns[j] <- domi
               j <- j+1
            }
         }
         did <- c(did,domns)
      } else
      {
         dird <- dirds
         vardird <- vardirds
         nd <- nds
      }
   } else
   {
      dird <- dirds
      vardird <- vardirds
      nd <- nds
   }
# Percent coefficient of variation of the direct estimator
   cvdird<-100*sqrt(vardird)/dird
# Assemble the result, sorted by domain identifier
   result <- data.frame(Domain=did,SampSize=nd,Direct=dird,SD=sqrt(vardird),CV=cvdird)
   roworder <- order(result[,1])
   result <- result[roworder,]
   return(result)
}
|
a758f39a89c3c7fe17e26d9d28c3288288e2b38a
|
ca47052858a683345a6dac4da5c8df0e676fd3c3
|
/man/whichFishAdd.Rd
|
99d6422d80ba5241d78d33d7eeff0daa9e809a34
|
[
"MIT"
] |
permissive
|
GabrielNakamura/FishPhyloMaker
|
b9a54cf366cc73c7fb485474ff07211facaad5dc
|
fb3441de070c2099803c03eb923628e1847ff64e
|
refs/heads/main
| 2023-04-12T13:42:02.136319
| 2023-02-17T23:29:59
| 2023-02-17T23:29:59
| 336,899,540
| 6
| 6
|
NOASSERTION
| 2021-09-25T16:55:46
| 2021-02-07T22:01:58
|
R
|
UTF-8
|
R
| false
| true
| 952
|
rd
|
whichFishAdd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/whichFishAdd.R
\name{whichFishAdd}
\alias{whichFishAdd}
\title{Function to inform which species must be added to the mega-tree phylogeny in the insertion process.}
\usage{
whichFishAdd(data)
}
\arguments{
\item{data}{A data frame with three column containing the name of species (s), the Family (f) and Order (o). This
can be generated with function \code{\link{FishTaxaMaker}}}
}
\value{
A data frame containing a column informing at which level the species in data must be added.
}
\description{
Function to inform which species must be added to the mega-tree phylogeny in the insertion process.
}
\details{
This function can be used to know which species must be added in the insertion process
made by \code{\link{FishPhyloMaker}}.
}
\examples{
\donttest{
data("taxon_data_PhyloMaker")
res_test <- whichFishAdd(data = taxon_data_PhyloMaker)
}
}
|
6abd9d5a9f3f1d763ca8ed130caf0946eed10df2
|
e395badb85f0194d29053a5e6e810b2ab5f9b9b4
|
/server.R
|
f0b36edc2847b15b0d219f204fc4439cf5a1cb17
|
[] |
no_license
|
CurlySheep/558shiny
|
c020133af5ca8a05fa4235141de1b4aefa3856c2
|
2e69091a5b101af6b49e6a9497714affaf84bf55
|
refs/heads/main
| 2023-07-08T18:19:28.988133
| 2021-08-03T03:31:05
| 2021-08-03T03:31:05
| 391,017,769
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,803
|
r
|
server.R
|
# server.R for an air-quality dashboard.
# Tab 1: external links; Tab 2: data table + CSV download; Tab 3: plots and
# summaries for selected cities/variables; Tab 4: model fitting (lm, gbm,
# random forest) and prediction.
# NOTE(review): `air_data`, `hc_plot()` and `export` are referenced but not
# defined in this file -- presumably loaded in global.R; confirm there.
library(shiny)
library(shinydashboard)
library(DT)
library(ggplot2)
library(tidyverse)
library(highcharter)
library(gbm)
library(caret)
shinyServer(function(input, output, session) {
  #####################Tag 1####################
  # Create URL links to the data-source websites
  output$link1 <- renderUI({
    tagList("Health Commission of Wenzhou:", a("Note that all the Health Commission websites are in Chinese", href="http://wjw.wenzhou.gov.cn/"))
  })
  output$link2 <- renderUI({
    tagList("Shanghai Municipal Health Commission:", a("Click here!", href="https://wsjkw.sh.gov.cn/"))
  })
  output$link3 <- renderUI({
    tagList("Hangzhou Municipal Health Commission:", a("Click here!", href="http://wsjkw.hangzhou.gov.cn/"))
  })
  output$link4 <- renderUI({
    tagList("Xinyang Health Commission:", a("Click here!", href="http://wsjkw.xinyang.gov.cn/index.html"))
  })
  output$link5 <- renderUI({
    tagList("Hefei Municipal Health Commission:", a("IPs outside of the Mainland China may be banned from accessing this website and I don't know why", href="http://wjw.hefei.gov.cn/"))
  })
  output$link6 <- renderUI({
    tagList("Weather Underground:", a("The only website in English!", href="https://www.wunderground.com/"))
  })
  output$link7 <- renderUI({
    tagList("You can find detailed information about AQI here:", a("Wiki page.", href="https://en.wikipedia.org/wiki/Air_quality_index"))
  })
  #####################Tag 2#####################
  # Map the displayed city name to the short code used in air_data$city
  cityinput <- reactive({
    switch(input$city_selecte,
           "All" = "All",
           "Hangzhou" = "HZ",
           "Shanghai" = "SH",
           "Hefei" = "HF",
           "Wenzhou" = "WZ",
           "Xinyang" = "XY")
  })
  # Render the (optionally city-filtered) data table with the chosen columns
  output$dataf <- renderDataTable({
    index <- cityinput()
    if (index == "All"){
      air_data %>%
        select(city, date, input$var_selecte) %>%
        datatable()
    } else{
      air_data %>%
        filter(city==index) %>%
        select(city, date, input$var_selecte) %>%
        datatable()
    }
  })
  # CSV download of the same subset shown in the table
  output$down_dat <- downloadHandler(
    filename = function(){"Dataframe.csv"},
    content = function(fname){
      index <- cityinput()
      if (index == "All"){
        temp <- air_data %>%
          select(city, date, input$var_selecte)
        write.csv(temp, fname)
      } else{
        temp <- air_data %>%
          filter(city==index) %>%
          select(city, date, input$var_selecte)
        write.csv(temp, fname)
      }
    }
  )
  #####################Tag 3#####################
  # Subset air_data to the cities/variable chosen on tab 3
  # (no city selected -> all cities)
  Inputdata <- reactive({
    if(length(input$city_selecte_tag3)==0){
      return(air_data %>% select(city, date, input$var_selecte_tag3))
    }else{
      temp <- air_data %>%
        filter(city %in% input$city_selecte_tag3) %>%
        select(city, date, input$var_selecte_tag3)
      return(temp)
    }
  })
  # Build the ggplot object: box plot per city, otherwise a dodged bar chart
  Outputplot <- reactive({
    if (input$plot_select=="Box plot"){
      gplot <- ggplot(data = Inputdata()) + geom_boxplot(aes(x=city,y=Inputdata()[,input$var_selecte_tag3])) +
        theme_bw() + labs(x='City',y=input$var_selecte_tag3)
    #} else{
    #  if (input$plot_select=="Time series (line plot)"){
    #    gplot <- ggplot(data = Inputdata()) + geom_line(aes(x=date, y=Inputdata()[,input$var_selecte_tag3],
    #                                                        group=city,color=city),size=1.5) +
    #      theme_bw() + labs(x='Date',y=input$var_selecte_tag3)
    #    gplot <- hc_plot(Inputdata(), input$city_selecte_tag3, input$var_selecte_tag3)
    } else{
      gplot <- ggplot(data = Inputdata()) + geom_bar(aes(x=date, y=Inputdata()[,input$var_selecte_tag3],
                                                         fill=city), stat = "identity", position = "dodge") +
        theme_bw() + labs(x='Date',y=input$var_selecte_tag3)
    }
    return(gplot)
  })
  # Interactive highcharter plot; optionally with an export-menu button
  output$hc_plot <- renderHighchart({
    if (input$down_type=="Box/Bar plot"){
      hc_plot(Inputdata(), input$city_selecte_tag3, input$var_selecte_tag3)
    } else{
      hc_plot(Inputdata(), input$city_selecte_tag3, input$var_selecte_tag3) %>%
        hc_exporting(
          enabled = T,
          url = "https://export.highcharts.com",
          formAttributes = list(target = "_blank"),
          buttons = list(contextButton = list(
            text = "Export",
            theme = list(fill = "transparent"),
            menuItems = export
          ))
        )
    }
  })
  # Render the static ggplot built above
  output$plot_tag3 <- renderPlot({
    req(Outputplot())
    Outputplot()
  })
  # PNG download for the static Box/Bar plot
  output$down_plot <- downloadHandler(
    filename = function(){paste0("plot_tag3", ".png")},
    content = function(file){
      ggsave(file, plot = Outputplot())
    }
  )
  # Download function for highcharter (disabled)
  #output$down_hc_plot <- downloadHandler(
  #  filename = function(){paste0("hc_plot", ".png")},
  #  content = function(file){
  #    png(file, width=800, height=800)
  #    hc_plot(Inputdata(), input$city_selecte_tag3, input$var_selecte_tag3)
  #    dev.off()
  #  }
  #)
  # Per-city summary table: min, quartiles, mean, max, plus the user-chosen
  # quantile (input$perc), whose column is renamed after the fact
  output$table_tag3 <- renderTable({
    tempvar <- quo(!!sym(input$var_selecte_tag3))
    temp <- Inputdata() %>%
      select(city, !!tempvar) %>%
      group_by(city) %>%
      summarise(Min = min(!!tempvar), `1st Qu` = quantile(!!tempvar, 0.25) , Mean = mean(!!tempvar, na.rm=T),
                `3rd Qu` = quantile(!!tempvar, 0.75), Max = max(!!tempvar), inputqu = quantile(!!tempvar, input$perc))
    names(temp)[7] <- paste0(as.character(input$perc)," Qu")
    temp
  })
  # Pre-fill tab-4 inputs when the "Simple example" option is picked.
  # NOTE(review): the event expression is a comparison, so this observer fires
  # whenever its TRUE/FALSE value changes, not only on selection -- confirm
  # that is the intended trigger.
  observeEvent(input$var_select == 'Simple example', {
    updateSelectInput(session, inputId = "var_tag4", selected = if(input$var_select == 'Simple example'){c("AQI","PM10","CO")}else{NA})
    updateSliderInput(session, inputId = "cv_fold", value = if(input$var_select == 'Simple example'){5}else{3})
    updateSliderInput(session, inputId = "size", value = if(input$var_select == 'Simple example'){0.8}else{0.5})
  })
  #####################Tag 4#####################
  # Deterministic train/test split (fixed seed; proportion from input$size)
  Splitdata <- reactive({
    set.seed(233)
    index <- sample(1:nrow(air_data),size = input$size*nrow(air_data))
    train <- air_data[index,]
    test <- air_data[-index,]
    return(list(Train=train, Test=test))
  })
  # Build the model formula from the selected predictors
  # (none selected -> use the full fixed predictor set)
  formu <- reactive({
    if (length(input$var_tag4)==0){
      return(formula(paste0(input$Re_tag4,'~','AQI+level+PM2.5+PM10+SO2+CO+NO2+O3_8h+high_tem+low_tem')))
    } else{
      n <- length(input$var_tag4)
      temp <- paste0(input$var_tag4,c(rep("+",n-1),""))
      temp <- paste0(temp, collapse = "")
      return(formula(paste0(input$Re_tag4, '~', temp)))
    }
  })
  # Fit the linear model (runs only when the Go button is pressed)
  fit_lm <- eventReactive(input$gobutton,{
    fit.lm <- lm(formu(), data = Splitdata()[["Train"]])
    return(fit.lm)
  })
  # Fit the boosted tree model (gbm, 500 trees, CV folds from input)
  fit_Tree <- eventReactive(input$gobutton,{
    gbm.fit <- gbm(
      formula = formu(),
      distribution = "gaussian",
      data = Splitdata()[["Train"]],
      n.trees = 500,
      cv.folds = input$cv_fold,
      n.cores = NULL, # will use all cores by default
      verbose = FALSE
    )
    return(gbm.fit)
  })
  # Fit the random forest via caret with an mtry grid of 1..11
  fit_random <- eventReactive(input$gobutton,{
    trctrl <- trainControl(method = "repeatedcv", number=input$cv_fold, repeats=1)
    rf_grid <- expand.grid(mtry = 1:11)
    rf_train <- train(formu(),
                      data= Splitdata()[["Train"]],
                      method='rf',
                      trControl=trctrl,
                      tuneGrid = rf_grid,
                      preProcess=c("center", "scale"))
    return(rf_train)
  })
  # Output summary for linear model
  output$summary_lm <- renderPrint({
    if (input$gobutton){
      summary(fit_lm())
    }
  })
  # Output summary for boosted tree
  output$summary_tree <- renderPrint({
    if (input$gobutton){
      fit_Tree()
    }
  })
  # Output summary (tuning results) for random forest
  output$summary_random <- renderPrint({
    if (input$gobutton){
      fit_random()[["results"]]
    }
  })
  # Training RMSE and test-set error per model.
  # NOTE(review): the third column is labelled Test.MSE but is computed with
  # caret::RMSE(), i.e. it is a test RMSE -- consider renaming in the UI.
  output$RMSE <- renderTable({
    RMSE_lm <- sqrt(mean(fit_lm()$residuals^2))
    RMSE_Tree <- sqrt(mean((fit_Tree()$fit-Splitdata()[["Train"]][,input$Re_tag4])^2))
    RMSE_random <- min(fit_random()$results$RMSE)
    temp <- data.frame(method = c('Linear Regression', 'Boosted Tree', 'Random Forest'), RMSE = rep(NA,3),
                       `Test.MSE`=rep(NA,3))
    temp$RMSE <- c(RMSE_lm, RMSE_Tree, RMSE_random)
    Test <- Splitdata()[["Test"]]
    obs <- Test[,input$Re_tag4]
    MSE_lm <- RMSE(predict(fit_lm(), Test), obs)
    MSE_Tree <- RMSE(predict(fit_Tree(),n.trees =fit_Tree()$n.trees, Test), obs)
    MSE_random <- RMSE(predict(fit_random()$finalModel, Test),obs)
    temp$`Test.MSE` <- c(MSE_lm, MSE_Tree, MSE_random)
    temp
  })
  # Predict from user-entered feature values with the chosen model.
  # A one-row template is cloned from air_data so factor levels/columns match.
  # NOTE(review): assigning an eventReactive directly to output$predict is
  # unusual -- outputs are normally wrapped in a render* function; confirm
  # the corresponding ui.R output binding works as intended.
  output$predict <- eventReactive(input$prebutton, {
    temp <- air_data[1,]
    temp$AQI <- input$AQI
    temp$level <- input$level
    temp$PM2.5 <- input$PM2
    temp$PM10 <- input$PM10
    temp$SO2 <- input$SO2
    temp$CO <- input$CO
    temp$NO2 <- input$NO2
    temp$O3_8h <- input$O3
    temp$high_tem <- input$high_tem
    temp$low_tem <- input$low_tem
    if (input$model_select=="Linear Regression"){
      return(as.numeric(predict(fit_lm(),temp)))
    } else{
      if (input$model_select=="Boosted Tree"){
        return(as.numeric(predict(fit_Tree(),n.trees =fit_Tree()$n.trees,temp)))
      } else{
        return(as.numeric(predict(fit_random()$finalModel,temp)))
      }
    }
  })
})
|
7395b23a9c268ebfb7d3ffa8b98ef3366e7dc5bb
|
5d249985365879bbaf10c47ac1b1fff6c8347724
|
/plot1.R
|
ac19ca0573dc9a5c4520eee32657abeff0369450
|
[] |
no_license
|
vanitu/ExData_Plotting1
|
e4a4a6f68eef2b20a2d689aeca99c2a767cf9fe1
|
52367a3067dabc7f5c3f981c499c9029f61cc8bc
|
refs/heads/master
| 2021-01-17T22:33:53.831093
| 2016-05-31T06:35:46
| 2016-05-31T06:35:46
| 60,061,153
| 0
| 0
| null | 2016-05-31T05:41:36
| 2016-05-31T05:41:35
| null |
UTF-8
|
R
| false
| false
| 362
|
r
|
plot1.R
|
# Histogram of Global Active Power (1-2 Feb 2007), saved to plot1.png.
library(dplyr)
library(ggplot2)
library(data.table)

# Read the pre-filtered consumption data; "?" and ",," mark missing values.
power_data <- fread('consumpt_1-2Feb2007.csv', na.strings = c("?", ",,"))

# Draw the histogram on the screen device first.
hist(power_data$Global_active_power,
     xlab = "Global Active Power,(kilowatts)",
     col = 'green',
     main = 'Global Active Power')

# Copy the screen plot to a 480x480 PNG device, then close that device.
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
c4a6a74cb1314abcfaf0fb99cbca8f5cfd8a564e
|
6b1f60e568efe261b11e9bd0cb076f29551442fa
|
/Prediction_of_air_pollution_index_based_on_history/Arimamodel.R
|
fcb2d66441ea3db2a668df73b6614b8037b73ced
|
[] |
no_license
|
Rishabh1998/Prediction_of_air_pollution_index_based_on_history
|
beada495f2f55e9294c052e028bbf35d57b50365
|
43297ae894fc1fb308f07acdaf2adf81c53f76f7
|
refs/heads/master
| 2020-03-30T11:56:22.722950
| 2018-07-25T16:11:12
| 2018-07-25T16:11:12
| 151,200,725
| 0
| 0
| null | 2018-10-02T04:38:01
| 2018-10-02T04:36:30
|
R
|
UTF-8
|
R
| false
| false
| 13,495
|
r
|
Arimamodel.R
|
packages <- c("dplyr", "lubridate", "ggplot2", "hydroGOF", "e1071", "forecast", "tseries", "padr")
if(length(setdiff(packages, rownames(installed.packages()))) > 0){
install.packages(setdiff(packages, rownames(installed.packages())))
}
lapply(packages, require, character.only = TRUE)
setwd("C:/Users/Jhingalala/Desktop")
dir.create("Kanpur_Vansh")
setwd("Kanpur_Vansh")
df <- read.csv("C:/Users/Jhingalala/Downloads/aqi-data-kanpur.csv", header = FALSE, na.strings = " None", col.names = c("Site_Code", "Place", "Pollutant", "Time", "pollution_Level"))
df <- df[,-2:-1]
ggplot(df, aes(Time, pollution_Level)) + geom_line() + ylab("Pollution Level") +
xlab("Time")
df$Time <- dmy_hm(df$Time)
minute(df$Time) <- 0
df1<- df[df$Time %within% interval(ymd("2012-04-02"), ymd_hm("2017-12-31 23:00")),]
df1$Pollutant <- as.factor(trimws(df1$Pollutant))
# df$Pollutant <- (trimws(df$Pollutant)) #character type output
dfsplit <- split(df1, df1$Pollutant)
#lapply(dfsplit, summary)
#lapply(dfsplit, str)
ggplot(dfsplit$CO, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("CO")
ggplot(dfsplit$SO2, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("SO2")
ggplot(dfsplit$NO2, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("NO2")
ggplot(dfsplit$PM2.5, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("PM2.5")
ggplot(dfsplit$OZONE, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("OZONE")
dfsplit <- lapply(dfsplit,
function(x){
if(hour(min(x$Time)) != 0){
x <- x[x$Time %within% interval(ymd(paste(year(min(x$Time)),"-",month(min(x$Time)),"-",day(min(x$Time)) + 1)), ymd_hm("2017-12-31 23:00")),]
}else{
x<-x
}
x <- x[-1]
}
)
# lapply(dfsplit,dim)
# --- Regularise each pollutant series onto a complete hourly grid ---
# This replaces five identical copy-pasted blocks (OZONE, CO, NO2, PM2.5, SO2)
# with one helper applied to each series.
#
# For one series (columns: Time, pollution_Level):
#   1. pad() inserts NA rows so Time forms a complete hourly sequence.
#   2. Any remaining adjacent rows whose timestamps are still not exactly one
#      hour apart (e.g. duplicated hours after rounding) are merged: average
#      the two readings when both exist, otherwise carry the non-missing one,
#      then drop the first row of the pair.
#
# NOTE(review): rows are removed inside the loop over `k`, so the indices
# collected beforehand shift by one per removal.  This exactly reproduces the
# original scripts' behaviour; confirm the shift is intended before reuse.
fix_hourly_gaps <- function(series) {
  series <- pad(series, interval = "hour")
  k <- c()
  for (i in 1:(length(series$Time) - 1)) {
    if (as.integer(difftime(series$Time[i + 1], series$Time[i], units = "hours")) != 1) {
      k <- c(k, i)
    }
  }
  for (i in k) {
    if (!is.na(series[i, 2]) && !is.na(series[i + 1, 2])) {
      # both readings present: replace the pair with their mean
      series[i, 2] <- (series[i, 2] + series[i + 1, 2]) / 2
      series[i + 1, 2] <- series[i, 2]
    } else if (is.na(series[i + 1, 2])) {
      # only the first reading present: carry it forward
      series[i + 1, 2] <- series[i, 2]
    } else {
      # only the second reading present: carry it backward
      series[i, 2] <- series[i + 1, 2]
    }
    series <- series[-1 * i, ]  # drop the first row of the merged pair
  }
  series
}
dfsplit$OZONE <- fix_hourly_gaps(dfsplit$OZONE)
dfsplit$CO    <- fix_hourly_gaps(dfsplit$CO)
dfsplit$NO2   <- fix_hourly_gaps(dfsplit$NO2)
dfsplit$PM2.5 <- fix_hourly_gaps(dfsplit$PM2.5)
dfsplit$SO2   <- fix_hourly_gaps(dfsplit$SO2)
# Insert `newrow` into `existingDF` so that it becomes row `r`, pushing the
# existing rows at positions r..n down by one.  Row names are reset to 1..n+1.
# Implementation: append the row, then reorder with a fractional sort key
# (r - 0.5) that slots the appended row just before position r.
insertRow2 <- function(existingDF, newrow, r) {
  augmented <- rbind(existingDF, newrow)
  slot_keys <- c(1:(nrow(augmented) - 1), r - 0.5)
  augmented <- augmented[order(slot_keys), ]
  row.names(augmented) <- 1:nrow(augmented)
  return(augmented)
}
# --- Re-plot each series after gap repair ---
ggplot(dfsplit$CO, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("CO")
ggplot(dfsplit$SO2, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("SO2")
ggplot(dfsplit$NO2, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("NO2")
ggplot(dfsplit$PM2.5, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("PM2.5")
ggplot(dfsplit$OZONE, aes(Time, pollution_Level)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("OZONE Data Wrangled")
# Disabled alternative gap-filler: inserts explicit NA rows via insertRow2
# instead of using padr::pad.  Kept for reference; never executed.
if(FALSE){
dfsplit <- lapply(dfsplit,
function(x){
for(i in seq(length(x$Time)-1)){
if(as.integer(difftime(x$Time[i+1], x$Time[i], units = "hours")) != 1){
z <- seq.POSIXt((x$Time[i] + hours(1)), (x$Time[i+1] - hours(1)), by = "hours")
for(j in 1:length(z)){
x <- insertRow2(x, list(x$Pollution[i], z[j], NA), i + j)
}
}
}
}
)
}
# lapply(dfsplit, summary)
# --- Outlier cleaning and daily moving average per pollutant ---
# tsclean() interpolates missing values and suppresses outliers; ma(order=24)
# adds a centred 24-hour moving average of the cleaned series.
dfsplit1 <- lapply(dfsplit,
function(x){
count_ts = ts(x[,c('pollution_Level')])
x$clean_pollution_mean = tsclean(count_ts)
x$clean_pollution_mean_ma = ma(x$clean_pollution_mean, order = 24) # using the clean count with no outliers
# x$clean_pollution_mean_ma_weekly = ma(x$clean_pollution_mean, order=24 * 7)
# x$clean_pollution_mean_ma_monthly = ma(x$clean_pollution_mean, order = 24 * 30)
# considering month of 30 days
return(x)
}
)
# lapply(dfsplit, head)
# lapply(dfsplit, summary)
# lapply(dfsplit, dim)
# lapply(dfsplit1, head)
# lapply(dfsplit1, summary)
# lapply(dfsplit1, dim)
# --- Disabled scratch code (never executed) ---
# Per-pollutant version of the cleaning/smoothing done above by lapply, plus
# overlay plots of the cleaned series and their moving averages.
# NOTE(review): the columns named *_ma_weekly below use order=24 (daily), not
# order=24*7 -- mislabelled if this block is ever re-enabled.
if(FALSE){
count_ts = ts(dfsplit$CO[,c('pollution_mean')])
dfsplit$CO$clean_pollution_mean = tsclean(count_ts)
count_ts = ts(dfsplit$NO2[,c('pollution_mean')])
dfsplit$NO2$clean_pollution_mean = tsclean(count_ts)
count_ts = ts(dfsplit$OZONE[,c('pollution_mean')])
dfsplit$OZONE$clean_pollution_mean = tsclean(count_ts)
count_ts = ts(dfsplit$PM2.5[,c('pollution_mean')])
dfsplit$PM2.5$clean_pollution_mean = tsclean(count_ts)
count_ts = ts(dfsplit$SO2[,c('pollution_mean')])
dfsplit$SO2$clean_pollution_mean = tsclean(count_ts)
ggplot(dfsplit1$CO, aes(Time, clean_pollution_mean)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("CO")
ggplot(dfsplit1$SO2, aes(Time, clean_pollution_mean)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("SO2")
ggplot(dfsplit1$NO2, aes(Time, clean_pollution_mean)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("NO2")
ggplot(dfsplit1$PM2.5, aes(Time, clean_pollution_mean)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("PM2.5")
ggplot(dfsplit1$OZONE, aes(Time, clean_pollution_mean)) + geom_line() + ylab("pollution Level") +
xlab("Time") + ggtitle("OZONE")
dfsplit1$CO$clean_pollution_mean_ma = ma(dfsplit1$CO$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$SO2$clean_pollution_mean_ma = ma(dfsplit1$SO2$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$NO2$clean_pollution_mean_ma = ma(dfsplit1$NO2$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$OZONE$clean_pollution_mean_ma = ma(dfsplit1$OZONE$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$PM2.5$clean_pollution_mean_ma = ma(dfsplit1$PM2.5$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$CO$clean_pollution_mean_ma_weekly = ma(dfsplit1$CO$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$SO2$clean_pollution_mean_ma_weekly = ma(dfsplit1$SO2$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$NO2$clean_pollution_mean_ma_weekly = ma(dfsplit1$NO2$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$OZONE$clean_pollution_mean_ma_weekly = ma(dfsplit1$OZONE$clean_pollution_mean, order=24) # using the clean count with no outliers
dfsplit1$PM2.5$clean_pollution_mean_ma_weekly = ma(dfsplit1$PM2.5$clean_pollution_mean, order=24) # using the clean count with no outliers
ggplot() +
geom_line(data = dfsplit1$CO, aes(x = Time, y = clean_pollution_mean, colour = "Counts")) +
geom_line(data = dfsplit1$CO, aes(x = Time, y = clean_pollution_mean_ma, colour = "Hourly Moving Average")) +
ylab('Cleaned Pollutant') + ggtitle('CO')
ggplot() +
geom_line(data = dfsplit1$PM2.5, aes(x = Time, y = clean_pollution_mean, colour = "Counts")) +
geom_line(data = dfsplit1$PM2.5, aes(x = Time, y = clean_pollution_mean_ma, colour = "Hourly Moving Average")) +
ylab('Cleaned Pollutant') + ggtitle('PM2.5')
ggplot() +
geom_line(data = dfsplit1$NO2, aes(x = Time, y = clean_pollution_mean, colour = "Counts")) +
geom_line(data = dfsplit1$NO2, aes(x = Time, y = clean_pollution_mean_ma, colour = "Hourly Moving Average")) +
ylab('Cleaned Pollutant') + ggtitle('NO2')
ggplot() +
geom_line(data = dfsplit1$OZONE, aes(x = Time, y = clean_pollution_mean, colour = "Counts")) +
geom_line(data = dfsplit1$OZONE, aes(x = Time, y = clean_pollution_mean_ma, colour = "Hourly Moving Average")) +
ylab('Cleaned Pollutant') + ggtitle('OZONE')
ggplot() +
geom_line(data = dfsplit1$SO2, aes(x = Time, y = clean_pollution_mean, colour = "Counts")) +
geom_line(data = dfsplit1$SO2, aes(x = Time, y = clean_pollution_mean_ma, colour = "Hourly Moving Average")) +
ylab('Cleaned Pollutant') + ggtitle('SO2')
}
############### Evaluating for OZONE ##################################
# Build a ts with a 30-day seasonal period from the daily moving average,
# decompose it (STL), deseasonalise, and fit ARIMA models for forecasting.
count_ma = ts(na.omit(dfsplit1$OZONE$clean_pollution_mean_ma), frequency = 30 * 24) # time series
decomp = stl(count_ma, s.window = "periodic")
deseasonal_cnt <- seasadj(decomp)
plot(decomp)
# Augmented Dickey-Fuller Test
adf.test(count_ma, alternative = "stationary") # t-test
Acf(count_ma, main='')
Pacf(count_ma, main='')
# First-difference the deseasonalised series and re-test stationarity.
count_d1 = diff(deseasonal_cnt, differences = 1)
plot(count_d1)
adf.test(count_d1, alternative = "stationary")
Acf(count_d1, main='ACF for Differenced Series')
Pacf(count_d1, main='PACF for Differenced Series')
# Automatic non-seasonal ARIMA, then a hand-picked ARIMA(1,1,24) alternative.
auto.arima(deseasonal_cnt, seasonal=FALSE)
fit<-auto.arima(deseasonal_cnt, seasonal=FALSE)
tsdisplay(residuals(fit), lag.max=200, main='(1,1,2) Model Residuals')
accuracy(fit)[2]
fit2 = arima(deseasonal_cnt, order=c(1,1,24))
accuracy(fit2)[2]
fit2
tsdisplay(residuals(fit2), lag.max=200, main='Seasonal Model Residuals')
# Forecast 30 days (24*30 hourly steps) ahead with the hand-picked model.
fcast <- forecast(fit2, h=24 * 30)
plot(fcast)
###################### Seasonal Changes #####################################
# Let auto.arima also choose a seasonal component, then forecast 60 days.
fit_w_seasonality = auto.arima(deseasonal_cnt, seasonal=TRUE)
fit_w_seasonality
tsdisplay(residuals(fit_w_seasonality), lag.max=200, main='(1,1,2) Model Residuals')
seas_fcast <- forecast(fit_w_seasonality, h=60 * 24)
plot(seas_fcast)
|
f5fcca3f2422df5e86f3a261438f6fe130d7f31d
|
a0b77be4c1b958f282aa4882fdd38f06c837cae6
|
/SImulations fcn.R
|
a9ae5c25e9519248055257409b4c1b2185c3c582
|
[] |
no_license
|
katherineliuu/rf-research
|
dfbb515600259d904ac39021287fb6ef3ae75be7
|
af614811d76f75139c77b66d1bce03ec99d047f3
|
refs/heads/master
| 2020-06-06T17:31:27.006344
| 2019-08-05T16:37:43
| 2019-08-05T16:37:43
| 192,806,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,091
|
r
|
SImulations fcn.R
|
# Simulation studies to investigate RF and LM prediction intervals
# NOTE(review): RFOOBInterval() below calls randomForest(), which lives in the
# `randomForest` package, not in randomForestSRC -- it must also be attached.
library(randomForestSRC)
# 8 rows = {LR, RF} for each of 4 simulations; 3 cols = MSE, mean PI width, coverage.
avgResults <- array(NA, dim=c(8,3)) # average MSE, meanWIdth, coverage rate for 4 simulations
# Out-of-bag (OOB) prediction intervals for a random forest regression.
#
# Args:
#   x, y      training predictors / response
#   x0        test predictors to produce intervals for
#   ntree     number of trees
#   alpha     miscoverage level (0.10 -> 90% intervals)
#   symmetry  TRUE  -> symmetric interval from the (1-alpha) quantile of |OOB error|
#             FALSE -> asymmetric interval from the alpha/2 and 1-alpha/2
#                      quantiles of the signed OOB errors
#   mtry, nodesize  forwarded to randomForest(); defaults mirror its own.
#
# Returns a list of n x 1 matrices: pred (test predictions), lo/up (interval
# bounds), fit (in-sample predictions on x).
# NOTE(review): requires the `randomForest` package (not randomForestSRC).
RFOOBInterval <- function(x,
y,
x0,
ntree = 1000,
alpha = 0.10,
symmetry = TRUE,
mtry = if (!is.null(y) && !is.factor(y))
max(floor(ncol(x)/3), 1) else floor(sqrt(ncol(x))),
nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1){
x <- as.matrix(x)
x0 <- as.matrix(x0)
colnames(x) <- 1:ncol(x)
rownames(x) <- 1:nrow(x)
colnames(x0) <- 1:ncol(x0)
rownames(x0) <- 1:nrow(x0)
n = nrow(x)
ntest = nrow(x0)
rf = randomForest(x=x, y=y, ntree=ntree, mtry = mtry, nodesize = nodesize,
keep.forest=TRUE, keep.inbag=TRUE)
test_pred <- predict(rf, x0)
# OOB residuals (rf$predicted is the OOB prediction per training point).
oob_abs_error = sort(abs(y - rf$predicted))
oob_error = sort(y - rf$predicted)
upper_pred = rep(NA, ntest)
lower_pred = rep(NA, ntest)
## symmetry = TRUE leads to the symmetric OOB Intervals
## symmetry = FALSE leads to the standard OOB Intervals
if(symmetry){
for (i in 1:ntest){
upper_pred[i] = test_pred[i] + quantile(oob_abs_error,1-alpha)
lower_pred[i] = test_pred[i] - quantile(oob_abs_error,1-alpha)
}
}
else{
for (i in 1:ntest){
upper_pred[i] = test_pred[i] + quantile(oob_error, 1-alpha/2)
lower_pred[i] = test_pred[i] + quantile(oob_error, alpha/2)
}
}
return(list(pred = matrix(test_pred,ntest,1),
lo = matrix(lower_pred,ntest,1),
up = matrix(upper_pred,ntest,1),
fit = matrix(predict(rf,x),n,1)))
} #### function OOB prediction interval
# Compare linear-regression and random-forest (quantile regression forest)
# prediction intervals on one train/test split.
#
# Args: train/test data frames with response column `y`; nodesize for quantreg.
# Returns a 2x3 matrix: rows = {LR, RF}; cols = {MSPE, mean PI width, coverage}.
# NOTE(review): results[2,1] indexes err.rate with the GLOBAL `n` (n/2 =
# training size in the sims); this breaks if `n` is not set by the caller's
# environment -- confirm before reusing outside these simulations.
# NOTE(review): LR intervals use level=0.95 while RF uses the 2.5%/97.5%
# quantiles, so both target 95% coverage.
getResults <- function(train, test, nodesize) {
results <- array(NA, dim=c(2,3)) #MSE, meanWIdth, coverage rate
#LR
M <- lm(data=train, y~.)
yhat <- predict(M, newdata=test) # predictions
PI<- predict(M, newdata=test, interval="prediction", level=0.95) # prediction intervals with 95%
results[1,1] <- mean((yhat - test$y)^2) #MSE
results[1,2] <- mean(PI[,3]-PI[,2]) #meanWidth
Rlr <- test$y>=PI[,2] & test$y<=PI[,3]
results[1,3] <- sum(Rlr)/nrow(test) #percentPI
#RF
o <- quantreg(y ~ ., train, splitrule = "mse", nodesize = nodesize) ## quantile regression with mse splitting
o.test <- quantreg(object=o, newdata=test) # test call
quant.dat <- get.quantile(o.test, c(.025, .975))
results[2,1] <- o.test$err.rate[n/2] #MSE
results[2,2] <- mean(quant.dat[,2]-quant.dat[,1]) #meanWidth
Rrf <- test$y>=quant.dat[,1] & test$y<=quant.dat[,2]
results[2,3] <- sum(Rrf)/nrow(test) #percentPI
return(results)
} ##function
# Number of Monte-Carlo repetitions per simulation.
iter <- 10
# NOTE(review): these two shared containers are superseded by per-simulation
# arrays (allResults1..allResults4); RFOOBInterval() returns a list, so a
# 2x3 numeric slice of OOBResults cannot hold its output.
allResults <- array(NA, dim=c(2,3, iter))
OOBResults <- array(NA, dim=c(2,3,iter))
###################################################################
# Simulation 1: Linear Model (one variable)
#
# Fixes over the original:
#  * results were written into `allResults`/`OOBResults` but read from the
#    never-defined `allResults1` (run-time error); they are now collected in
#    `allResults1` directly.
#  * RFOOBInterval() returns a list of four matrices, which cannot be stored
#    in a 2x3 numeric array slice; per-iteration output goes into a list.
set.seed(06232016) #set seed
allResults1 <- array(NA, dim = c(2, 3, iter))  # {LR,RF} x {MSE,width,coverage} x iteration
OOBResults1 <- vector("list", iter)            # one RFOOBInterval() result per iteration
for(i in 1:iter){
  n <- 100  #number of observations (training and test set combined)
  sig <- 5  #error standard deviation
  x <- runif(n, 0, 10)  #generate x variable Unif(0,10) distribution
  e <- rnorm(n, 0, sig) #generate error term
  y <- 2*x + 3 + e      #generate response values
  data <- data.frame(x, y)     #combine explanatory and response variables
  train <- data[1:(n/2), ]     #first half of observations as training data
  test <- data[(n/2 + 1):n, ]  #second half of observations as test data
  allResults1[,,i] <- getResults(train, test, 10)
  OOBResults1[[i]] <- RFOOBInterval(x = train$x, y = train$y, x0 = test$x, symmetry = TRUE)
}
allResults1
OOBResults1
# Average each metric over the iterations; rows 1-2 of avgResults = Sim 1.
avgResults[1, ] <- rowMeans(allResults1[1, , ])  # linear regression
avgResults[2, ] <- rowMeans(allResults1[2, , ])  # random forest
avgResults
###################################################################
# Simulation 2: Nonlinear Model (one variable)
#
# Fixes over the original: the loop stored into `allResults[,,iter]` --
# the same final slice on every pass, and in the wrong array -- while the
# summary code read the never-defined `allResults2`.  Results are now stored
# per-iteration in `allResults2`.
set.seed(06232021) #set seed
allResults2 <- array(NA, dim = c(2, 3, iter))  # {LR,RF} x {MSE,width,coverage} x iteration
for(i in 1:iter){
  n <- 2000  #number of observations (training and test set combined)
  sig <- 5   #error standard deviation
  x <- runif(n, 0, 10)  #generate x variable Unif(0,10) distribution
  e <- rnorm(n, 0, sig) #generate error term
  y <- 0.1*(x-7)^2 - 3*cos(x) + 5*log(abs(x)) + 3 + e #generate response values
  data <- data.frame(x, y)     #combine explanatory and response variables
  train <- data[1:(n/2), ]     #first half of observations as training data
  test <- data[(n/2 + 1):n, ]  #second half of observations as test data
  allResults2[,,i] <- getResults(train, test, 10)
}
allResults2
# Rows 3-4 of avgResults = Sim 2.
avgResults[3, ] <- rowMeans(allResults2[1, , ])  # linear regression
avgResults[4, ] <- rowMeans(allResults2[2, , ])  # random forest
avgResults
###################################################################
# Simulation 3: Linear Model (multivariate)
#
# Fixes over the original: the loop stored into `allResults[,,iter]` (same
# slice every pass) while the summary code read the never-defined
# `allResults3`.  Results are now stored per-iteration in `allResults3`.
set.seed(06232012) #set seed
allResults3 <- array(NA, dim = c(2, 3, iter))  # {LR,RF} x {MSE,width,coverage} x iteration
for(i in 1:iter){
  n <- 2000  #number of observations (training and test set combined)
  xvec <- runif(n*10, 0, 10) #generate observations for 10 explanatory variables
  X <- matrix(xvec, ncol=10) #arrange explanatory variables into matrix
  sig <- 5   #error standard deviation
  e <- rnorm(n, 0, sig) #generate error term
  # linear signal in 5 of the 10 predictors; the rest are noise
  y <- 2*X[,1] + 3*X[,4] + 4*X[,6] - 3*X[,7] + X[,9] + e
  data <- data.frame(X, y)     #combine explanatory and response variables
  train <- data[1:(n/2), ]     #first half of observations as training data
  test <- data[(n/2 + 1):n, ]  #second half of observations as test data
  allResults3[,,i] <- getResults(train, test, 1)
}
allResults3
# Rows 5-6 of avgResults = Sim 3.
avgResults[5, ] <- rowMeans(allResults3[1, , ])  # linear regression
avgResults[6, ] <- rowMeans(allResults3[2, , ])  # random forest
avgResults
###################################################################
# Simulation 4: Nonlinear Model (multivariate)
#
# Fixes over the original: `allResults4` was assigned inside the loop without
# ever being allocated, and always at slice [,,iter].  The array is now
# allocated up front and filled per-iteration.
set.seed(06231) #set seed
allResults4 <- array(NA, dim = c(2, 3, iter))  # {LR,RF} x {MSE,width,coverage} x iteration
for(i in 1:iter){
  n <- 2000  #number of observations (training and test set combined)
  xvec <- runif(n*10, 0, 10) #generate observations for 10 explanatory variables
  X <- matrix(xvec, ncol=10) #arrange explanatory variables into matrix
  sig <- 5   #error standard deviation
  e <- rnorm(n, 0, sig) #generate error term
  # nonlinear signal with an interaction and a quintic term
  y <- (X[,1]-6)^2 + 12*cos(X[,3]) + (X[,7]-5)*(X[,8]-3) + 0.02*(X[,10]-5)^5 + e
  data <- data.frame(X, y)     #combine explanatory and response variables
  train <- data[1:(n/2), ]     #first half of observations as training data
  test <- data[(n/2 + 1):n, ]  #second half of observations as test data
  allResults4[,,i] <- getResults(train, test, 1)
}
allResults4
# Rows 7-8 of avgResults = Sim 4.
avgResults[7, ] <- rowMeans(allResults4[1, , ])  # linear regression
avgResults[8, ] <- rowMeans(allResults4[2, , ])  # random forest
avgResults
# Label the summary table: one LR row and one RF row per simulation.
colnames(avgResults) <- c("MSPE", "PIWidth", "CoverageRate")
rownames(avgResults) <- c("Sim1.LR", "Sim1.RF",
"Sim2.LR", "Sim2.RF",
"Sim3.LR", "Sim3.RF",
"Sim4.LR", "Sim4.RF" )
#plot for coverage rate
# NOTE(review): `len`, `supp`, `dose` are not defined in this script (they
# look like ToothGrowth columns), and supp*dose yields more than 4 groups --
# this boxplot appears to be leftover demo code and will error as written.
boxplot(len~supp*dose, notch=TRUE,
main = "Multiple boxplots for comparision",
at = c(1,2,3,4),
names = c("1", "2", "3", "4"),
col=(c("navy","maroon")))
|
b4954b770e730b9ddd5f17a5aee7e394fc9db11f
|
f81ac43a1d02013a9cb9eebc2a7d92da4cae9169
|
/tests/testthat/test_category.R
|
8015dfd756ebb6635000abd5313d7041af2090c8
|
[] |
no_license
|
gdemin/expss
|
67d7df59bd4dad2287f49403741840598e01f4a6
|
668d7bace676b555cb34d5e0d633fad516c0f19b
|
refs/heads/master
| 2023-08-31T03:27:40.220828
| 2023-07-16T21:41:53
| 2023-07-16T21:41:53
| 31,271,628
| 83
| 15
| null | 2022-11-02T18:53:17
| 2015-02-24T17:16:42
|
R
|
UTF-8
|
R
| false
| false
| 3,443
|
r
|
test_category.R
|
# testthat suite for expss::as.category() / is.category().
# Reference outputs live under rds/ and are compared with update = FALSE
# (i.e. never regenerated here).
context("category")
# Pin the pre-3.6 sampling algorithm so the random matrices match the stored rds.
suppressWarnings(RNGversion("3.5.0"))
set.seed(123)
dichotomy_matrix = matrix(sample(0:1,40,replace = TRUE,prob=c(.6,.4)),nrow=10)
colnames(dichotomy_matrix) = c("Milk","Sugar","Tea","Coffee")
# All-zero dichotomy matrix: exercises the no-selected-categories path.
dichotomy_matrix[] = 0
expect_equal_to_reference(as.category(dichotomy_matrix, prefix = "zero", compress=TRUE),
"rds/category2df.rds", update = FALSE)
expect_equal_to_reference(as.category(dichotomy_matrix, prefix = "zero", compress=FALSE),
"rds/category3df.rds", update = FALSE)
expect_true(is.category(as.category(dichotomy_matrix, prefix = "zero", compress=FALSE)))
# Degenerate inputs: zero columns, zero-length vector, column/row vectors.
expect_equal_to_reference(as.category(dichotomy_matrix[,FALSE, drop = FALSE], compress = FALSE),
"rds/category4df.rds", update = FALSE)
expect_identical(as.category(numeric(0),compress=TRUE),
structure(list(V1 = integer(0)), .Names = "V1", row.names = integer(0), class = c("category",
"data.frame")))
expect_equal_to_reference(as.category(t(t(c(0,1,0,1,0,1))),compress=TRUE),
"rds/category5df.rds", update = FALSE)
expect_equal_to_reference(as.category(t(c(0,1,0,1,0,1)),compress=TRUE),
"rds/category6df.rds", update = FALSE)
expect_equal_to_reference(as.category(c(0,1,0,1,0,1),compress=TRUE),
"rds/category5df.rds", update = FALSE)
# Same random matrix again, now carrying expss variable labels.
set.seed(123)
dichotomy_matrix = matrix(sample(0:1,40,replace = TRUE,prob=c(.6,.4)),nrow=10)
colnames(dichotomy_matrix) = c("Milk","Sugar","Tea","Coffee")
# data.frame with variable labels
dichotomy_dataframe = as.data.frame(dichotomy_matrix)
colnames(dichotomy_dataframe) = paste0("product_", 1:4)
var_lab(dichotomy_dataframe[[1]]) = "Milk"
var_lab(dichotomy_dataframe[[2]]) = "Sugar"
var_lab(dichotomy_dataframe[[3]]) = "Tea"
var_lab(dichotomy_dataframe[[4]]) = "Coffee"
expect_equal_to_reference(as.category(dichotomy_dataframe, prefix = "products_",compress=TRUE),
"rds/category5.rds", update = FALSE)
# A column without a variable label should fall back to its column name as the
# value label (product_4 = 4L).
dichotomy_dataframe2 = dichotomy_dataframe
var_lab(dichotomy_dataframe2[[4]]) = NULL
expect_identical(as.category(dichotomy_dataframe2, prefix = "products_",compress=TRUE),
add_val_lab(
as.category(dichotomy_dataframe, prefix = "products_",compress=TRUE),
c("product_4" = 4L)
)
)
# All-NA input collapses to a single all-NA column named "NA".
dich = as.data.frame(matrix(NA, nrow = 3, ncol = 3))
expect_identical(as.category(dich, compress = TRUE),
structure(list(`NA` = c(NA, NA, NA)),
.Names = "NA", row.names = c(NA,
-3L),
class = c("category", "data.frame")))
expect_identical(as.category(dich, compress = TRUE),
structure(list(`NA` = c(NA, NA, NA)), .Names = "NA", row.names = c(NA,
-3L), class = c("category", "data.frame"))
)
expect_identical(as.category(dich[FALSE, FALSE, drop = FALSE]),
structure(list(`NA` = logical(0)),
.Names = "NA",
row.names = integer(0),
class = c("category",
"data.frame")))
# Column names containing "|" (label|category syntax).
set.seed(123)
dichotomy_matrix = matrix(sample(0:1,40,replace = TRUE,prob=c(.6,.4)),nrow=10)
colnames(dichotomy_matrix) = c("Used product|Milk","Used product|Sugar",
"Used product|Tea","Used product|Coffee")
expect_equal_to_reference(
as.category(dichotomy_matrix),
"rds/category7.rds", update = FALSE
)
expect_equal_to_reference(
as.category(dichotomy_matrix, compress = TRUE),
"rds/category8.rds", update = FALSE
)
|
af95faedaad217f1c4598ebfb0a0ed02646d2192
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Matrix/examples/abIseq.Rd.R
|
a3353ab04882015e2b12f7e2d7ab17c245160a73
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
abIseq.Rd.R
|
# Auto-extracted example code from the Matrix package's abIseq.Rd help page.
library(Matrix)
### Name: abIseq
### Title: Sequence Generation of "abIndex", Abstract Index Vectors
### Aliases: abIseq abIseq1 c.abIndex
### Keywords: manip classes
### ** Examples
# abIseq1(from, to) must round-trip to the plain integer sequence.
stopifnot(identical(-3:20,
as(abIseq1(-3,20), "vector")))
# abIseq() with a step is expected to error; try() keeps the example running.
try( ## (arithmetic) not yet implemented
abIseq(1, 50, by = 3)
)
|
f4acca731c3309a1fbffe2669621eee7ca6e0475
|
e6052cfff5c65990ce6e2c901526b5accd574751
|
/tune_parameters.R
|
40de200c4ab35d19a06b3354e9f13cfeacd23a89
|
[] |
no_license
|
jstn2/UT-utilities
|
0a5eeb9698ff2310fd60e747a35b813afbab833d
|
19671dcce73623fbb38bc2dbc108af3561201f8d
|
refs/heads/master
| 2022-12-07T09:42:06.625386
| 2020-09-02T20:41:29
| 2020-09-02T20:41:29
| 272,289,620
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,066
|
r
|
tune_parameters.R
|
# ..........................................................................
# Make precision-recall curves for tuning the event detection parameters
#
# Justin DuRant
# ..........................................................................
library(plyr)
library(leaps)
library(stringr)
library(randomForest)
library(xts)
library(dplyr)
library(magrittr)
library(knitr)
library(data.table)
library(BBmisc)
library(plotly)
library(mltools) # for MCC
library(caret) # for confusionMatrix
library(manipulateWidget) # For making panels of interactive plots. Can't save with orca.
library(raster) # rowSums
library(orca)
library(zoo) # for rollapply
library(beepr) # for alert when a task is finished use beep()
# set to TRUE to export pdfs or svgs of plots
save_img <- TRUE
#save_img <- FALSE
# set to TRUE to show plots in RStudio plots or viewer window
# NOTE(review): the second assignment below overrides the first, so viewer
# is effectively FALSE; delete one line to make the intent explicit.
viewer <- TRUE
viewer <- FALSE
library(tidyverse)
# Directory of the currently running script.
# When run via Rscript, the path is taken from the "--file=" command-line
# argument; inside RStudio it falls back to the active editor document.
getCurrentFileLocation <- function(){
  cli_args <- commandArgs()
  file_args <- grep("^--file=", cli_args, value = TRUE)
  # keep only the part after the first "=", mirroring a key/value split
  this_file <- sub("^--file=", "", file_args)
  if (length(this_file)==0)
  {
    this_file <- rstudioapi::getSourceEditorContext()$path
  }
  return(dirname(this_file))
}
# Work relative to the script's own directory.
setwd(getCurrentFileLocation())
getwd()
# *^ check to make sure it is the correct path (where the script is stored)
data_dir <- paste0(getwd(),"/data")
# Loads dfs_tot / dfs_clean / bldg_names and the trained RF error models.
load(file.path(data_dir, "hourly_CEHW.RData"))
load(file.path(data_dir, "rf_mods701.RData"))
rf_mods <- rf_mods701
# directory to save tables and figures
tab_dir <- paste0(getwd(),"/table")
fig_dir <- paste0(getwd(),"/fig/prc")
if(!dir.exists(tab_dir)){dir.create(tab_dir)}
if(!dir.exists(fig_dir)){dir.create(fig_dir)}
# Adjust sizing of svg output
fig_width=417
fig_height=300
# Point to the functions that are stored in a separate script
src_dir <- getwd()
source(file=file.path(src_dir,'detect_event_function.R'))
source(file=file.path(src_dir,'plot_functions.R'))
# ///////////////////////////////////////////////
# roc and prec-rec curves
# ///////////////////////////////////////////////
# Make "event" variables in the df and mark technical errors
# Annotate each utility data frame with real-event flags (mark_events is
# defined in detect_event_function.R, sourced above).
for (i in 1:length(dfs_tot)){
dfs_tot[[i]] <- mark_events(dfs_tot[[i]], dfs_clean[[i]])
}
# Add synthetic events to the data one utility at a time, make rf predictions,
# detect events for that one parameter, save important stuff and loop through other utilities
# NOTE(review): the <<- assignments below deliberately set globals that the
# sourced detection functions read -- a fragile implicit interface.
switches <<- c("elec","cool","heat","water")
# How many events to add
num_events <<- 9
# Percentile of the time series to use for average event size
perc <<- 0.95
# duration of synthetic events to inject
min_eventdays <- 7
max_eventdays <- 14
min_eventlen <<- 24*min_eventdays
max_eventlen <<- 24*max_eventdays
# length of window for threshold calculation
len_days <<- 30
len_hrs <- 24*len_days
# length (hours) for moving average of point errors
avg_hrs <- 2
# length of window for euclidean distance calculation
ED_hrs <- 4
# The number of days to wait before triggering an "alarm" state from the
# synthetic events. This variable will be used in the PRC curves and other eval
# as the "true event" state for comparison with the detected events. For
# example, if action_len is set to one day, then this variable will be zero for
# the first day of the event and one for the rest of the event. If this length
# is longer than min_eventdays, the fuction that adds synthetic events will
# break.
action_len <<- 3
# Event flag post-processing:
# Require certain frequency of events (time_frac) over a longer period (action_len)
# and from a certain amount of error models (mod_frac) before raising an alarm
mod_frac <- .7
time_frac <- .95
#action_len <- 3
# Loop over these threshold values for the curves
#k_vals <- seq(-10,50,5)
k_vals <- seq(-5,45,10)
#k_vals <- seq(10,20,5)
# this function returns a List of 4 dataframes with results for cool,
# elec, heat, water. The time it takes depends on number of kvals and avgwindows
# Sweep the error-averaging window (avg_hrs == ED_hrs) and compute
# precision/recall results for each value via prec_rec() (sourced script).
# Returns list(cool, elec, heat, water) of data.tables, each tagged with the
# avg_window that produced its rows.
# NOTE(review): writes avg_hrs/ED_hrs with <<- because prec_rec() reads them
# as globals.
many_precrec <- function(df, mods, k_vals, avg_windows){
all_precrec <- list()
result_list <- list()
for (j in 1:length(avg_windows)){
avg_hrs <<- avg_windows[j]
ED_hrs <<- avg_windows[j]
result_list[[j]] <- prec_rec(df, mods, k_vals)
}
# Split each sweep result into per-utility tables and normalise column names
# (e.g. "Cool.Cool_tpr" -> "Cool_tpr").
for (g in 1:length(avg_windows)){
temp <- result_list[[g]]
c <- temp[,grep("Cool.", names(temp)), with=FALSE]
names(c) <- gsub("Cool.Cool","Cool",names(c))
c$avg_window <- rep(avg_windows[g], nrow(c))
e <- temp[,grep("Elec.", names(temp)), with=FALSE]
names(e) <- gsub("Elec.Elec","Elec",names(e))
e$avg_window <- avg_windows[g]
h <- temp[,grep("Heat.", names(temp)), with=FALSE]
names(h) <- gsub("Heat.Steam","Heat",names(h))
h$avg_window <- avg_windows[g]
w <- temp[,grep("Water.", names(temp)), with=FALSE]
names(w) <- gsub("Water.Water","Water",names(w))
w$avg_window <- avg_windows[g]
cehw <- list(c,e,h,w)
all_precrec[[g]] <- cehw
}
# gatherlist (sourced) stacks one utility's tables across all window values.
cool_res <- gatherlist(all_precrec, 1)
elec_res <- gatherlist(all_precrec, 2)
heat_res <- gatherlist(all_precrec, 3)
water_res <- gatherlist(all_precrec, 4)
results <- list(cool_res, elec_res, heat_res, water_res)
}
# Run the averaging-window sweep for every building.
#avg_windows <- seq(2, 24, 2)
avg_windows <- c(2, 5)
avg_window_prc <- list()
for (i in 1:length(dfs_tot)){
avg_window_prc[[i]] <- many_precrec(dfs_tot[[i]], rf_mods[[i]], k_vals, avg_windows)
}
# Make plot for one bldg, one utility with all the values of modified parameter
# Draw a PRC (type == "prc") or ROC curve, one line per avg_window value, and
# optionally export it via orca.  xname/yname/splitname are passed unquoted
# and captured with enquo for plotly.
plot_prec_avgwindow <- function(df, xname, yname, splitname, bldg, util, type){
quo_x <- enquo(xname)
quo_y <- enquo(yname)
quo_split <- enquo(splitname)
splitvar <- as.factor(df$avg_window)
# get length of threshold
colname <- names(df)[grep("thresh", names(df))]
thresh_hrs <- df[, ..colname][1]
legendtitle <- list(yref='paper',xref="paper",y=1.05,x=1.15,
text="error avg len (hrs)",showarrow=F)
if (type=="prc"){
p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
name=splitvar, mode="lines", type="scatter" ) %>%
layout(yaxis=list(title='Precision', range=c(0,1)),
xaxis = list(title='Recall', range=c(0,1)),
annotations=legendtitle )
filename <- paste0("prec-rec-changeAvg-short-",bldg,"-",util,"-threshold-", (thresh_hrs)/24, "-days.pdf")
} else {
p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
name=splitvar, mode="lines", type="scatter" ) %>%
layout(yaxis=list(title='True Positive Rate', range=c(0,1)),
xaxis = list(title='False Positive Rate', range=c(0,1)),
annotations=legendtitle )
filename <- paste0("ROC-changeAvg-short-",bldg,"-",util,"-threshold-", (thresh_hrs)/24, "-days.pdf")
}
# p1 <- df %>%
# group_by(avg_window) %>%
# plot_ly(x=~Cool.Cool_tpr, y=~Cool.Cool_ppv, name="Cool", mode="lines",
# type="scatter") %>%
# Export the figure; temporarily switch into fig_dir for orca.
if (save_img){
wd <- getwd()
setwd(fig_dir) # Change directory for saving figures
#if (!exists("adjust")) { adjust = 1 }
adjust = .8
orca(p1, filename ) #, width=fig_width*adjust, height=fig_height*adjust)
setwd(wd) # Change back to original working directory
}
if (viewer) {print(p1)}
#return(p1)
}
# send the different utilities to the plot function one at a time
plot_allprecrec <- function(eval_list, bldg){
plot_prec_avgwindow(eval_list[[1]], Cool_tpr, Cool_ppv, avg_window, bldg, "Cool", "prc")
plot_prec_avgwindow(eval_list[[2]], Elec_tpr, Elec_ppv, avg_window, bldg, "Elec", "prc")
plot_prec_avgwindow(eval_list[[3]], Heat_tpr, Heat_ppv, avg_window, bldg, "Heat", "prc")
plot_prec_avgwindow(eval_list[[4]], Water_tpr, Water_ppv, avg_window, bldg, "Water", "prc")
}
# PRC plots for every building.
for (i in 1:length(dfs_tot)){
plot_allprecrec(avg_window_prc[[i]], bldg_names[[i]])
}
beep()
# ROC curve is not that useful unless the number in each class is roughly equal
plot_allROC <- function(eval_list, bldg){
plot_prec_avgwindow(eval_list[[1]], Cool_fpr, Cool_tpr, avg_window, bldg, "Cool", "ROC")
plot_prec_avgwindow(eval_list[[2]], Elec_fpr, Elec_tpr, avg_window, bldg, "Elec", "ROC")
plot_prec_avgwindow(eval_list[[3]], Heat_fpr, Heat_tpr, avg_window, bldg, "Heat", "ROC")
plot_prec_avgwindow(eval_list[[4]], Water_fpr, Water_tpr, avg_window, bldg, "Water", "ROC")
}
# ROC plots for every building.
for (i in 1:length(dfs_tot)){
plot_allROC(avg_window_prc[[i]], bldg_names[[i]])
}
# ////////////////////////////////////////////////
# loop threshold length 1 to 40 days
# ////////////////////////////////////////////////
# Reset the averaging windows to their defaults before sweeping the
# threshold-window length instead.
avg_hrs <<- 2
ED_hrs <<- 4
k_vals <- seq(-5,40,5)
# Same structure as many_precrec(), but the swept parameter is the threshold
# window length (len_days/len_hrs globals read by prec_rec()).
many_precrec2 <- function(df, mods, k_vals, threshes){
all_precrec <- list()
result_list <- list()
for (j in 1:length(threshes)){
len_days <<- threshes[j]
len_hrs <<- 24*len_days # length of window for threshold calculation
result_list[[j]] <- prec_rec(df, mods, k_vals)
}
# reorganize the results into nicer tables instead of list
for (g in 1:length(threshes)){
temp <- result_list[[g]]
c <- temp[,grep("Cool.", names(temp)), with=FALSE]
names(c) <- gsub("Cool.Cool","Cool",names(c))
c$avg_window <- rep(avg_hrs, nrow(c))
e <- temp[,grep("Elec.", names(temp)), with=FALSE]
names(e) <- gsub("Elec.Elec","Elec",names(e))
e$avg_window <- rep(avg_hrs, nrow(e))
h <- temp[,grep("Heat.", names(temp)), with=FALSE]
names(h) <- gsub("Heat.Steam","Heat",names(h))
h$avg_window <- rep(avg_hrs, nrow(h))
w <- temp[,grep("Water.", names(temp)), with=FALSE]
names(w) <- gsub("Water.Water","Water",names(w))
w$avg_window <- rep(avg_hrs, nrow(w))
cehw <- list(c,e,h,w)
all_precrec[[g]] <- cehw
}
cool_res <- gatherlist(all_precrec, 1)
elec_res <- gatherlist(all_precrec, 2)
heat_res <- gatherlist(all_precrec, 3)
water_res <- gatherlist(all_precrec, 4)
results <- list(cool_res, elec_res, heat_res, water_res)
}
# Threshold-length sweep for every building.
#threshes <- c(15,20,25,30,35,40)
threshes <- c(20, 35)
#initialize
thresh_prc <- list()
for (i in 1:length(dfs_tot)){
thresh_prc[[i]] <- many_precrec2(dfs_tot[[i]], rf_mods[[i]], k_vals, threshes)
}
# Plot precision-recall (type == "prc") or ROC curves for one building and one
# utility, with one curve per threshold-window length (legend in days).
# df: evaluation table containing *_tpr/*_ppv, a "thresh*" column (hours),
#     avg_window, and a "se_mult*" column used for hover text.
# xname/yname/splitname: unquoted column names, captured with enquo().
# bldg/util: labels used only in the output file name.
# Relies on globals: save_img, fig_dir, fig_width, fig_height, viewer.
# Side effects only (saves and/or prints the figure); returns NULL-ish.
plot_prec_thresh <- function(df, xname, yname, splitname, bldg, util, type){
  quo_x <- enquo(xname)
  quo_y <- enquo(yname)
  quo_split <- enquo(splitname)
  # moving average and ED window length
  avghrs <- df$avg_window[1]
  # get length of threshold (hours), shown in days in the legend
  colname <- names(df)[grep("thresh", names(df))]
  thresh_hrs <- df[, ..colname]
  thresh_day <- thresh_hrs/24
  # se multiplier for hovertext
  colname2 <- names(df)[grep("se_mult", names(df))]
  se_mult <- df[, ..colname2]
  splitvar <- as.factor(thresh_day[[1]])
  legendtitle <- list(yref='paper',xref="paper",y=1.1, x=1.2, # y=1.05,x=1.15,
                      text="threshold window \n(days)",showarrow=F)
  if (type=="prc"){
    p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
                  name=splitvar, mode="lines", type="scatter",
                  text = paste(as.matrix(se_mult)),
                  hoverinfo='text') %>%
      layout(yaxis=list(title='Precision', range=c(0,1)),
             xaxis = list(title='Recall', range=c(0,1)),
             annotations=legendtitle )
    filename <- paste0("prec-rec-changeThresh4-",util,"-",bldg,"-avgHrs-", avghrs, ".svg")
  } else {
    p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
                  name=splitvar, mode="lines", type="scatter" ) %>%
      layout(yaxis=list(title='True Positive Rate', range=c(0,1)),
             xaxis = list(title='False Positive Rate', range=c(0,1)),
             annotations=legendtitle )
    filename <- paste0("ROC-changeThresh-",bldg,"-",util,"-avgHrs-", avghrs, ".pdf")
  }
  if (save_img){
    wd <- getwd()
    setwd(fig_dir) # Change directory for saving figures
    #if (!exists("adjust")) { adjust = 1 }
    adjust = 1.2
    orca(p1, filename , width=fig_width*adjust, height=fig_height*adjust)
    setwd(wd) # Change back to original working directory
  }
  if (viewer){print(p1) }
  #return(p1)
}
# Changing threshold: draw PRC curves for all four utilities of one building.
# Column names are passed unquoted because plot_prec_thresh captures them
# with enquo().
plot_allprecrec2 <- function(eval_list, bldg){
  plot_prec_thresh(eval_list[[1]], Cool_tpr, Cool_ppv, Cool.thresh_len, bldg, "Cool", "prc")
  plot_prec_thresh(eval_list[[2]], Elec_tpr, Elec_ppv, Elec.thresh_len, bldg, "Elec", "prc")
  plot_prec_thresh(eval_list[[3]], Heat_tpr, Heat_ppv, Heat.thresh_len, bldg, "Heat", "prc")
  plot_prec_thresh(eval_list[[4]], Water_tpr, Water_ppv, Water.thresh_len, bldg, "Water", "prc")
}
# Render the threshold-sweep curves for every building, then beep when done
for (i in 1:length(dfs_tot)){
  plot_allprecrec2(thresh_prc[[i]], bldg_names[[i]])
}
beep(2)
# ////////////////////////////////////////////////
# loop alarm length 3 to 7 days
# ////////////////////////////////////////////////
# Sweep parameters; the <<- assignments suggest these are read as globals
# by prec_rec() and helpers defined elsewhere in the file (see the
# `action_len <<-` / `time_frac <<-` usage in the many_precrec* loops).
avg_hrs <<- 2   # moving-average window (hours)
ED_hrs <<- 4    # ED window (hours)
k_vals <- seq(-5,40,5)  # threshold multipliers to sweep
len_days <<- 30
len_hrs <<- 24*len_days # length of window for threshold calculation
mod_frac <- .7
time_frac <- .95
#action_len <- 3
# Run prec_rec() over a grid of alarm ("action") lengths and collect the
# per-utility precision/recall tables.
# df: building data.table; mods: fitted models; k_vals: threshold multipliers;
# action_lens: alarm window lengths (days) to sweep.
# prec_rec() reads `action_len` as a global, hence the <<- below.
# Returns list(cool, elec, heat, water), each stacking one table per
# action length (via gatherlist, defined elsewhere).
many_precrec3 <- function(df, mods, k_vals, action_lens){
  # helper: pull one utility's columns out of a prec_rec() result table,
  # strip the doubled utility prefix from the names, and tag each row with
  # the averaging window and the action length used for this run.
  # (Heat columns arrive prefixed "Heat.Steam" rather than "Heat.Heat".)
  extract_util <- function(res, grep_pat, gsub_from, gsub_to, alen){
    u <- res[, grep(grep_pat, names(res)), with = FALSE]
    names(u) <- gsub(gsub_from, gsub_to, names(u))
    u$avg_window <- rep(avg_hrs, nrow(u))  # avg_hrs is a script-level global
    u$actionlen <- rep(alen, nrow(u))
    u
  }
  result_list <- list()
  for (j in seq_along(action_lens)){
    action_len <<- action_lens[j]  # consumed by prec_rec()
    result_list[[j]] <- prec_rec(df, mods, k_vals)
  }
  # reorganize the results into per-utility tables instead of raw lists
  all_precrec <- list()
  for (g in seq_along(action_lens)){
    temp <- result_list[[g]]
    c <- extract_util(temp, "Cool.",  "Cool.Cool",   "Cool",  action_lens[g])
    e <- extract_util(temp, "Elec.",  "Elec.Elec",   "Elec",  action_lens[g])
    h <- extract_util(temp, "Heat.",  "Heat.Steam",  "Heat",  action_lens[g])
    w <- extract_util(temp, "Water.", "Water.Water", "Water", action_lens[g])
    all_precrec[[g]] <- list(c, e, h, w)
  }
  cool_res  <- gatherlist(all_precrec, 1)
  elec_res  <- gatherlist(all_precrec, 2)
  heat_res  <- gatherlist(all_precrec, 3)
  water_res <- gatherlist(all_precrec, 4)
  list(cool_res, elec_res, heat_res, water_res)
}
# Alarm ("action") lengths in days to sweep, per building
action_lens <- c(3,4,5,6,7)
#action_lens <- c(3,7)
action_prc <- list()
for (i in 1:length(dfs_tot)){
  action_prc[[i]] <- many_precrec3(dfs_tot[[i]], rf_mods[[i]], k_vals, action_lens)
}
# Plot precision-recall (type == "prc") or ROC curves for one building and one
# utility, with one curve per alarm/action length (legend in days).
# df: evaluation table containing *_tpr/*_ppv, an "action*" column,
#     avg_window, and a "se_mult*" column used for hover text.
# xname/yname/splitname: unquoted column names, captured with enquo().
# bldg/util: labels used only in the output file name.
# Relies on globals: save_img, fig_dir, viewer. Side effects only.
plot_prec_actionlen <- function(df, xname, yname, splitname, bldg, util, type){
  quo_x <- enquo(xname)
  quo_y <- enquo(yname)
  quo_split <- enquo(splitname)
  # moving average and ED window length
  avghrs <- df$avg_window[1]
  # get length of window for alarm trigger (from signals)
  colname <- names(df)[grep("action", names(df))]
  action_len <- df[, ..colname]
  # se multiplier for hovertext
  colname2 <- names(df)[grep("se_mult", names(df))]
  se_mult <- df[, ..colname2]
  splitvar <- as.factor(action_len[[1]])
  legendtitle <- list(yref='paper',xref="paper",y=1.05,x=1.15,
                      text="action length (days)",showarrow=F)
  if (type=="prc"){
    # FIX: 'se_mult' is not a valid plotly hoverinfo flag, so the hover text
    # never showed anything useful. Pass the se multiplier via `text` and set
    # hoverinfo='text', matching plot_prec_thresh / plot_prec_alarmfrac.
    p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
                  name=splitvar, mode="lines", type="scatter",
                  text = paste(as.matrix(se_mult)),
                  hoverinfo='text') %>%
      layout(yaxis=list(title='Precision', range=c(0,1)),
             xaxis = list(title='Recall', range=c(0,1)),
             annotations=legendtitle )
    filename <- paste0("prec-rec-changeActionLen-",util,"-",bldg,"-avgHrs-", avghrs, ".pdf")
  } else {
    p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
                  name=splitvar, mode="lines", type="scatter" ) %>%
      layout(yaxis=list(title='True Positive Rate', range=c(0,1)),
             xaxis = list(title='False Positive Rate', range=c(0,1)),
             annotations=legendtitle )
    filename <- paste0("ROC-changeActionLen-",bldg,"-",util,"-avgHrs-", avghrs, ".pdf")
  }
  if (save_img){
    wd <- getwd()
    setwd(fig_dir) # Change directory for saving figures
    #if (!exists("adjust")) { adjust = 1 }
    adjust = .8
    orca(p1, filename ) #, width=fig_width*adjust, height=fig_height*adjust)
    setwd(wd) # Change back to original working directory
  }
  if (viewer) {print(p1)}
  #return(p1)
}
# Draw PRC curves for all four utilities of one building, split by action
# length. Column names are passed unquoted (captured via enquo downstream).
plot_allprecrec3 <- function(eval_list, bldg){
  plot_prec_actionlen(eval_list[[1]], Cool_tpr, Cool_ppv, actionlen, bldg, "Cool", "prc")
  plot_prec_actionlen(eval_list[[2]], Elec_tpr, Elec_ppv, actionlen, bldg, "Elec", "prc")
  plot_prec_actionlen(eval_list[[3]], Heat_tpr, Heat_ppv, actionlen, bldg, "Heat", "prc")
  plot_prec_actionlen(eval_list[[4]], Water_tpr, Water_ppv, actionlen, bldg, "Water", "prc")
}
# Render the action-length curves for every building
for (i in 1:length(dfs_tot)){
  plot_allprecrec3(action_prc[[i]], bldg_names[[i]])
}
beep(2)
# ////////////////////////////////////////////////
# loop time fraction for alarm 0.2 to 0.99
# ////////////////////////////////////////////////
# reset sweep globals before varying the alarm fraction
mod_frac <- .7
#time_frac <- .9
action_len <- 3
# Run prec_rec() over a grid of alarm time fractions and collect the
# per-utility precision/recall tables.
# df: building data.table; mods: fitted models; k_vals: threshold multipliers;
# time_fracs: fractions of signals required to raise an alarm.
# prec_rec() reads `time_frac` as a global, hence the <<- below.
# Returns list(cool, elec, heat, water), each stacking one table per
# time fraction (via gatherlist, defined elsewhere).
many_precrec4 <- function(df, mods, k_vals, time_fracs){
  # helper: pull one utility's columns out of a prec_rec() result table,
  # strip the doubled utility prefix from the names, and tag each row with
  # the averaging window and the time fraction used for this run.
  # (Heat columns arrive prefixed "Heat.Steam" rather than "Heat.Heat".)
  extract_util <- function(res, grep_pat, gsub_from, gsub_to, tfrac){
    u <- res[, grep(grep_pat, names(res)), with = FALSE]
    names(u) <- gsub(gsub_from, gsub_to, names(u))
    u$avg_window <- rep(avg_hrs, nrow(u))  # avg_hrs is a script-level global
    u$time_frac <- rep(tfrac, nrow(u))
    u
  }
  result_list <- list()
  for (j in seq_along(time_fracs)){
    time_frac <<- time_fracs[j]  # consumed by prec_rec()
    result_list[[j]] <- prec_rec(df, mods, k_vals)
  }
  # reorganize the results into per-utility tables instead of raw lists
  all_precrec <- list()
  for (g in seq_along(time_fracs)){
    temp <- result_list[[g]]
    c <- extract_util(temp, "Cool.",  "Cool.Cool",   "Cool",  time_fracs[g])
    e <- extract_util(temp, "Elec.",  "Elec.Elec",   "Elec",  time_fracs[g])
    h <- extract_util(temp, "Heat.",  "Heat.Steam",  "Heat",  time_fracs[g])
    w <- extract_util(temp, "Water.", "Water.Water", "Water", time_fracs[g])
    all_precrec[[g]] <- list(c, e, h, w)
  }
  cool_res  <- gatherlist(all_precrec, 1)
  elec_res  <- gatherlist(all_precrec, 2)
  heat_res  <- gatherlist(all_precrec, 3)
  water_res <- gatherlist(all_precrec, 4)
  list(cool_res, elec_res, heat_res, water_res)
}
# Alarm fractions to sweep; full grid commented out, reduced for runtime
#time_fracs <- c(0.5, 0.9, 0.95, .99)
time_fracs <- c(0.8, 0.95)
alarm_prc <- list()
for (i in 1:length(dfs_tot)){
  alarm_prc[[i]] <- many_precrec4(dfs_tot[[i]], rf_mods[[i]], k_vals, time_fracs)
}
# Plot precision-recall (type == "prc") or ROC curves for one building and one
# utility, with one curve per alarm fraction (phi).
# df: evaluation table containing *_tpr/*_ppv, a "*frac*" column,
#     avg_window, and a "se_mult*" column used for hover text.
# xname/yname/splitname: unquoted column names, captured with enquo().
# bldg/util: labels used only in the output file name.
# Relies on globals: save_img, fig_dir, viewer. Side effects only.
plot_prec_alarmfrac <- function(df, xname, yname, splitname, bldg, util, type){
  quo_x <- enquo(xname)
  quo_y <- enquo(yname)
  quo_split <- enquo(splitname)
  # moving average and ED window length
  avghrs <- df$avg_window[1]
  # get fraction of signals for alarm (phi)
  colname <- names(df)[grep("frac", names(df))]
  frac <- df[, ..colname]
  # se multiplier for hovertext
  colname2 <- names(df)[grep("se_mult", names(df))]
  se_mult <- df[, ..colname2]
  splitvar <- as.factor(frac[[1]])
  legendtitle <- list(yref='paper',xref="paper",y=1.05,x=1.15,
                      text="alarm fraction",showarrow=F)
  if (type=="prc"){
    p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
                  name=splitvar, mode="lines", type="scatter",
                  text = paste(as.matrix(se_mult)),
                  hoverinfo='text') %>%
      layout(yaxis=list(title='Precision', range=c(0,1)),
             xaxis = list(title='Recall', range=c(0,1)),
             annotations=legendtitle )
    filename <- paste0("prec-rec-changeAlarmFrac-",util,"-",bldg,"-avgHrs-", avghrs, ".pdf")
  } else {
    p1 <- plot_ly(data = df, x=quo_x, y=quo_y, split = quo_split,
                  name=splitvar, mode="lines", type="scatter" ) %>%
      layout(yaxis=list(title='True Positive Rate', range=c(0,1)),
             xaxis = list(title='False Positive Rate', range=c(0,1)),
             annotations=legendtitle )
    filename <- paste0("ROC-changeAlarmFrac-",util,"-",bldg,"-avgHrs-", avghrs, ".pdf")
  }
  if (save_img){
    wd <- getwd()
    setwd(fig_dir) # Change directory for saving figures
    #if (!exists("adjust")) { adjust = 1 }
    adjust = .8
    orca(p1, filename ) #, width=fig_width*adjust, height=fig_height*adjust)
    setwd(wd) # Change back to original working directory
  }
  if (viewer) {print(p1)}
  #return(p1)
}
# Draw PRC curves for all four utilities of one building, split by alarm
# fraction. Column names are passed unquoted (captured via enquo downstream).
plot_allprecrec4 <- function(eval_list, bldg){
  plot_prec_alarmfrac(eval_list[[1]], Cool_tpr, Cool_ppv, time_frac, bldg, "Cool", "prc")
  plot_prec_alarmfrac(eval_list[[2]], Elec_tpr, Elec_ppv, time_frac, bldg, "Elec", "prc")
  plot_prec_alarmfrac(eval_list[[3]], Heat_tpr, Heat_ppv, time_frac, bldg, "Heat", "prc")
  plot_prec_alarmfrac(eval_list[[4]], Water_tpr, Water_ppv, time_frac, bldg, "Water", "prc")
}
# Render the alarm-fraction curves for every building
for (i in 1:length(dfs_tot)){
  plot_allprecrec4(alarm_prc[[i]], bldg_names[[i]])
}
beep()
|
8482fe72e75d4462d8ae5cd1b9d691c05bd48ce5
|
c88b0cbeda0edf9e745e324ef942a504e27d4f87
|
/Budongo cognition/BFactoring.R
|
1a8ffd7f85fa7f3060d0b98696ff31ef0b0d7423
|
[] |
no_license
|
Diapadion/R
|
5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27
|
1485c43c0e565a947fdc058a1019a74bdd97f265
|
refs/heads/master
| 2023-05-12T04:21:15.761115
| 2023-04-27T16:26:35
| 2023-04-27T16:26:35
| 28,046,921
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,057
|
r
|
BFactoring.R
|
# Bayes-factor model comparisons (BayesFactor package) on the bundled
# `puzzles` data, followed by analyses of chimp personality data.
library(BayesFactor)
data(puzzles)
## neverExclude argument makes sure that participant factor ID
## is in all models
result = generalTestBF(RT ~ shape*color + ID, data = puzzles, whichRandom = "ID",
                       neverExclude="ID", progress=FALSE)
result
# Classical linear model of choice on personality scores.
# NOTE(review): `aggPers` and the column `participat` (apparently a truncated
# "participant") are defined elsewhere — confirm against the data source.
BF.p = lm(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
            Neuroticism + Extraversion + Agreeableness, data=aggPers)
summary(BF.p)
# Bayes-factor analogues of the same regression
lmBF.1 = lmBF(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
                Neuroticism + Extraversion + Agreeableness, data=aggPers, progress=F)
regrBF.1 = regressionBF(as.numeric(participat) ~ Dominance + Conscientiousness + Openness +
                          Neuroticism + Extraversion + Agreeableness, data=aggPers)
# Accuracy model with Chimp as a random factor
mod.gm1 <- generalTestBF(as.numeric(Accuracy) ~ Dominance + Conscientiousness + Openness + Neuroticism
                         + Agreeableness + Extraversion + as.factor(Chimp),
                         whichRandom = "Chimp",
                         #family = binomial,
                         data=cz_bin_pers
)
|
70aefb6f7c935712a37b2ba9a4a5c244d9a5a2b1
|
3bd22cace07e560a11159b69ef0679f4d71bac38
|
/Database Management/Mine a Database/LoadDataWarehouse.LiuY.XuM.R
|
3eaff5ff9e95d5ee3183c2a113ae942de8d9465f
|
[] |
no_license
|
xiajingdongning/liuyangli
|
9bad094ce729338cef5c59ad0d7ce3bb5c05e497
|
9eb7be94e886fd75bb6a355ae4b82eeed04df1f3
|
refs/heads/master
| 2023-09-01T02:49:35.135879
| 2023-08-19T00:56:13
| 2023-08-19T00:56:13
| 234,586,231
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,268
|
r
|
LoadDataWarehouse.LiuY.XuM.R
|
# Name: Yangli Liu
# Name: Mingyi Xu
# Course: CS5200 Spring 2023
# Date: 4/19/2023
# Install missing dependencies, then load them.
# FIX: the first check previously tested for "RSQLite" while installing
# RMySQL, so RMySQL was never installed when it was actually missing.
if ("RMySQL" %in% rownames(installed.packages()) == FALSE) {
  install.packages("RMySQL")
}
if ("RSQLite" %in% rownames(installed.packages()) == FALSE) {
  install.packages("RSQLite")
}
# Load Dependency
library(RMySQL)
library(RSQLite)
# Create a connection object to the MySQL database, please replace dbname and password with your local instance
# NOTE(review): credentials are hardcoded in source; consider reading them
# from environment variables or a config file before sharing this script.
con <- dbConnect(MySQL(),
                 dbname = "LoadDataWarehouse",
                 host = "localhost",
                 port = 3306,
                 user = "root",
                 password = "YL1saf!!2015")
# Check if the connection was successful
if (dbIsValid(con)) {
  cat("Connected to MySQL database successfully!\n")
} else {
  cat("Failed to connect to MySQL database.\n")
}
# Enable LOCAL INFILE loading on the server for this session
dbSendQuery(con, "SET GLOBAL local_infile = true")
# Author fact table must include the authors id, author name, number of articles by that author,
# total number of co-authors across all articles. Load the data from the SQLite Database created in Part 1.
# Create a star schema for author facts. In order to make the code faster, I think I don't need dimension tables.
dbSendQuery(con, "CREATE TABLE IF NOT EXISTS AuthorFact (
author_id INTEGER NOT NULL,
author_name TEXT,
articles_count INTEGER,
co_authors_count INTEGER,
PRIMARY KEY (author_id)
);")
# Connect to the SQLite database
fpath <- ""
dbfile <- "pubmed.db"
sqlite_con <- dbConnect(RSQLite::SQLite(), paste0(fpath, dbfile))
# Select data from SQLite and store in data frame
# (co_authors_count counts distinct author ids on the same articles,
# which includes the author themselves)
author_data <- dbGetQuery(sqlite_con, "SELECT Authors.id AS author_id, Authors.lastName || ', ' || Authors.forename AS author_name,
COUNT(DISTINCT Authorship.article_id) AS articles_count, COUNT(DISTINCT Authorship2.author_id) AS co_authors_count
FROM Authors
INNER JOIN Authorship ON Authors.id = Authorship.author_id
INNER JOIN Publish ON Authorship.article_id = Publish.article_id
INNER JOIN Authorship AS Authorship2 ON Publish.article_id = Authorship2.article_id
GROUP BY Authors.id;")
# Loop over author_data data frame to insert into MySQL database as AuthorFact table
# Single quotes are stripped so the sprintf-built INSERT below does not break.
# NOTE(review): string-built SQL is injection-prone — prefer parameterized
# queries (dbBind) if this ever ingests untrusted data.
author_data$author_name <- gsub("'", " ", author_data$author_name)
for (i in seq_len(nrow(author_data))) {
  dbSendQuery(con, sprintf("INSERT INTO AuthorFact (author_id, author_name, articles_count, co_authors_count) VALUES (%d, '%s', %d, %d);",
                           author_data[i, "author_id"],
                           author_data[i, "author_name"],
                           author_data[i, "articles_count"],
                           author_data[i, "co_authors_count"]))
}
# Check the AuthorFact table
dbGetQuery(con, "SELECT * FROM AuthorFact LIMIT 10;")
# Create the JournalFact table in MySQL
dbSendQuery(con, "CREATE TABLE IF NOT EXISTS journalFact (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
journal_id INTEGER,
journal_name TEXT,
year INTEGER,
quarter TEXT,
month INTEGER,
articles_per_year INTEGER,
articles_per_quarter INTEGER,
articles_per_month INTEGER
);")
# Basic journalDim construction: one row per journal id/name
query_journal <- "SELECT Journals.id AS id, Journals.title AS journal_name
FROM Journals
GROUP BY Journals.id, journal_name;"
# Execute the query and insert the results into the journalDim table
result_journal <- dbGetQuery(sqlite_con, query_journal)
# strip single quotes so the string-built INSERT later does not break
result_journal$journal_name <- gsub("'", " ", result_journal$journal_name)
# Number of articles published per year for yearDim
query_year <- "SELECT Journals.id As id, Journals.title AS journal_name, Publish.year AS year,
COUNT(DISTINCT Articles.id) AS articles_per_year
FROM Journals
INNER JOIN Publish ON Journals.id = Publish.journal_id
INNER JOIN Articles ON Publish.article_id = Articles.id
GROUP BY Journals.id,journal_name, year;
"
# Execute the query and insert the results into the yearDim table
result_year <- dbGetQuery(sqlite_con, query_year)
# Handle a few special cases (quotes in journal names)
result_year$journal_name <- gsub("'", " ", result_year$journal_name)
# Number of articles published per quarter for quarterDim
# (quarter derived from the month part of Publish.date via strftime)
query_quarter <- "SELECT Journals.id As id, Publish.year AS year,
CASE
WHEN (strftime('%m', Publish.date)) IN ('01', '02', '03') THEN 'Q1'
WHEN (strftime('%m', Publish.date)) IN ('04', '05', '06') THEN 'Q2'
WHEN (strftime('%m', Publish.date)) IN ('07', '08', '09') THEN 'Q3'
ELSE 'Q4'
END AS quarter, COUNT(DISTINCT Articles.id) AS articles_per_quarter
FROM Journals
INNER JOIN Publish ON Journals.id = Publish.journal_id
INNER JOIN Articles ON Publish.article_id = Articles.id
GROUP BY Journals.id, year, quarter;"
# Execute the query and insert the results into the quarterDim table
result_quarter <- dbGetQuery(sqlite_con, query_quarter)
# Number of articles published per month for monthDim
query_month <- "SELECT Journals.id, Publish.year AS year, substr(Publish.date, 6, 2) AS month,
CASE
WHEN (strftime('%m', Publish.date)) IN ('01', '02', '03') THEN 'Q1'
WHEN (strftime('%m', Publish.date)) IN ('04', '05', '06') THEN 'Q2'
WHEN (strftime('%m', Publish.date)) IN ('07', '08', '09') THEN 'Q3'
ELSE 'Q4'
END AS quarter,
COUNT(DISTINCT Articles.id) AS articles_per_month
FROM Journals
INNER JOIN Publish ON Journals.id = Publish.journal_id
INNER JOIN Articles ON Publish.article_id = Articles.id
GROUP BY Journals.id, year, month, quarter;
"
# Execute the query and insert the results into the monthDim table
result_month <- dbGetQuery(sqlite_con, query_month)
# Construct the JournalFact table using corresponding queries
# Merge the result_journal and result_year dataframes on the 'id' column
journal_year <- merge(result_journal, result_year, by = c('id', 'journal_name'))
# Merge the journal_year and result_quarter dataframes on the 'id' and 'year' columns
journal_year_quarter <- merge(journal_year, result_quarter, by = c('id', 'year'))
# Merge the journal_year_quarter and result_month dataframes on the 'id', 'year', and 'quarter' columns
journal_year_quarter_month <- merge(journal_year_quarter, result_month, by = c('id', 'year', 'quarter'))
# Construct fact table, one INSERT per merged row
# NOTE(review): string-built SQL; fine for this cleaned data but injection-prone
for (i in 1:nrow(journal_year_quarter_month)) {
  # Extract values for the current row
  journal_id <- journal_year_quarter_month$id[i]
  journal_name <- journal_year_quarter_month$journal_name[i]
  year <- journal_year_quarter_month$year[i]
  quarter <- journal_year_quarter_month$quarter[i]
  month <- journal_year_quarter_month$month[i]
  articles_per_year <- journal_year_quarter_month$articles_per_year[i]
  articles_per_quarter <- journal_year_quarter_month$articles_per_quarter[i]
  articles_per_month <- journal_year_quarter_month$articles_per_month[i]
  # Construct the SQL query for inserting the current row into the journalFact table
  insert_query <- paste("INSERT INTO journalFact (journal_id, journal_name, year, quarter, month, articles_per_year, articles_per_quarter, articles_per_month)",
                        "VALUES (", journal_id, ",", "'", journal_name, "'", ",", year, ",", "'", quarter, "'", ",", month, ",", articles_per_year, ",", articles_per_quarter, ",", articles_per_month, ");", sep="")
  # Execute the SQL query using the database connection 'con'
  dbSendQuery(con, insert_query)
}
# Check the JournalFact table
dbGetQuery(con, "SELECT * FROM JournalFact LIMIT 10;")
# Disconnect from SQLite database
dbDisconnect(sqlite_con)
# Disconnect from MySQL database
dbDisconnect(con)
|
d5a455bc888bd4a9a14f41e557434325ed78b699
|
cc8d779bc656c99b24e4eb19a5423b8effbd8d16
|
/man/fhStart.Rd
|
994d5e33f0206eb90398e62894cfe1d3f9fb72a0
|
[] |
no_license
|
plantarum/flowPloidy
|
621c63af28d0723e8c8c255491abca43242a6d35
|
e9f5a800f01de31853978f8566c4a63a271dfefc
|
refs/heads/master
| 2023-04-06T20:28:25.763379
| 2023-03-17T14:50:06
| 2023-03-17T14:50:06
| 113,072,730
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,103
|
rd
|
fhStart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FlowHist.R
\name{fhStart}
\alias{fhStart}
\title{Calculate the where to start analysis for a \code{\link{FlowHist}}
histogram}
\usage{
fhStart(intensity)
}
\arguments{
\item{intensity}{numeric, the fluorescence intensity channel bins}
}
\value{
an integer, the index of the first intensity element to include
in the actual model fitting. That is, everything from \code{startBin}
to the end of \code{intensity} gets fit in the model, everything below
\code{startBin} is ignored.
}
\description{
We exclude the first five bins at the outset (as part of the function
\code{\link{setBins}}. For some flow cytometers, these values contain
very high spikes that are an artifact of compensation, and are not
useful data.
}
\details{
After that, we call \code{\link{fhStart}} to skip to the highest value
in the first 10 non-zero bins, and ignore everything below that. The
motivation here is the same - to get out beyond the noisy bins and into
the actual data we're trying to fit.
}
\author{
Tyler Smith
}
\keyword{internal}
|
0557e32631df9887c0de89da98d6620bb62e93e1
|
144fc787ba3309d7abc8f8c7bd92f51b3e93bb9b
|
/Data_Cleansing.R
|
c487ad017191c3383407d0d55be3cda53e5970c7
|
[] |
no_license
|
nguyendoanbb/GermanCredit
|
426ccd9ebf60da8adbf8934a7867bc81b7ddcb70
|
63e3ce900a915fd89d1869384fe68a42bb25d504
|
refs/heads/master
| 2020-04-20T04:47:56.085448
| 2019-02-06T16:59:53
| 2019-02-06T16:59:53
| 168,638,366
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 817
|
r
|
Data_Cleansing.R
|
########################################################################
#Data transformation
head(data) #20 features and 1000 observations, some variables need editing in order to be useful for analysis
str(data)
#removing single quote from all values in the table
# (the listed column indices are the character-valued features)
for (i in c(1,3:4,6:7,9:10,12,14:15,17,19:20)){
  data[,i] <- gsub("'",'',data[,i])
}
# unify the two spellings of the same personal_status level
data$personal_status <- ifelse(data$personal_status == 'female div/dep/mar', 'female div/sep/mar', data$personal_status)
data <- data %>% mutate_if(is.character, as.factor) #converge character to factor
str(data) #checking if convergence works
#Divide data into train and test (2/3 train, 1/3 test; seeded for reproducibility)
set.seed(123)
train <- sample(1:nrow(data), nrow(data)*2/3)
data.train <- data[train,]
data.test <- data[-train,]
########################################################################
|
90065386b8d2108267b6865a4c5098931b43c3b6
|
9f3f65c30ccaea7b7054590e8159c27b7d16e242
|
/pitcher_deception.R
|
a19565e3021e949d7b6770c9752e6e309419a02d
|
[] |
no_license
|
jeaninem8/mlb_deception
|
a4030fcd3e964c31e1dc936a7b2b184c851bb124
|
1b870b53b8cd35828c9d20b68d5627477024457c
|
refs/heads/main
| 2023-02-04T20:47:41.120178
| 2020-12-19T03:30:13
| 2020-12-19T03:30:13
| 305,537,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,285
|
r
|
pitcher_deception.R
|
# Jeanine Minnick
# creating a leaderboard of MLB's most deceptive pitchers
# use distance from average release point, spin rate, strike rates both swinging and looking (except on 3-0), and hit rate on strikes thrown
library(dplyr)
library(data.table)
library(grDevices)
library(geometry)
# using data from 2017-October 12 2020 (local Statcast export)
savant <- read.csv("/Volumes/My Passport/Savant Data 2017-2020 season/savant101320.csv")
savant$player_name <- as.character(savant$player_name)
savant <- savant %>% filter(game_year == '2019' | game_year == '2020')
# quantify "deception" value
# weight these release point distances to average for each pitcher
# will want to factor each part of overall formula (what parts are more important to deception)
# weights are based on relative values of the actions with respect to deception
# swingingk_pct - want higher
# lookingk_pct - want higher
# rel_var_dbd 1/rel_var_dbd - want lower
# hit_pct 1/hit_pct - want lower
# speed_var - want higher
# bauer_avg - want higher
# break_area - want higher
# deception <- swingingk_pct + lookingk_pct + (1/rel_var_dbd) + (1/hit_pct) + speed_var + bauer_avg + break_area
#################
#################
# all savant data
# pitchers with at least 500 pitches from 2019 & 2020
pitch_count <- as.data.frame(savant %>% group_by(pitcher) %>% tally()) # count total pitches for each pitcher id
id_list <- (pitch_count %>% filter(n >= 500))[,1] # filter out players without 500 total pitches, get this as a player list
deception_mat <- matrix(data = NA, nrow = length(id_list), ncol = 10) # create a matrix for the calculated values to be placed
deception_mat <- as.data.frame(deception_mat)
colnames(deception_mat) <- c("player_id", "player_name", "deception", "swingingk_pct", "lookingk_pct", "rel_var_dbd_inv", "hit_pct_inv", "speed_var", "bauer_avg", "break_area")
# overall loop for calculating values for each individual pitcher
# (fills one row of deception_mat per pitcher id in id_list)
for (i in 1:length(id_list)) {
  deception_mat[i,1] <- id_list[i] # apply pitcher id to deception table
  # filter data for player
  player_data <- savant %>% filter(pitcher == id_list[i])
  deception_mat[i,2] <- player_data$player_name[1] # apply pitcher name to deception table
  # calculate swinging k percent
  swingingk_pct <- sum(grepl("swinging_strike", player_data$description, fixed = TRUE))/nrow(player_data)
  deception_mat[i,4] <- swingingk_pct # apply pitcher swinging k percent to deception table
  # calculate looking k percent - exclude pitches on 3-0 counts
  player_data_no30 <- player_data %>% filter(!(balls == 3 & strikes == 0))
  lookingk_pct <- sum(grepl("called_strike", player_data_no30$description, fixed = TRUE))/nrow(player_data_no30)
  deception_mat[i,5] <- lookingk_pct # apply pitcher looking k percent to deception table
  # calculate release variability on a day-by-day basis - likely to make small mechanical changes, mound starting point, etc. on a game-by-game basis
  date_list <- unique(as.character(player_data$game_date)) # want individual dates the pitcher threw on
  # run loop for each date: each pitch gets its distance from that day's mean release point
  for (k in 1:length(date_list)) {
    avg_rel_x <- mean(player_data$release_pos_x[player_data$game_date == date_list[k]], na.rm = TRUE) # average release point (x)
    avg_rel_z <- mean(player_data$release_pos_z[player_data$game_date == date_list[k]], na.rm = TRUE) # average release point (z)
    # calculate the distance from average release point for each pitch
    for (j in 1:nrow(player_data)) {
      if (player_data$game_date[j] == date_list[k]) {
        player_data$dist_from_rel_center[j] <- sqrt((player_data$release_pos_x[j] - avg_rel_x)^2 + (player_data$release_pos_z[j] - avg_rel_z)^2)
      }
    }
  }
  rel_var_dbd <- var(player_data$dist_from_rel_center, na.rm = TRUE) # variance of distance from release point center
  deception_mat[i,6] <- 1/rel_var_dbd # apply release point variance (inverted) to deception table
  # hit percent for balls contacted
  hit_pct <- sum(player_data$events %like% "home_run|single|double|triple")/sum(player_data$description %like% "hit_into_play|foul")
  deception_mat[i,7] <- 1/hit_pct # apply hit percent (inverted) to deception table
  # speed differences (higher differences means greater variability in pitch speed)
  speed_var <- var(player_data$release_speed, na.rm = TRUE)
  deception_mat[i,8] <- speed_var # apply speed variance to deception table
  # fastball Bauer Units (spin rate / velocity) for FF/FT/FC pitches
  for (b in 1:nrow(player_data)) {
    if (player_data$pitch_type[b] == "FF" | player_data$pitch_type[b] == "FT" | player_data$pitch_type[b] == "FC") {
      player_data$bauer_unit[b] <- player_data$release_spin_rate[b]/player_data$release_speed[b]
    }
    else {player_data$bauer_unit[b] <- NA}
  }
  # for a small number of players, they did not throw one of the pitch types above, but did throw a sinker - used to calculate in these instances
  if (sum(!is.na(player_data$bauer_unit)) == 0) {
    for (b in 1:nrow(player_data)) {
      if (player_data$pitch_type[b] == "SI") {
        player_data$bauer_unit[b] <- player_data$release_spin_rate[b]/player_data$release_speed[b]
      }
      else {player_data$bauer_unit[b] <- NA}
    }
  }
  bauer_avg <- mean(player_data$bauer_unit, na.rm = TRUE)
  deception_mat[i,9] <- bauer_avg # apply bauer unit average to deception table
  # use ax az for break - breaking pitches have higher numbers
  # want larger area within triangle - larger break range means more deception
  accel_x <- player_data %>% filter(pitch_type != '') %>% group_by(pitch_type) %>% summarise(avg_ax = mean(ax)) %>% pull(2) # calculate average ax for each pitch type
  accel_z <- player_data %>% filter(pitch_type != '') %>% group_by(pitch_type) %>% summarise(avg_ax = mean(az)) %>% pull(2) # calculate average az for each pitch type
  # calculate pitch acceleration area (convex hull) if pitcher has more than 2 pitches
  if (length(accel_x) > 2) {
    point_order <- chull(accel_x, accel_z)
    accel_x <- accel_x[point_order]
    accel_z <- accel_z[point_order]
    break_area <- polyarea(accel_x, accel_z)
    deception_mat[i,10] <- break_area
  } else{deception_mat[i,10] <- sqrt((accel_x[1] - accel_x[2])^2 + (accel_z[1] - accel_z[2])^2)} # <- calculate distance from pitches if only two pitch types
  # calculate deception and apply to table (unweighted sum; rescaled later)
  deception <- swingingk_pct + lookingk_pct + (1/rel_var_dbd) + (1/hit_pct) + speed_var + bauer_avg + break_area
  deception_mat[i,3] <- deception
}
# calculate average of each part of deception equation - we will make all values equal in our definition
# each needs to equal 14.28571 (100/7) to make the overall average 100.
# The hand-derived multipliers/divisors below were read off these printed means;
# NOTE(review): they are hard-coded to this data pull — recompute if the data changes.
mean(deception_mat$swingingk_pct)*126.0255
mean(deception_mat$lookingk_pct)*89.09515
mean(deception_mat$rel_var_dbd_inv)/10.588821
mean(deception_mat$hit_pct_inv)*2.422145
mean(deception_mat$speed_var)/1.912593
mean(deception_mat$bauer_avg)/1.718192
mean(deception_mat$break_area)/10.51374
# copy deception table to another (final) table
deception_final <- deception_mat
# final formula: weighted sum so each component contributes ~100/7 on average
deception_final$deception <- deception_final$swingingk_pct*126.0255 + deception_final$lookingk_pct*89.09515 +
  deception_final$rel_var_dbd_inv/10.588821 + deception_final$hit_pct_inv*2.422145 + deception_final$speed_var/1.912593 +
  deception_final$bauer_avg/1.718192 + deception_final$break_area/10.51374
|
8ed9e3e2914346e16fcba3bac01e4c8e31927d21
|
285541e8ae77482ac7eeb5b51ce06edeb96ef246
|
/man/adult_trees.Rd
|
573f88b671e161176facf8f83caf627da9f1d357
|
[] |
no_license
|
myllym/GET
|
2033c4f590da7cce114b588e7e39b243b543dcdf
|
72988291d9c56b468c5dddfb5bc2c23f519b6dca
|
refs/heads/master
| 2023-08-24T23:23:14.364346
| 2023-08-15T21:33:51
| 2023-08-15T21:33:51
| 68,914,145
| 12
| 5
| null | 2022-11-16T07:55:16
| 2016-09-22T11:20:34
|
R
|
UTF-8
|
R
| false
| true
| 1,305
|
rd
|
adult_trees.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adult_trees.r
\docType{data}
\name{adult_trees}
\alias{adult_trees}
\title{Adult trees data set}
\format{
A \code{data.frame} containing the locations (x- and y-coordinates) of 67 trees
in an area of 75 m x 75 m.
}
\usage{
data("adult_trees")
}
\description{
Adult trees data set
}
\details{
A pattern of large trees (height > 25 m) originating from an uneven aged multi-species
broadleaf nonmanaged forest in Kaluzhskie Zaseki, Russia.
The pattern is a sample part of data collected over 10 ha plot as a part of a research
program headed by project leader Prof. O.V. Smirnova.
}
\examples{
if(require("spatstat.geom", quietly=TRUE)) {
data("adult_trees")
adult_trees <- as.ppp(adult_trees, W = square(75))
plot(adult_trees)
}
}
\references{
Grabarnik, P. and Chiu, S. N. (2002) Goodness-of-fit test for complete spatial randomness against
mixtures of regular and clustered spatial point processes. Biometrika, 89, 411–421.
van Lieshout, M.-C. (2010) Spatial point process theory. In Handbook of Spatial Statistics (eds. A. E.
Gelfand, P. J. Diggle, M. Fuentes and P. Guttorp), Handbooks of Modern Statistical Methods. Boca
Raton: CRC Press.
}
\seealso{
\code{\link{saplings}}
}
\keyword{datasets}
\keyword{spatial}
|
a634a17893304a55a69a3a2f38409ce2e1c1925c
|
eee5f4bfeba72f55c603eb8fbaa04d50af0165a8
|
/R/xmastreewire.R
|
89966f899d38eddab7f492c4f59561f87cd14199
|
[] |
no_license
|
cran/christmas
|
a5a1b761fd27fb25e9dd813ac8b18d2e73b04787
|
9e9d13c9e15639ec71a657f52f36844a14692ce5
|
refs/heads/master
| 2022-12-27T17:02:41.158735
| 2022-12-18T16:50:02
| 2022-12-18T16:50:02
| 236,570,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,355
|
r
|
xmastreewire.R
|
#' @title Wire Christmas tree.
#'
#' @description A random wire Christmas tree (2021 card).
#'
#' @param year Year to be printed. Default is \code{2022}.
#' @param language Language to be used in the card. One of \code{c("english",
#'   "spanish", "catalan")}. Default is \code{"english"}.
#' @param seed Seed for reproducibility of the card. Default is \code{NULL} (no
#'   seed).
#' @return A Christmas card plot including a random wire tree.
#' @author Jose Barrera-Gomez.
#' @examples
#' \donttest{
#' xmastreewire(year = 2020, language = "catalan", seed = 666)
#' }
#' @export
xmastreewire <- function (year = 2022,
                          language = c("english", "spanish", "catalan"),
                          seed = NULL) {
  # --- argument validation ---
  if (!inherits(year, c("numeric", "integer")) || length(year) != 1L)
    stop("'year' must be a number")
  language <- match.arg(language)
  if (!is.null(seed) & (is.na(seed) || !is(seed, "numeric")))
    stop("'seed' must be numeric or NULL")
  if (!is.null(seed))
    set.seed(seed)
  # pacing constants for the animated draw (Sys.sleep pauses below)
  r <- 0.6   # NOTE(review): appears unused below — confirm before removing
  t <- 0.8
  newwindow()
  Sys.sleep(0.5 * t)
  # drawing canvas limits
  xmin <- -10
  xmax <- 6
  ymin <- -5
  ymax <- 10
  # random "snow" points scattered over the canvas
  np <- 2000
  u <- runif(np, xmin, xmax)
  v <- runif(np, ymin, ymax)
  op <- par(family = "HersheySerif")
  on.exit(par(op))
  op
  plot(c(xmin, xmax), c(ymin, ymax), type = "n", asp = 1, axes = F,
       xlab = "", ylab = "")
  # background: ground (light) and sky (dark blue), split at height h
  h <- ymin + (ymax - ymin)/3
  polygon(c(xmin, xmin, xmax, xmax), c(ymin, h, h, ymin), border = NA,
          col = "azure2")
  polygon(c(xmin, xmin, xmax, xmax), c(h, ymax, ymax, h), border = NA,
          col = "darkblue")
  # jittered horizon line
  d <- (xmax - xmin)/100
  x0 <- seq(xmin + d, xmax - d, by = 0.01)
  lines(x0, h + rnorm(length(x0), 0, 0.05), type = "l", lwd = 3,
        col = "blue4")
  Sys.sleep(t)
  h <- -2.2
  polygon(c(xmin, xmin, xmax, xmax), c(ymin, h, h, ymin), border = NA,
          col = "azure2")
  Sys.sleep(t)
  ### trunk: random wire scribble in a small rectangle
  n <- 150
  xmint <- -0.6
  xmaxt <- -xmint
  ymint <- -1.5
  ymaxt <- 0.8
  x <- runif(n, xmint, xmaxt)
  y <- runif(n, ymint, ymaxt)
  lines(x, y, col = "darkorange4")
  Sys.sleep(t)
  ### crown: random wire scribble bounded by a triangular envelope
  n <- 400
  fx <- 6
  xminc <- fx * xmint
  xmaxc <- fx * xmaxt
  yminc <- ymaxt
  fy <- 2.7
  ymaxc <- yminc + fy * (ymaxt - ymint)
  x <- runif(n, min = xminc, max = xmaxc)
  k <- ymaxc
  h <- ymaxc - yminc
  m <- h / xmaxc
  y <- (k - m * abs(x)) * runif(n, 0.9, 1.1)
  # push a share of the points down to the base of the triangle
  nto0 <- round(n / (1 + sqrt(1 + (h / m)^2)))
  to0 <- sample(1:n, size = nto0)
  y[to0] <- yminc
  y <- y + runif(n, -0.3, 0.3)
  lines(x, y, col = "forestgreen")
  Sys.sleep(t)
  # sprinkle star-shaped "lights"
  np <- 500
  points(u[1:np], v[1:np], pch = 8, lwd = 1, cex = 0.1, col = rainbow(180)[90])
  # localized greeting, then the year revealed one character at a time
  mess <- switch(language, english = "HAPPY", spanish = "FELIZ",
                 catalan = "BON")
  year <- unlist(strsplit(paste0(year, "!"), ""))
  t <- 1
  Sys.sleep(t)
  x0 <- switch(language, english = -7.4, spanish = -7.4, catalan = -6.8)
  x0year <- switch(language, english = 3.1, spanish = 2.6, catalan = 2.4)
  h <- -4
  text(x0, h, mess, cex = 2.5, font = 2, col = "forestgreen")
  d <- 0.8
  for (i in 1:length(year)) {
    Sys.sleep(0.5 * t)
    text(x0 + x0year + (i - 1) * d, h, year[i], cex = 2.5, font = 2,
         col = "forestgreen")
  }
  Sys.sleep(1.5)
  # golden "R" tree topper
  myvfont <- c("serif", "bold")
  text(x = 0, y = ymaxc + 0.5, labels = "R", srt = 15, vfont = myvfont,
       cex = 4, col = "gold")
}
|
bba1310a1f6ff76dd86b8958d3df502a586f8e6e
|
285fdf4489063a0a025e01aed5dea62a4de50c1c
|
/HWFiles/Sync7.R
|
98aa7902dcbae12d31bca676e2ef27b7e74c053b
|
[] |
no_license
|
jlwoznic/IST687
|
7050b35be92b38b630f16d61547e716dda0ac1d5
|
b7474d68eea0cb862fc40ac3bd978a3cdd4d65b3
|
refs/heads/master
| 2022-05-10T16:17:21.227459
| 2020-04-10T16:19:13
| 2020-04-10T16:19:13
| 254,680,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,124
|
r
|
Sync7.R
|
#
#
#
# pre-cursor functions,,,readCensus, Numberize
#
# read in the census data set
#
# Read and tidy the US Census 2010-2011 state population-estimate CSV.
# urlToRead: location of the CSV; defaults to the 2010-2011 national/state
#   totals file on census.gov, so existing readCensus() calls are unchanged.
# Returns a data.frame with stateName plus four numeric population columns
# (april10census, april10base, july10pop, july11pop).
# Depends on Numberize() (defined in this file) for number parsing.
readCensus <- function(urlToRead = "http://www2.census.gov/programs-surveys/popest/tables/2010-2011/state/totals/nst-est2011-01.csv") {
  #read the data from the web (or any other CSV location passed in)
  testFrame <- read.csv(url(urlToRead))
  #remove the first 8 rows ("header information")
  testFrame<-testFrame[-1:-8,]
  #only keep the first 5 columns
  testFrame<-testFrame[,1:5]
  #rename the first column
  testFrame$stateName <- testFrame[,1]
  testFrame<-testFrame[,-1]
  #remove the last rows (tail info)
  testFrame<-testFrame[-52:-58,]
  #remove the "dot" prefix from the state name
  testFrame$stateName <- gsub("\\.","", testFrame$stateName)
  #convert the columns to actual numbers and rename columns
  testFrame$april10census <-Numberize(testFrame$X)
  testFrame$april10base <-Numberize(testFrame$X.1)
  testFrame$july10pop <-Numberize(testFrame$X.2)
  testFrame$july11pop <-Numberize(testFrame$X.3)
  testFrame <- testFrame[,-1:-4]
  #remove the old rownames, which are now confusing
  rownames(testFrame) <- NULL
  return(testFrame)
}
#
# Convert a character vector of formatted number strings (e.g. "1,234 567")
# into a numeric vector by stripping commas and spaces.
#
# inputVector: character vector of number strings.
# Returns: numeric vector the same length as inputVector; entries that
#          cannot be parsed become NA (as.numeric emits a warning).
Numberize <- function(inputVector)
{
  # Strip commas and spaces in a single pass (one regex, one gsub call,
  # instead of the two sequential gsub calls the original used).
  inputVector <- gsub("[, ]", "", inputVector)
  return(as.numeric(inputVector))
}
# Packages: maps, zipcode, mapproj, ggmap, ggplot2, gdata
#specify the packages of interest
packages=c("maps","zipcode","mapproj","ggmap","ggplot2","gdata")
#use this function to check if each package is on the local machine
#if a package is installed, it will be loaded
#if any are not, the missing package(s) will be installed and loaded
# NOTE(review): this installs packages as a side effect on first run;
# require() is used here deliberately because it returns FALSE (rather
# than erroring) when a package is missing.
package.check <- lapply(packages, FUN = function(x) {
if (!require(x, character.only = TRUE)) {
install.packages(x, dependencies = TRUE)
library(x, character.only = TRUE)
}
})
#verify they are loaded (search() lists attached packages)
search()
#
# state.name - R data set (built-in vector of the 50 US state names)
#
str(state.name)
head(state.name)
state.name[3]
#
# Build a tiny data frame of state names; ggplot's geom_map matches on the
# lower-case state name, so add a lower-cased copy.
dummyDF <- data.frame(state.name, stringsAsFactors=FALSE)
dummyDF$state <- tolower(dummyDF$state.name)
dummyDF[3,]
dummyDF$state[3]
#
us <- map_data("state") ## map_data is a function in ggplot2 package
str(us)
us[1,]
us[1000,]
#
# Base map: outline of the lower-48 states with a flat fill.
map.simple <- ggplot(dummyDF, aes(map_id = state))
map.simple <- map.simple +
geom_map(map = us, fill="light blue", color="black")
map.simple
map.simple <- map.simple +
expand_limits(x = us$long, y = us$lat)
map.simple
#
# coord_map() keeps the aspect ratio so the map is not stretched.
#
map.simple <- map.simple +
coord_map() + ggtitle("basic map of USA")
map.simple
#
# ??? map.simple + geom_point(aes(x = -100, y = 30))
dfStates <- readCensus()
str(dfStates)
dfStates$state <- tolower(dfStates$stateName)
#
#
# Choropleth: states filled by their July 2011 population estimate.
map.popColor <- ggplot(dfStates, aes(map_id = state))
# BUG FIX: the original paired aes(fill = july11pop) with
# scale_color_gradient(), which styles the *colour* aesthetic, so the
# gradient never applied to the fill. scale_fill_gradient() is the scale
# that controls the fill aesthetic.
map.popColor <- map.popColor +
  geom_map(map = us, aes(fill = july11pop)) +
  scale_fill_gradient(low = "light blue", high = "dark blue")
map.popColor <- map.popColor +
  expand_limits(x = us$long, y = us$lat)
map.popColor <- map.popColor +
  coord_map() + ggtitle("state population")
map.popColor
#
#latlon <- geocode("syracuse university, syracuse, ny")
#latlon
# BUG FIX: the result was assigned to 'latlong' but every use below reads
# 'latlon', which would be undefined.
# NOTE(review): NewLatLon() is not defined in this file -- presumably a
# geocoding wrapper defined elsewhere; confirm before running.
latlon <- NewLatLon("syracuse university, syracuse, ny")
#
# still using map.popColor, plotting a specific point (Syracuse University)
#
map.popColor +
  geom_point(aes(x = latlon$lon,
                 y = latlon$lat), color="darkred", size = 3)
#
latlon1 <- NewLatLon("stanford university,stanford, ca")
latlon1
# BUG FIX: the y coordinate used latlon$lat (Syracuse) together with
# latlon1$lon (Stanford), plotting a point that matched neither location.
# Both coordinates now come from latlon1.
map.popColor +
  geom_point(aes(x = latlon1$lon,
                 y = latlon1$lat), color="yellow", size = 3)
latlon1
#
# data set used in HW7: median income by ZIP code
#
mydata <- read.xls("MedianZIP.xlsx")
str(mydata)
head(mydata)
# clean up the data
# change column names to "zip", "Median","Mean", and "Population"
#
colnames(mydata) <- c("zip", "Median", "Mean", "Population")
#
# delete the first row of the dataframe (it held the original headers)
#
mydata <- mydata[-1,]
head(mydata)
# gsub function is used to perform replacement of matches determined by regular expression matching
# in this case gsub replace all "," in column "Median" with nothing ("")
#
mydata$Median <- gsub(",", "", mydata$Median)
# delete the "," in column "Mean" (replace all "," in column "Mean" with nothing)
mydata$Mean <- gsub(",", "", mydata$Mean)
# delete the "," in column "Population" (replace all "," in column "Population" with nothing)
mydata$Population <- gsub(",","",mydata$Population)
head(mydata)
data(zipcode)
str(zipcode)
head(zipcode)
#
# clean.zipcodes https://www.rdocumentation.org/packages/zipcode/versions/1.0/topics/clean.zipcodes
# (normalizes ZIPs, e.g. restores leading zeros dropped by Excel)
#
mydata$zip <- clean.zipcodes(mydata$zip)
mydata
zipcode[1,]
#
# merge(mydata, zipcode, by="zip") into a new df dfNew
#
head(mydata)
head(zipcode)
#
dfNew <- merge(mydata, zipcode, by="zip")
str(dfNew)
head(dfNew)
#
dfNew$Median<-as.numeric(dfNew$Median)
dfNew$Population<-as.numeric(dfNew$Population)
str(dfNew)
#
income <- tapply(dfNew$Median, dfNew$state, mean) # calc mean of median by state
str(income)
head(income)
#
state <- rownames(income) # place rownames from income into state variable
head(state)
#
# mean Median Income by State
#
medianIncome <- data.frame(state, income) # create a df with state variable & income variable
str(medianIncome)
head(medianIncome)
#
pop <- tapply(dfNew$Population, dfNew$state, sum ) # sum up population for each state
str(pop)
head(pop)
state <- rownames(pop) # same content as earlier
#
statePop <- data.frame(state, pop) # create new df statePop
#
dfSimple <- merge(medianIncome, statePop, by="state") # create new df by merging df's medianIncome, statePop
str(dfSimple)
head(dfSimple)
#
# R data set - state.abb (built-in vector of two-letter state abbreviations,
# parallel to state.name)
#
str(state.abb)
head(state.abb)
#
match(dfSimple$state,state.abb) # the relative position of state.abb in dfSimple$state
dfSimple$state
state.abb[45]
state.name[9]
state.name[45]
state.name[c(9,45)]
#
# bring in full state name from abbreviated state name (openintro in HW7);
# match() gives the index of each abbreviation in state.abb, which indexes
# the parallel state.name vector.
#
dfSimple$stateName <- state.name[match(dfSimple$state,state.abb)]
str(dfSimple)
head(dfSimple)
dfSimple$stateName <- tolower(dfSimple$stateName)
head(dfSimple)
#
# us <- map_data("state") # performed above, not adding anything new
#
# Choropleth: states filled by their average median income.
mapIncome <- ggplot(dfSimple, aes(map_id = stateName))
# FIX: refer to the column by bare name inside aes() instead of
# dfSimple$income; using $ inside aes() bypasses ggplot2's data masking
# and breaks when the plot data is subset or facetted.
mapIncome <- mapIncome + geom_map(map = us, aes(fill = income))
mapIncome <- mapIncome + expand_limits(x = us$long, y = us$lat)
mapIncome <- mapIncome + coord_map()
mapIncome <- mapIncome + ggtitle("average median Income of the U.S")
mapIncome
#
#
# bring in full state name from abbreviated state name (openintro in HW7)
#
head(dfNew)
dfNew$stateName <- state.name[match(dfNew$state,state.abb)]
dfNew$stateName <- tolower(dfNew$stateName)
head(dfNew)
#
#mapZip <- ggplot(dfNew, aes(map_id = stateName))
#mapZip <- mapZip + geom_map(map=us, fill="black", color="white")
#mapZip <- mapZip + expand_limits(x =us$long, y = us$lat)
#mapZip <- mapZip + geom_point(data = dfNew,aes(x = dfNew$longitude, y = dfNew$latitude, color=dfNew$Median))
#mapZip <- mapZip + coord_map() + ggtitle("Income per zip code")
#mapZip
#
#
# use dfNew to create map and set "stateName" as map ID
#
# remove al & HI from dfNew$state
#
# dfNew$state<-dfNew[dfNew$state!="AK" & dfNew$state!="HI",]
#
# Dot map: one point per ZIP code, coloured by median income, drawn over a
# black US map.
mapZip <- ggplot(dfNew, aes(map_id = stateName))
# set the background color to be black and line color to be white
mapZip <- mapZip + geom_map(map=us, fill="black", color="white")
# change the limits of x and y axes to print the whole map
mapZip <- mapZip + expand_limits(x =us$long, y = us$lat)
# FIX: refer to columns by bare name inside aes() instead of dfNew$...;
# using $ inside aes() bypasses ggplot2's data masking. Each dot is a zip
# code; dot colour encodes median income.
mapZip <- mapZip + geom_point(data = dfNew, aes(x = longitude, y = latitude, color = Median))
# make sure the map is not stretched and add a title for the map
mapZip <- mapZip + coord_map() + ggtitle("Income per zip code")
# plot the map
mapZip
#
# subsetting map: zoom to a 20-degree window centred on New York City.
# NOTE(review): geocode() (ggmap) performs a network request and may
# require an API key in current ggmap versions -- confirm.
#
latlon <- geocode("NYC, ny")
mapZipZoomed <- mapZip + geom_point(aes(x = latlon$lon, y = latlon$lat), color="darkred", size = 3)
mapZipZoomed <- mapZipZoomed + xlim(latlon$lon-10, latlon$lon+10) + ylim(latlon$lat-10,latlon$lat+10) + coord_map()
mapZipZoomed
|
30f3fda04c3d61cfae5b49c00bfefb9f39b40cdd
|
f119a12f993427f39e51b19d182ac18cb784984f
|
/MOD13Q1/003_MOD-ExtentFix.R
|
8e5ee1bb2f189615a3ff67d0bfecab8f9d9d063f
|
[] |
no_license
|
JepsonNomad/KangerSis_green-up
|
f05eca3c8849fceb45b52a49df91d0cd72cf929d
|
92457a808d072a7caab166cc0e5f6f2276f94b26
|
refs/heads/master
| 2023-03-30T01:21:23.749643
| 2021-04-05T21:33:25
| 2021-04-05T21:33:25
| 278,746,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 992
|
r
|
003_MOD-ExtentFix.R
|
# Due to extent issue in Earth Engine export
# This script resets spatial metadata on raster objects to reflect correct projection information.
library(raster)
library(rgdal)
library(stringr)
# NOTE(review): path placeholder -- must be edited per machine before running.
setwd("PATH/TO/DIR/MOD13Q1/")
#### Projection information ----
MOD1 = raster("MOD13Q1_comp_DOY.tif", 11) # choose a random raster to get crs
ROI = readOGR(".","Kanger_Region_SP") # load shapefile
ROIprojM = spTransform(ROI, CRSobj = proj4string(MOD1))
t_ext = extent(ROIprojM) # target extent object
t_ext
#### Extent fix done using gdal in the terminal ----
# The bare string below is intentionally a no-op; it documents the gdal
# commands that were run manually in a shell, not in R.
"
cd PATH/TO/DIR/MOD13Q1/
gdal_translate -a_ullr -2434880 7477786 -2166206 7315551 MOD13Q1_comp_NDVI.tif MOD13Q1_stack_NDVI.tif
gdal_translate -a_ullr -2434880 7477786 -2166206 7315551 MOD13Q1_comp_DOY.tif MOD13Q1_stack_DOY.tif
gdal_translate -a_ullr -2434880 7477786 -2166206 7315551 MOD13Q1_comp_QA.tif MOD13Q1_stack_QA.tif
"
#### Plot results ----
myraster = raster("MOD13Q1_stack_NDVI.tif", 11) # band 11 of the fixed stack
plot(myraster)
lines(ROIprojM)
|
4a9798273b16439ba542a837c72634fe9a768337
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/ETAS/man/catalog.Rd
|
33627788653b5f7177a24e7e86a2b6028ce9bbe6
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,098
|
rd
|
catalog.Rd
|
\name{catalog}
\alias{catalog}
\title{Create an Earthquake Catalog}
\description{
Creates an object of class \code{"catalog"} representing
an earthquake catalog dataset. An earthquake catalog is a
chronologically ordered list of time, epicenter and magnitude
of all recorded earthquakes in geographical region during
a specific time period.
}
\usage{
catalog(data, time.begin=NULL, study.start=NULL,
study.end=NULL, study.length=NULL,
lat.range=NULL, long.range=NULL,
region.poly=NULL, mag.threshold=NULL,
flatmap=TRUE, dist.unit = "degree", tz="GMT")
}
\arguments{
\item{data}{A \code{data.frame} containing date, time,
latitude, longitude and magnitude of earthquakes.}
\item{time.begin}{The beginning of time span of the catalog.
A character string or an object that can be converted to
date-time (calendar dates plus time to the nearest second) by
\code{as.POSIXlt}. The default \code{NULL} sets it
to the date-time of the first event.}
\item{study.start}{The start of the study period.
A character string or an object that can be converted to
date-time by \code{as.POSIXlt}. If not specified (\code{NULL}),
then \code{time.begin} is used.}
\item{study.end}{The end of the study period.
A character string or an object that can be converted to
date-time by \code{as.POSIXlt}. The default \code{NULL} sets it
to the date-time of the last event.}
\item{study.length}{A single numeric value specifying the length
of the study period in decimal days. Incompatible with
\code{study.end}: either \code{study.end} or \code{study.length}
can be specified, but not both.}
\item{lat.range}{The latitude range of a rectangular study region.
A numeric vector of size 2 giving (latmin, latmax). By default
(\code{NULL}) the range of the latitudes of events is used.}
\item{long.range}{The longitude range of a rectangular study region.
A numeric vector of size 2 giving (longmin, longmax). By default
(\code{NULL}) the range of the longitudes of events is used.}
\item{region.poly}{Polygonal boundary of a non-rectangular
study region. A list with components \bold{lat} and \bold{long}
of equal length specifying the coordinates of the vertices of
a polygonal study region. The vertices must be listed in
\bold{anticlockwise} order.}
\item{mag.threshold}{The magnitude threshold of the catalog.
A positive numeric value. The default (\code{NULL}) sets it to
the minimum magnitude of all events.}
\item{flatmap}{Logical flag indicating whether to transform
the spherical coordinates \eqn{(long, lat)}{(long, lat)} on the earth
surface to flat map (planar) coordinates \eqn{(x, y)}{(x, y)}
in order to approximate the
great-circle distance on the sphere by the corresponding Euclidean
distance on the flat map.}
\item{dist.unit}{A character string specifying the unit of geographical
coordinates and spatial distances between events. Options
are \code{"degree"} (the default case) and \code{"km"}.}
\item{tz}{A character string specifying the time zone to be used
for the date-time conversion in \code{as.POSIXlt}.
The default \code{"GMT"} is the UTC (Universal Time, Coordinated).}
}
\value{
An object of class \code{"catalog"} containing an earthquake
catalog dataset.
}
\details{
The \code{data} is required to have at least 5 columns with names
\code{date}, \code{time}, \code{lat}, \code{long} and \code{mag}
containing, respectively, the date, time, latitude, longitude
and magnitude of each event in the catalog.
The geographical study region can be rectangular or polygonal:
\itemize{
\item
\bold{rectangular study region} can be specified by \code{lat.range}
and \code{long.range} which must be numeric vectors of length 2.
\item
\bold{polygonal study region} can be specified by \code{region.poly}
which contains coordinates of the vertices of the polygon. It must
be either a \code{list} with components \bold{lat} and \bold{long}
of equal length or a \code{data.frame} with columns \bold{lat}
and \bold{long}. The vertices must be listed in
\emph{anticlockwise} order and no vertex should be repeated
(i.e. do not repeat the first vertex).
}
The function \code{\link{inside.owin}} in the \code{spatstat}
is used to indicate whether events lie inside the study region.
Only events inside the study region and the study period
(\code{study.start}, \code{study.end}) are considered as
\emph{target} events. Other events are assumed to be
\emph{complementary} events.
If the events in \code{data} are not chronologically sorted,
then a warning will be produced and the events will be sorted
in ascending order with respect to time of occurrence.
If \code{flatmap=TRUE}, longitude-latitude coordinates convert to
flat map coordinates:
\itemize{
\item if \code{dist.unit="degree"}, then the
Equirectangular projection
\deqn{x = \cos(cnt.lat/180 \pi) (long - cnt.long)}{x = cos(cnt.lat/180 * pi) *(long - cnt.long)}
and \eqn{y = lat - cnt.lat}{y = lat - cnt.lat}
is used to obtain the flat map coordinates \eqn{(x, y)}{(x, y)} in
degrees, where \eqn{cnt.lat}{cnt.lat} and \eqn{cnt.long}{cnt.long} are,
respectively, the latitude and longitude of the centroid of the
geographical region.
\item if \code{dist.unit="km"}, then the projection
\deqn{x = 111.32 \cos(lat/180 \pi) long}{x = 111.32 * cos(lat/180 * pi) * long}
and \eqn{y = 110.547 lat}{y = 110.547 * lat}
is used where \eqn{x}{x} and \eqn{y}{y} are in (approximate) kilometers.
}
}
\seealso{
\code{etas}.
}
\references{
Zhuang J (2012).
Long-term Earthquake Forecasts Based on the Epidemic-type Aftershock
Sequence (ETAS) Model for Short-term Clustering.
\emph{Research in Geophysics}, \bold{2}(1), 52--57.
\href{http://dx.doi.org/10.4081/rg.2012.e8}{doi:10.4081/rg.2012.e8}.
}
\examples{
summary(iran.quakes)
# creating a catalog with rectangular study region
iran.cat <- catalog(iran.quakes, time.begin="1973/01/01",
study.start="1985/01/01", study.end="2016/01/01",
lat.range=c(25, 42), long.range=c(42, 63),
mag.threshold=4.5)
print(iran.cat)
\dontrun{
plot(iran.cat)
}
# equivalently, specifying the length of the study period
iran.cat2 <- catalog(iran.quakes, time.begin="1973/01/01",
study.start="1985/01/01", study.length=11322,
lat.range=c(25, 42), long.range=c(42, 63),
mag.threshold=4.5)
print(iran.cat2)
# specifying a polygonal geographical region
jpoly <- list(long=c(134.0, 137.9, 143.1, 144.9, 147.8,
137.8, 137.4, 135.1, 130.6), lat=c(31.9, 33.0, 33.2,
35.2, 41.3, 44.2, 40.2, 38.0, 35.4))
# creating a catalog with polygonal study region
japan.cat <- catalog(japan.quakes, time.begin="1966-01-01",
study.start="1970-01-01", study.end="2010-01-01",
region.poly=jpoly, mag.threshold=4.5)
print(japan.cat)
\dontrun{
plot(japan.cat)
}
}
\author{Abdollah Jalilian
\email{jalilian@razi.ac.ir}
}
\keyword{spatial}
\keyword{math}
\keyword{earthquake modeling}
|
587aa9e2f933acc0d9b7274884725d108b796a65
|
9adc8c6da1ed43422fe584a522c94a4433464a4c
|
/man/prepareTransactions.Rd
|
93a4e1cf548676e4593c8d5957f26420a91e7023
|
[] |
no_license
|
klainfo/arulesCBA
|
314206c434d8d0986baaf7f3ef27ed86ea9add1e
|
627a318caa984177b7faf02db37b498bcccbc036
|
refs/heads/master
| 2022-04-19T08:26:11.582798
| 2020-04-20T12:50:08
| 2020-04-20T12:50:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,061
|
rd
|
prepareTransactions.Rd
|
\name{prepareTransactions}
\alias{prepareTransactions}
\title{Helper to Convert Data into Transactions}
\description{
Converts a data.frame into transactions by applying class-based discretization.
}
\usage{
prepareTransactions(formula, data, disc.method = "mdlp", match = NULL)
}
\arguments{
\item{formula}{ the formula. }
\item{data}{ a data.frame with the data.}
\item{disc.method}{Discretization method used to discretize continuous variables if data is a data.frame
(default: \code{"mdlp"}). See \code{\link{discretizeDF.supervised}} for more supervised discretization methods.}
\item{match}{ typically \code{NULL}. Only used internally if data is a already a set of transactions.}
}
\value{
An object of class \code{\link[arules]{transactions}} from \pkg{arules} with an attribute called \code{"disc_info"} that
contains information on the used discretization for each column.
}
\author{
Michael Hahsler
}
\seealso{
\code{\link[arules]{transactions}}.
}
\examples{
data("iris")
iris_trans <- prepareTransactions(Species ~ ., iris)
iris_trans
}
|
8b301b3dab523335f1c062ab73d8d9499107fa0a
|
98e3d6171bbde7bcfea9158d6ef1e72f57e86ba6
|
/R/hide-variables.R
|
0a2069c9615d5ae17dd6d5edd76e27a940827adf
|
[] |
no_license
|
mainwaringb/rcrunch
|
b818ce8a542a8f0dadd811f448df20f0857b16fe
|
a162d8e314773a9479a1ae92818b321a94b4a2ee
|
refs/heads/master
| 2022-11-28T15:54:13.310104
| 2020-06-14T18:07:08
| 2020-06-14T18:07:08
| 262,774,601
| 0
| 0
| null | 2020-06-14T14:32:21
| 2020-05-10T11:43:46
|
R
|
UTF-8
|
R
| false
| false
| 2,438
|
r
|
hide-variables.R
|
# hidden() accessors: a dataset's or catalog's hidden variables live in a
# dedicated folder; both delegate through folders() to the folder method.
setMethod("hidden", "CrunchDataset", function(x) hidden(folders(x)))
setMethod("hidden", "VariableCatalog", function(x) hidden(folders(x)))
setMethod("hidden", "VariableFolder", function(x) {
# Fetch the "hidden" catalog URL from the root folder and wrap the
# API response as a VariableFolder.
return(VariableFolder(crGET(shojiURL(rootFolder(x), "catalogs", "hidden"))))
})
#' Hide and Unhide Variables
#'
#' Hiding moves the variable(s) into the dataset's hidden folder; unhiding
#' moves them back to the root folder.
#' @param x a Variable or subset of a VariableCatalog to hide or unhide
#' @return (invisibly) the Variable or VariableCatalog, hidden or unhidden
#' @name hide
#' @aliases hide unhide
#' @seealso [`hideVariables`]
NULL
#' @rdname hide
#' @export
setMethod("hide", "CrunchVariable", function(x) {
.moveToFolder(hidden(rootFolder(x)), x)
# TODO: should these refresh?
invisible(x)
})
#' @rdname hide
#' @export
setMethod("hide", "VariableCatalog", function(x) {
.moveToFolder(hidden(rootFolder(x)), x)
invisible(x)
})
#' @rdname hide
#' @export
setMethod("unhide", "CrunchVariable", function(x) {
# Unhide = move back to the root variable folder.
.moveToFolder(rootFolder(x), x)
invisible(x)
})
#' @rdname hide
#' @export
setMethod("unhide", "VariableCatalog", function(x) {
.moveToFolder(rootFolder(x), x)
invisible(x)
})
#' Hide and unhide variables within a dataset
#' @param dataset the Dataset to modify
#' @param x `dataset`, for `hiddenVariables<-`
#' @param variables names or indices of variables to (un)hide
#' @param value `variables`, for `hiddenVariables<-`
#' @return (invisibly) `dataset` with the specified variables (un)hidden
#' @seealso [`hide`]
#' @export
hideVariables <- function(dataset, variables) {
# mv() moves the variables into the dataset's hidden folder; refresh()
# re-fetches so the returned dataset reflects the server state.
dataset <- mv(dataset, variables, hidden(dataset))
return(invisible(refresh(dataset)))
}
#' @rdname hideVariables
#' @export
`hiddenVariables<-` <- function(x, value) hideVariables(x, value)
#' @rdname hideVariables
#' @export
unhideVariables <- function(dataset, variables) {
# Moving back to the root folder (folders(dataset)) unhides.
dataset <- mv(dataset, variables, folders(dataset))
return(invisible(refresh(dataset)))
}
#' Show the names of a dataset's hidden variables
#' @param dataset the Dataset
#' @param key the Variable attribute to return. Default is "alias", following
#' `getOption("crunch.namekey.dataset")`.
#' @return a sorted character vector of the names of Variables marked as
#' hidden; `character(0)` if there are none.
#' @export
hiddenVariables <- function(dataset, key = namekey(dataset)) {
    hv <- hidden(dataset)
    if (length(hv)) {
        return(sort(vapply(index(hv), vget(key), character(1),
            USE.NAMES = FALSE
        )))
    } else {
        ## FIX: return a zero-length character vector rather than c()
        ## (which is NULL), so the return type is stable for callers.
        ## Backward compatible: length()/for/seq_along behave identically.
        return(character(0))
    }
}
|
e4e88d03ab3eaac60dcc2ff6944a951210af63e0
|
91fde329639324b8b5ca4684b8e93b66ea2e93cb
|
/03_Visualizations/Categorical.R
|
a1500d9a40d915ff30f957ace589a5db930bdb59
|
[] |
no_license
|
Alice16/DV_RProject3
|
a1e5ce24fba3ea306beeb1f71e81318e23be0707
|
2ebad385b538393d2f9fff64139d523dd2793b05
|
refs/heads/master
| 2021-01-22T08:52:49.709502
| 2015-03-06T16:46:41
| 2015-03-06T16:46:41
| 31,392,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,196
|
r
|
Categorical.R
|
# Scatter plot of category counts: one point per category value.
# df: two-column data.frame (category value, count); columns are renamed
#     to "x" and "n" so the aes() mapping below works for any input.
# x:  NOTE(review): this parameter is currently unused in the body --
#     presumably intended for an axis label or title; TODO confirm.
myplot <- function(df, x) {
names(df) <- c("x", "n")
ggplot(df, aes(x=x, y=n)) + geom_point()
}
# SECURITY(review): database host, user, and password are hardcoded in the
# URLs below, and eval(parse(...)) executes text returned by a remote
# service -- both should be replaced (credentials via environment/config,
# parsed JSON instead of eval) before any non-classroom use.
categoricals <- eval(parse(text=substring(getURL(URLencode('http://129.152.144.84:5001/rest/native/?query="select * from LEGISLATOR_ROLE"'), httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_yj2946', PASS='orcl_yj2946', MODE='native_mode', MODEL='model', returnFor = 'R', returnDimensions = 'True'), verbose = TRUE), 1, 2^31-1)))
l <- list()
# For each categorical column of dfR, query its value counts and build a
# count plot; plots are collected in list l for the grid layout below.
for (i in names(dfR)) {
if (i %in% categoricals[[1]]) {
r <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select \\\""i"\\\", count(*) n from LEGISLATOR_ROLE group by \\\""i"\\\" "'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_yj2946', PASS='orcl_yj2946', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON', i=i), verbose = TRUE)))
p <- myplot(r,i)
print(p)
l[[i]] <- p
}
}
# Render the six categorical-count plots into a 2x12 grid layout PNG.
png("./../03_Visualizations/Categoricals.png", width = 25, height = 20, units = "in", res = 72)
grid.newpage()
pushViewport(viewport(layout = grid.layout(2, 12)))
print(l[[1]]+ggtitle('Legislator ID')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Legislator ID"),y=paste("Occurances"))+theme(axis.text.x=element_text(angle=90, size=5))+scale_x_discrete(breaks = c("TXL000513","TXL000212","TXL000284","TXL000329","TXL000439")), vp = viewport(layout.pos.row = 1, layout.pos.col = 1:4))
print(l[[2]]+ggtitle('Type')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Member Type"),y=paste("Occurances")), vp = viewport(layout.pos.row = 1, layout.pos.col = 5:8))
print(l[[3]]+ggtitle('Chamber')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Chamber Type"),y=paste("Occurances")), vp = viewport(layout.pos.row = 1, layout.pos.col = 9:12))
print(l[[4]]+ggtitle('Party')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Party Affiliation"),y=paste("Occurances")), vp = viewport(layout.pos.row = 2, layout.pos.col = 1:4))
print(l[[5]]+ggtitle('Committee ID')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Committee ID"),y=paste("Occurances"))+theme(axis.text.x=element_text(angle=90, size=5))+scale_x_discrete(breaks = c("TXC000035","TXC000101","TXC000062","TXC000057","TXC000033")), vp = viewport(layout.pos.row = 2, layout.pos.col = 5:8))
print(l[[6]]+ggtitle('Committee')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Committee Name"),y=paste("Occurances"))+theme(axis.text.x=element_text(angle=90, size=5))+scale_x_discrete(breaks = c("Business & Industry","Technology","Energy Resources","Calendars","Insurance")), vp = viewport(layout.pos.row = 2, layout.pos.col = 9:12))
dev.off()
# Histogram of a single numeric column.
# df: one-column data.frame; renamed to "x" for the aes() mapping.
# x:  NOTE(review): unused in the body, same as myplot() above -- TODO confirm.
myplot1 <- function(df, x) {
names(df) <- c("x")
ggplot(df, aes(x=x)) + geom_histogram()
}
l1 <- list()
# For each measure (numeric) column of dfR, fetch its non-null values and
# build a histogram; plots are collected in l1 for the grid layout below.
# SECURITY(review): same hardcoded credentials issue as the query above.
for (i in names(dfR)) {
if (i %in% categoricals[[2]]) {
r1 <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select \\\""i"\\\" from LEGISLATOR_ROLE where \\\""i"\\\" is not null"'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_yj2946', PASS='orcl_yj2946', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON', i=i), verbose = TRUE)))
p <- myplot1(r1,i)
print(p)
l1[[i]] <- p
}
}
# Render the three measure histograms into a 1x12 grid layout PNG.
png("./../03_Visualizations/Measures.png", width = 25, height = 10, units = "in", res = 72)
grid.newpage()
pushViewport(viewport(layout = grid.layout(1, 12)))
print(l1[[1]]+ggtitle('Term')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Term"),y=paste("Occurances")), vp = viewport(layout.pos.row = 1, layout.pos.col = 1:4))
print(l1[[2]]+ggtitle('District')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("District"),y=paste("Occurances")), vp = viewport(layout.pos.row = 1, layout.pos.col = 5:8))
print(l1[[3]]+ggtitle('Serial ID')+theme(plot.title=element_text(size=20, face="bold", vjust=2))+labs(x=paste("Serial ID"),y=paste("Occurances")), vp = viewport(layout.pos.row = 1, layout.pos.col = 9:12))
dev.off()
|
90e17087d0531f16ed1f8df055a0f0af33b7a5d2
|
e374a5e7aaf75fb4e8314ab6fe90377898823d8a
|
/tests/testthat.R
|
6ecaacd7145bf81baad90068b48dadb87976c8ae
|
[] |
no_license
|
abhinav-piplani/earthquakeR
|
35ed1006b1eb799ab88b57ffb381c14283aaac7f
|
fc2501ef86807de5a9e3aeaf8def124425e525b3
|
refs/heads/master
| 2021-05-12T04:05:26.335500
| 2018-01-14T17:13:22
| 2018-01-14T17:13:22
| 117,152,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,673
|
r
|
testthat.R
|
library(testthat)
library(earthquakeR)
library(lubridate)
# Check eq_data: the loader should return a data.frame.
test_that("eq_data", {
data <- eq_data()
expect_true(is.data.frame(data))
})
# Check eq_clean_data: a CE-era date (year 251) should get neg.date == 0.
# NOTE(review): %>% is used here without loading magrittr/dplyr explicitly;
# presumably re-exported by earthquakeR -- confirm.
test_that("eq_clean_data", {
raw_data <- data.frame(251,7,9,"25.500","35.500","GREECE: CRETE")
colnames(raw_data) <- c("YEAR", "MONTH", "DAY", "LONGITUDE",
"LATITUDE", "LOCATION_NAME")
data <- raw_data %>% eq_clean_data()
expect_equal(data$neg.date[1], 0)
})
# Check eq_location_clean: "GREECE: CRETE" should become title-case
# "Greece Crete" with the country prefix folded in.
test_that("eq_location_clean", {
raw_data <- data.frame(251,7,9,"25.500","35.500","GREECE: CRETE")
colnames(raw_data) <- c("YEAR", "MONTH", "DAY", "LONGITUDE",
"LATITUDE", "LOCATION_NAME")
data <- raw_data %>% eq_clean_data() %>% eq_location_clean()
expect_equal(data$LOCATION_NAME[1], "Greece Crete")
})
# Check geom_timeline: adding the custom geom should still yield a ggplot.
test_that("geom_timeline", {
g <- eq_clean_data(eq_data()) %>% eq_location_clean() %>%
dplyr::filter(year(DATE) > 2000 & COUNTRY %in% c("CANADA")) %>%
dplyr::mutate(TOTAL_DEATHS = as.numeric(TOTAL_DEATHS),
EQ_PRIMARY = as.numeric(EQ_PRIMARY)) %>%
ggplot2::ggplot(ggplot2::aes(x = DATE,
y = COUNTRY,
colour = TOTAL_DEATHS,
size = EQ_PRIMARY
)) +
geom_timeline()
expect_is(g, "ggplot")
})
# Check geom_timeline_label: same pipeline plus the label layer.
test_that("geom_timeline_label",{
g <- eq_clean_data(eq_data()) %>% eq_location_clean() %>%
dplyr::filter(year(DATE) > 2000 & COUNTRY %in% c("CANADA")) %>%
dplyr::mutate(TOTAL_DEATHS = as.numeric(TOTAL_DEATHS),
EQ_PRIMARY = as.numeric(EQ_PRIMARY)) %>%
ggplot2::ggplot(ggplot2::aes(x = DATE,
y = COUNTRY,
colour = TOTAL_DEATHS,
size = EQ_PRIMARY
)) +
geom_timeline() +
geom_timeline_label()
expect_is(g, "ggplot")
})
# Check eq_map: the interactive map should be a leaflet htmlwidget.
test_that("eq_map", {
temp.object <- eq_clean_data(eq_data()) %>% eq_location_clean() %>%
dplyr::filter(year(DATE) > 2000 & !neg.date & COUNTRY %in% c("MEXICO")) %>%
dplyr::mutate(popup_text = eq_create_label(.)) %>%
eq_map(annot_col = "popup_text")
expect_is(temp.object, "leaflet")
expect_is(temp.object, "htmlwidget")
})
# Check eq_create_label: one row in, one HTML label string out.
# NOTE(review): the expected label omits a total-deaths segment even though
# TOTAL_DEATHS = 47 is supplied -- presumably eq_create_label() only labels
# location and magnitude; confirm against the package.
test_that("eq_create_label", {
data <- data.frame("XYZ", 3.2, 47)
colnames(data) <- c("LOCATION_NAME", "EQ_PRIMARY", "TOTAL_DEATHS")
labels <- data %>% eq_create_label()
expect_equal(length(labels), 1)
expect_equal(labels, "<b>Location: </b> XYZ <br/> <b>Magnitude: </b> 3.2 <br/> ")
})
|
d80ead0c7e1a8c358e55c388a27b6258a2d45645
|
232fc9b238a636bf4b068dc6eb75dd65df6cf410
|
/CLEANUP/02_variables.R
|
cc0dcae3e87a1fe902db6fc11c21d753fd6d768e
|
[] |
no_license
|
the-data-center/Who-Lives
|
bca55b4a94b7c79660b642f222e13de8992294df
|
7c98a4c2d74d2080b9ae81d76fa1a162fab07302
|
refs/heads/master
| 2023-08-03T11:08:44.824444
| 2023-07-31T19:02:29
| 2023-07-31T19:02:29
| 168,587,086
| 0
| 0
| null | 2023-07-31T19:02:30
| 2019-01-31T19:55:06
|
HTML
|
UTF-8
|
R
| false
| false
| 407
|
r
|
02_variables.R
|
# SECURITY(review): this Census API key is committed in plain text; it
# should be rotated and loaded from an environment variable or local
# config file instead.
mycensuskey = "b6844db29933c9dce9e13fa37f1d015281001b95"
#GET CURRENT YEAR FROM BLS CPI CALCULATOR
# CPI inflation multipliers: $1 in the named year = $X in the current year.
cpi04 <- 1.41 #$1 in 2004 = $X in current year
cpi79 <- 3.83
cpi89 <- 2.16
cpi99 <- 1.59
cpi10 <- 1.21
# the current/most recent year - the year the data you're updating represents
year <- 2021
yearPEP <- 2021
year.char <- c("2021")
yearPEP.char <- c("2021")
# Register the Asap font for Windows graphics devices (Windows-only call).
windowsFonts("Asap" = windowsFont("Asap"))
|
00a22612b28db4dee0e38adbc479c7f8f44a426d
|
80f66a992fc733aab681dfa1e103db8105609725
|
/ZZ_Archived/NYCTRS_Results_SensitivityTests2.R
|
03e9960a679105766e786fb37d29671b4e66ee2f
|
[] |
no_license
|
yimengyin16/RSF_NYCTRS
|
cd5cfcad260f1d03fe06f472a263f2e96325acb1
|
82326660d9bc525089772a84837851dfad3713f1
|
refs/heads/master
| 2021-06-05T16:12:11.367594
| 2019-03-17T14:30:25
| 2019-03-17T14:30:25
| 152,126,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,372
|
r
|
NYCTRS_Results_SensitivityTests2.R
|
# Risk measures for NYCTRS
library(knitr)
library(data.table)
library(gdata) # read.xls
library(plyr)
library(dplyr)
options(dplyr.print_min = 100) # default is 10
options(dplyr.print_max = 100) # default is 20
library(ggplot2)
library(magrittr)
library(tidyr) # gather, spread
library(foreach)
library(doParallel)
library(microbenchmark)
library(readxl)
library(stringr)
library(zoo)
library("readxl")
library("XLConnect") # slow but convenient because it reads ranges; NOTE: I had to install Java 64-bit on Windows 10 64-bit to load properly
library(xlsx)
library("btools")
library("scales")
library(gridExtra)
library(grid)
library(plotly)
source("Functions.R")
#*****************************************************
## Notes ####
#*****************************************************
# Analysis 1: Impact of TDA
# Three scenarios
# - no TDA
# - TDA payments are amortized
# - TDA payments are not amortized
# Analysis 2: Impact of funcing policy
# Three scenarios
# - Plan policy (TDA payments are amortized)
# - More backloaded amortization policies: cd/cp, open/closed, 15->30
# - One-Year-Lag-Method
#*****************************************************
## Defining paths for inputs and outputs ####
#*****************************************************
IO_folder <- "Results/"
# Outputs_folder <- "Results/Graphs_report/"
#*****************************************************
## Loading data ####
#*****************************************************
## Outputs of pension finance
# Load every saved model-output file in IO_folder whose name matches
# Pattern, extract its $results data frame, and row-bind them.
# Relies on load() injecting `outputs_list` into the inner function's
# environment from each .RData file.
# Returns (invisibly, via the final assignment) the combined data frame;
# adply()'s index column X1 is dropped.
get_results <- function(IO_folder, Pattern = "^Outputs"){
fn <- function(x) {
load(paste0(IO_folder, "/", x))
# if("results.t7" %in% names(outputs_list)){
# df_out <- bind_rows(outputs_list$results,
# outputs_list$results.t7,
# outputs_list$results.xt7)
# return(df_out)
# } else {
# return(outputs_list$results)
# }
return(outputs_list$results)
}
file_select <- dir(IO_folder, Pattern)
results_all <- adply(file_select, 1, fn) %>% select(-X1)
}
# Load all simulation outputs and put the key id columns first.
results_all <- get_results(IO_folder) %>% select(runname, sim, year, everything())
#*****************************************************
## Selecting runs and calculating risk measures ####
#*****************************************************
# Sensitivity-test runs: baseline plus demographic-shift scenarios.
runs_test <- c("t4a_TDAamortAS_OYLM",
"t4a_HighYos_nUp",
"t4a_LowYos_nUp",
"t4a_HighYos_sUp",
"t4a_LowYos_sUp")
# Display labels, parallel to runs_test (order must match).
runs_test_labels <- c("Baseline",
"High YOS members: number up",
"Low YOS members: number up",
"High YOS members: salary up",
"Low YOS members: salary up"
)
runs_all <- runs_test
runs_all_labels <- runs_test_labels
# Calculate total final ERC rate for runs with DC reform (include ERC to DC in ERC.final_PR)
# in the past 5 years ERC rate
#ERC_rate_5y <- data.frame(year = 2012:2016, ERC_rate_5y = c(0.0869,0.0915, 0.0915 ,0.0998, 0.1078))
# ERC_rate_5y <- c(8.69,9.15, 9.15 ,9.98, 10.78)
#*****************************************************
## Computing risk measures ####
#*****************************************************
# Keep only the selected runs and the stochastic simulations (sim >= 0),
# through year 2048.
df_all.stch <-
results_all %>%
filter(runname %in% runs_all) %>%
filter(sim >= 0, year <= 2048)
# Per run and simulation path: funded ratio on market assets (FR_MA, %) and
# "ever breached up to this year" flags (cumany() makes a one-time breach
# permanent along the path). FR100more2 is the point-in-time version.
# ERC_hike flags a rise of >= 10% of payroll over any 5-year window;
# na2zero() -- presumably defined in Functions.R (sourced above) -- zeroes the
# NAs produced by lag() in the first 5 years. TODO confirm.
# Then, per run and year, summarize across simulations into breach
# probabilities (%) and distribution quantiles of FR and ERC rate.
df_all.stch <-
df_all.stch %>%
select(runname, sim, year, AL, MA, PR, ERC_PR, ERC_noTDA_PR, i.r, i.r.wTDA) %>%
group_by(runname, sim) %>%
mutate(FR_MA = 100 * MA / AL,
FR40less = cumany(FR_MA <= 40),
FR50less = cumany(FR_MA <= 50),
FR100more = cumany(FR_MA >= 100),
FR100more2 = FR_MA >= 100,
ERC_high = cumany(ERC_PR >= 60),
ERC_hike = cumany(na2zero(ERC_PR - lag(ERC_PR, 5) >= 10)),
) %>%
group_by(runname, year) %>%
summarize(FR40less = 100 * sum(FR40less, na.rm = T)/n(),
FR50less = 100 * sum(FR50less, na.rm = T)/n(),
FR100more = 100 * sum(FR100more, na.rm = T)/n(),
FR100more2= 100 * sum(FR100more2, na.rm = T)/n(),
ERC_high = 100 * sum(ERC_high, na.rm = T)/n(),
ERC_hike = 100 * sum(ERC_hike, na.rm = T)/n(),
FR.q10 = quantile(FR_MA, 0.1,na.rm = T),
FR.q25 = quantile(FR_MA, 0.25, na.rm = T),
FR.q50 = quantile(FR_MA, 0.5, na.rm = T),
FR.q75 = quantile(FR_MA, 0.75, na.rm = T),
FR.q90 = quantile(FR_MA, 0.9, na.rm = T),
ERC_PR.q10 = quantile(ERC_PR, 0.1, na.rm = T),
ERC_PR.q25 = quantile(ERC_PR, 0.25, na.rm = T),
ERC_PR.q50 = quantile(ERC_PR, 0.5, na.rm = T),
ERC_PR.q75 = quantile(ERC_PR, 0.75, na.rm = T),
ERC_PR.q90 = quantile(ERC_PR, 0.9, na.rm = T)
) %>%
# Attach ordered display labels, sort by them, and drop the grouping.
mutate(runname.fct = factor(runname, levels = runs_all, labels = runs_all_labels)) %>%
arrange(runname.fct) %>%
ungroup()
# Quick interactive checks: print risk measures for selected years, then 2045.
df_all.stch %>%
filter(year %in% c(2016, seq(2020, 2045, 5)))
df_all.stch %>%
filter(year %in% 2045)
# Spot-check one simulation path of the raw outputs for the test runs.
results_all %>%
select(runname, sim, year, AL, MA, AA, FR) %>%
filter(sim == 1, year %in% c(2016:2045), runname %in% runs_test)
#*****************************************************
## Explore simple calibration approach ####
#*****************************************************
# First-year snapshot of the deterministic run (sim == 0).
# NOTE(review): assumes results_all already contains FR_MA -- verify against
# the output files, since FR_MA is otherwise only derived in df_all.stch.
results_all %>%
filter(runname %in% runs_all, sim == 0, year == 2016) %>%
select(runname, year, FR_MA, ERC_PR, MA, AA, AL, NC, SC, PR )
#
# df <-
# results_all %>%
# filter(runname %in% runs_all, sim == 0, year == 2016) %>%
# select(runname, year, ERC_PR, MA, AA, AL, AL.act.laca, AL.act.v, AL.act.death, AL.act.disbRet, AL.la, NC, SC, PR ) %>% t
#
# rowNames <- rownames(df)[-c(1,2) ]
# colNames <- df[1,]
#
# df <- df[-c(1,2), ] %>% as.data.frame()
# names(df) <- colNames
# df$t4a_HighYos_nUp
# df %<>%
# mutate(variable = rowNames) %>%
# select(variable, everything()) %>%
# mutate_at(vars(-variable), funs( as.numeric(levels(.)[.]))) %>%
# as.tbl
# df
#
#
# df %<>%
# mutate(ratio_HighYos_nUp =1/(t4a_HighYos_nUp / t4a_TDAamort),
# ratio_LowYos_nUp =1/(t4a_LowYos_nUp /t4a_TDAamort),
# ratio_HighYos_sUp =1/(t4a_HighYos_sUp /t4a_TDAamort),
# ratio_LowYos_sUp =1/(t4a_LowYos_sUp /t4a_TDAamort))
# df
#
# df_sensitivity <- df
# save(df_sensitivity, file = "df_sensitivity.RData")
#*****************************************************
## Compare key results ####
#*****************************************************
# Risk of Low funded ratio:
# Under immediate recognition and payment of TDA interests, risk measures are identical across
# all test scenarios.
# Deterministic (sim == 0) first-year snapshot of liabilities, contributions,
# benefits and payroll for the test runs.
results_all %>%
select(runname, sim, year, AL, NC_PR, ERC_PR, PVFB, B, PR) %>%
mutate(runname = factor(runname, levels = runs_test, labels = runs_test_labels)) %>%
filter(sim == 0, year %in% c(2016))
# Probability of funded ratio ever falling below 40%, wide by scenario.
df_all.stch %>%
select(runname, year, FR40less) %>%
mutate(runname = factor(runname, levels = runs_test, labels = runs_test_labels)) %>%
spread(runname, FR40less) %>%
filter(year %in% c(2016, 2020, 2030, 2040, 2045))
# Probability of an ERC hike (>= 10% of payroll over 5 years), wide by scenario.
df_all.stch %>%
select(runname, year, ERC_hike) %>%
mutate(runname = factor(runname, levels = runs_test, labels = runs_test_labels)) %>%
spread(runname, ERC_hike) %>%
filter(year %in% c(2016, 2020, 2030, 2040, 2045))
# NOTE(review): `ERC_TDA_hike` is never computed in df_all.stch above (the
# summarize step produces only ERC_hike / ERC_high), so the original
# select() failed at run time with "object not found". Guard the table so
# the script runs end-to-end; if the TDA-specific measure is wanted, add its
# computation to the summarize() step upstream.
if ("ERC_TDA_hike" %in% names(df_all.stch)) {
  df_all.stch %>%
    select(runname, year, ERC_TDA_hike) %>%
    mutate(runname = factor(runname, levels = runs_test, labels = runs_test_labels)) %>%
    spread(runname, ERC_TDA_hike) %>%
    filter(year %in% c(2016, 2020, 2025, 2030, 2040, 2045))
}
# Probability that ERC ever exceeds 60% of payroll, for selected years,
# shown wide with one column per scenario (labelled run names).
df_all.stch %>%
  filter(year %in% c(2016, 2020, 2030, 2040, 2045)) %>%
  mutate(runname = factor(runname, levels = runs_test, labels = runs_test_labels)) %>%
  select(runname, year, ERC_high) %>%
  spread(runname, ERC_high)
# NOTE(review): `ERC_TDA_high` is never computed in df_all.stch above (only
# ERC_high is), so the original select() failed at run time. Guard the table
# so the script runs end-to-end; add the column upstream if the TDA-specific
# measure is needed.
if ("ERC_TDA_high" %in% names(df_all.stch)) {
  df_all.stch %>%
    select(runname, year, ERC_TDA_high) %>%
    mutate(runname = factor(runname, levels = runs_test, labels = runs_test_labels)) %>%
    spread(runname, ERC_TDA_high) %>%
    filter(year %in% c(2016, 2020, 2030, 2040, 2045))
}
#*****************************************************
## Analysis 1 Impact of TDA ####
#*****************************************************
# Tables of key risk measures
# Print all risk measures for the two headline years, ordered by year.
df_all.stch %>%
filter(runname %in% runs_test, year %in% c(2030, 2045)) %>%
arrange(year)
# Figure: Risk of low funded ratio (FR40less and FR50less, 5 lines in each graph)
# Cumulative probability (by year) of the funded ratio ever dropping below
# 40% / 50%, one facet per threshold, one line per scenario.
# RIG.* colors and RIG.theme() presumably come from Functions.R (sourced
# at the top) -- verify.
fig.title <- "Probability of funded ratio below 40% or 50% in any year up to the given year"
fig.subtitle <- "Baseline case and alternative year-of-service distributions of active members"
fig_FR40less <- df_all.stch %>%
filter(runname %in% runs_test) %>%
# mutate(runname = factor(runname, labels = c(lab_s1, lab_s2))) %>%
select(runname.fct, year, FR40less, FR50less) %>%
#mutate(FR40less.det = 0) %>%
# Reshape to long so both measures can share one plot spec, then label them.
gather(variable, value, -year, -runname.fct) %>%
mutate(variable = factor(variable,
levels = c("FR40less", "FR50less"),
labels = c("Funded ratio below 40%", "Funded ratio below 50%"))) %>%
ggplot(aes(x = year, y = value, color = runname.fct, shape = runname.fct)) + theme_bw() +
facet_grid(.~ variable) +
geom_point(size = 2) + geom_line() +
coord_cartesian(ylim = c(0,50)) +
scale_y_continuous(breaks = seq(0,200, 5)) +
scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5), 2048)) +
scale_color_manual(values = c("black",RIG.blue, RIG.green, RIG.purple, RIG.orange), name = "") +
scale_shape_manual(values = c(17,16,15, 18, 19), name = "") +
labs(title = fig.title,
subtitle = fig.subtitle,
x = NULL, y = "Probability (%)") +
guides(color = guide_legend(keywidth = 1.5, keyheight = 3))+
RIG.theme()
# Print the figure.
fig_FR40less
# Figure: Risk of ERC hike (3 lines in a single graph)
# Cumulative probability (by year) of an ERC rise of >= 10% of payroll
# within any 5-year window; one line per scenario.
fig.title <- "Probability of employer contribution rising more than 10% of payroll \nin a 5-year period at any time prior to and including the given year"
fig.subtitle <- "Baseline case and alternative year-of-service distributions of active members"
fig_ERChike <- df_all.stch %>%
filter(runname %in% runs_test) %>%
select(runname.fct, year, ERC_hike) %>%
#mutate(ERChike.det = 0) %>%
# gather(type, value, -year, -runname) %>%
ggplot(aes(x = year, y = ERC_hike, color = runname.fct, shape = runname.fct)) + theme_bw() +
geom_point(size = 2) + geom_line() +
coord_cartesian(ylim = c(0,100)) +
scale_y_continuous(breaks = seq(0,200, 10)) +
scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5), 2048)) +
scale_color_manual(values = c("black",RIG.blue, RIG.green, RIG.purple, RIG.orange), name = "") +
scale_shape_manual(values = c(17,16, 15, 18, 19), name = "") +
labs(title = fig.title,
subtitle = fig.subtitle,
x = NULL, y = "Probability (%)") +
guides(color = guide_legend(keywidth = 1.5, keyheight = 3))+
RIG.theme()
# Print the figure.
fig_ERChike
# Figure: Risk of high ERC (3 lines in a single graph)
# Cumulative probability (by year) of ERC ever exceeding 60% of payroll;
# one line per scenario.
fig.title <- "Probability of employer contribution rising above 60% of payroll \nat any time prior to and including the given year"
fig.subtitle <- "Baseline case and alternative year-of-service distributions of active members"
fig_ERChigh <- df_all.stch %>%
filter(runname %in% runs_test) %>%
select(runname.fct, year, ERC_high) %>%
#mutate(ERChike.det = 0) %>%
# gather(type, value, -year, -runname) %>%
ggplot(aes(x = year, y = ERC_high, color = runname.fct, shape = runname.fct)) + theme_bw() +
geom_point(size = 2) + geom_line() +
coord_cartesian(ylim = c(0,50)) +
scale_y_continuous(breaks = seq(0,200, 10)) +
scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5), 2048)) +
scale_color_manual(values = c("black",RIG.blue, RIG.green, RIG.purple, RIG.orange), name = "") +
scale_shape_manual(values = c(17,16, 15, 18, 19), name = "") +
labs(title = fig.title,
subtitle = fig.subtitle,
x = NULL, y = "Probability (%)") +
guides(color = guide_legend(keywidth = 1.5, keyheight = 3))+
RIG.theme()
# Print the figure.
fig_ERChigh
# # Figure: Distribution of FR
#
# fig.title <- "Distribution of employer contribution rates without TDA transfers across simulations"
# fig.subtitle <- "Assumption achieved: expected compound return = 7% (w/o TDA transfer)"
# fig_ERCdist <-
# df_all.stch %>%
# filter(runname %in% runs_TDA) %>%
# select(runname.fct, year,
# ERC_PR.q25,
# ERC_PR.q50,
# ERC_PR.q75,
# ERC_PR.q90) %>%
# gather(Var, value, -runname.fct, -year) %>%
# # mutate(
# # runname = factor(runname, levels = runs_test, labels = runs_test_labels)
# # ) %>%
# #mutate(type = ifelse(str_detect(Var, "TDA"), "wTDA", "noTDA"),
# # Var = str_replace(Var, "TDApayouts_", "")) %>%
# # mutate(runname = factor(runname, labels = c(lab_s1, lab_s2))) %>%
# ggplot(aes(x = year, y = value,
# color = factor(Var, levels = c("ERC_PR.q90", "ERC_PR.q75", "ERC_PR.q50", "ERC_PR.q25")))) +
# facet_grid(. ~ runname.fct) +
# theme_bw() +
# geom_line() +
# geom_point(size = 2) +
# coord_cartesian(ylim = c(-20,100)) +
# scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5))) +
# scale_y_continuous(breaks = seq(-100, 100, 10)) +
# scale_color_manual(values = c("red", RIG.red, RIG.blue, RIG.green), name = NULL,
# label = c("90th percentile", "75th percentile", "50th percentile", "25th percentile")) +
# scale_shape_manual(values = c(14, 17, 16, 15, 18), name = NULL,
# label = c("90th percentile", "75th percentile", "50th percentile", "25th percentile")) +
# labs(title = fig.title,
# subtitle = fig.subtitle,
# x = NULL, y = "%") +
# theme(axis.text.x = element_text(size = 8)) +
# RIG.theme()
# fig_ERCdist
#
#
# # Figure: Dsitribution of ERC
#
# fig.title <- "Distribution of funded ratios across simulations"
# fig.subtitle <- "Assumption achieved: expected compound return = 7% (w/o TDA transfer)"
# fig_FRdist <- df_all.stch %>%
# filter(runname %in% runs_TDA) %>%
# select(runname.fct, year, FR.q75, FR.q50, FR.q25, FR.q10) %>%
# gather(type, value, -runname.fct, -year) %>%
# mutate(type = factor(type, levels = c("FR.q75", "FR.q50", "FR.q25", "FR.q10"),
# labels = c("75th percentile", "50th percentile", "25th percentile", "10th percentile")
# )) %>%
# ggplot(aes(x = year,
# y = value,
# color = type),
# shape = type) + theme_bw() +
# facet_grid(.~runname.fct) +
# geom_line() +
# geom_point(size = 2) +
# geom_hline(yintercept = 100, linetype = 2, size = 1) +
# coord_cartesian(ylim = c(0,160)) +
# scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5))) +
# scale_y_continuous(breaks = seq(0, 500, 20)) +
# scale_color_manual(values = c(RIG.green, RIG.blue, RIG.red, "red"), name = NULL) +
# scale_shape_manual(values = c(15, 16, 17, 18), name = NULL) +
# labs(title = fig.title,
# subtitle = fig.subtitle,
# x = NULL, y = "Percent") +
# theme(axis.text.x = element_text(size = 8)) +
# RIG.theme()
#
# fig_FRdist
#
#
# #*****************************************************
# ## Analysis 2 Impact of Funding policy ####
# #*****************************************************
#
#
# runs_fPolicy1 <- c(
# "t4a_TDAamort",
# "t4a_C15dA0",
# "t4a_O15dA6",
# "t4a_O15pA6",
# "t4a_O30pA6",
# "t4a_C30dA6",
# "t4a_C15pA6"
#
# )
#
# runs_fPolicy_labels1 <- c("TRS policy",
# "no asset smoothing",
# "Open amort.",
# "Open level pct amort.",
# "open level pct 30-year amort.",
# "30-year amort.",
# "Level percent amort."
# )
#
#
# fig.title <- "Probability of funded ratio below 40% or 50% in any year up to the given year"
# fig.subtitle <- "Assumption achieved; expected compound return = 7% (w/o TDA transfer)"
# fig_lowFR.fPolicy <-
# df_all.stch %>%
# filter(runname %in% runs_fPolicy1[1:5]) %>%
# mutate(runname.fct = factor(runname, levels = runs_fPolicy1[1:5], labels = runs_fPolicy_labels1[1:5])) %>%
# select(runname.fct, year, FR40less, FR50less) %>%
# #mutate(FR40less.det = 0) %>%
# gather(variable, value, -year, -runname.fct) %>%
# mutate(variable = factor(variable,
# levels = c("FR40less", "FR50less"),
# labels = c("Funded ratio below 40%", "Funded ratio below 50%"))) %>%
# ggplot(aes(x = year, y = value, color = runname.fct, shape = runname.fct)) + theme_bw() +
# facet_grid(.~ variable) +
# geom_point(size = 2) + geom_line() +
# coord_cartesian(ylim = c(0,60)) +
# scale_y_continuous(breaks = seq(0,200, 5)) +
# scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5))) +
# scale_color_manual(values = c("black", "grey60", RIG.blue, RIG.red, RIG.green), name = "") +
# scale_shape_manual(values = c(17,16,15, 18, 19), name = "") +
# labs(title = fig.title,
# subtitle = fig.subtitle,
# x = NULL, y = "Probability (%)") +
# guides(color = guide_legend(keywidth = 1.5, keyheight = 3))+
# RIG.theme()
# fig_lowFR.fPolicy
#
#
# # Figure: Risk of ERC hike (3 lines in a single graph)
#
# fig.title <- "Probability of employer contribution rising more than 10% of payroll \nin a 5-year period at any time prior to and including the given year"
# fig.subtitle <- "Assumption achieved; expected compound return = 7% (w/o TDA transfer)"
# fig_ERChike.fPolicy <- df_all.stch %>%
# filter(runname %in% runs_fPolicy1[1:5]) %>%
# mutate(runname.fct = factor(runname, levels = runs_fPolicy1[1:5], labels = runs_fPolicy_labels1[1:5])) %>%
# select(runname.fct, year, ERC_hike) %>%
# #mutate(ERChike.det = 0) %>%
# # gather(type, value, -year, -runname) %>%
# ggplot(aes(x = year, y = ERC_hike, color = runname.fct, shape = runname.fct)) + theme_bw() +
# geom_point(size = 2) + geom_line() +
# coord_cartesian(ylim = c(0,100)) +
# scale_y_continuous(breaks = seq(0,200, 10)) +
# scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5))) +
# scale_color_manual(values = c("black", "grey60", RIG.blue, RIG.red, RIG.green), name = "") +
# scale_shape_manual(values = c(17,16,15, 18, 19), name = "") +
# labs(title = fig.title,
# subtitle = fig.subtitle,
# x = NULL, y = "Probability (%)") +
# guides(color = guide_legend(keywidth = 1.5, keyheight = 3))+
# RIG.theme()
# fig_ERChike.fPolicy
#
#
# # Figure: Risk of high ERC (3 lines in a single graph)
#
# fig.title <- "Probability of employer contribution rising above 60% of payroll \nat any time prior to and including the given year"
# fig.subtitle <- "Assumption achieved; expected compound return = 7% (w/o TDA transfer)"
# fig_ERChigh.fPolicy <- df_all.stch %>%
# filter(runname %in% runs_fPolicy1[1:5]) %>%
# mutate(runname.fct = factor(runname, levels = runs_fPolicy1[1:5], labels = runs_fPolicy_labels1[1:5])) %>%
# select(runname.fct, year, ERC_high) %>%
# #mutate(ERChike.det = 0) %>%
# # gather(type, value, -year, -runname) %>%
# ggplot(aes(x = year, y = ERC_high, color = runname.fct, shape = runname.fct)) + theme_bw() +
# geom_point(size = 2) + geom_line() +
# coord_cartesian(ylim = c(0,50)) +
# scale_y_continuous(breaks = seq(0,200, 10)) +
# scale_x_continuous(breaks = c(2016, seq(2020, 2045, 5))) +
# scale_color_manual(values = c("black", "grey60", RIG.blue, RIG.red, RIG.green), name = "") +
# scale_shape_manual(values = c(17,16,15, 18, 19), name = "") +
# labs(title = fig.title,
# subtitle = fig.subtitle,
# x = NULL, y = "Probability (%)") +
# guides(color = guide_legend(keywidth = 1.5, keyheight = 3))+
# RIG.theme()
# fig_ERChigh.fPolicy
#
#
|
1d2e82b738f64723f5a4fac071cf58a6d3c43ce0
|
e56262bee9693f61021fea5fc000ebcf46ac34bb
|
/man/C_node_depth.Rd
|
5eb2f0d08d1d4d0e9b6b2ebb1cca8047b003a025
|
[] |
no_license
|
nanoquanta/TreeTools
|
d1ed57deb83122366b422117642eb986df1457bf
|
a858cf1c96de19b786b8243ef3d4ddfd6d0d8dd1
|
refs/heads/master
| 2020-08-26T09:35:04.083356
| 2019-10-19T10:59:15
| 2019-10-19T10:59:15
| 216,997,642
| 0
| 1
| null | 2019-10-23T07:41:41
| 2019-10-23T07:41:40
| null |
UTF-8
|
R
| false
| true
| 308
|
rd
|
C_node_depth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phylo.R
\name{C_node_depth}
\alias{C_node_depth}
\title{Node depth
Wrapper for the ape function}
\usage{
C_node_depth(nTip, nNode, parent, child, nEdge)
}
\description{
Node depth
Wrapper for the ape function
}
\keyword{internal}
|
b9fb41a6c9c8468de4958a9f066916996e85445a
|
b7fe71b49afb5978a6628b1833dda3760a4bde1a
|
/man/daphnia.Rd
|
cd27c31c64fbfef95d8f1d784133fb4a3c3995e5
|
[] |
no_license
|
cran/vitality
|
65944312461109593f18fb52371d15f13c0b91b8
|
e4ab8e6ce8bbee13404c98e3906afb04bd6ecd76
|
refs/heads/master
| 2021-01-21T21:55:04.170484
| 2018-05-13T20:26:30
| 2018-05-13T20:26:30
| 17,700,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
rd
|
daphnia.Rd
|
\docType{data}
\name{daphnia}
\alias{daphnia}
\title{Sample Daphnia Data}
\format{data frame}
\source{
http://cbr.washington.edu/analysis/vitality
Anderson, J.J. (2000). "A vitality-based model relating stressors and environmental properties to organism survival." Ecological Monographs 70(3):445-470 (Figure 5)
}
\description{
Sample survival data for daphnia. Columns include "days" and "lx" (cumulative survival proportion by day).
}
\keyword{datasets}
|
f236b3ad5e461df4304cbdc1296a972487e99afd
|
c9d7e4f0fcc61eb7c5215fdffced4b9db3c34d7e
|
/man/print.cirq.devices.line_qubit.LineQubit.Rd
|
f5c6925c3aa38661eb20dd14b5e9f4fee601de68
|
[
"Apache-2.0"
] |
permissive
|
turgut090/Cirq
|
091424a209295d0478459dcaa80a6d74384f9690
|
cfa48055034a83655e56fb9a6c9f0499dd48d710
|
refs/heads/master
| 2022-10-06T04:37:32.333261
| 2020-06-07T06:06:11
| 2020-06-07T06:06:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
print.cirq.devices.line_qubit.LineQubit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generic_print_op.R
\name{print.cirq.devices.line_qubit.LineQubit}
\alias{print.cirq.devices.line_qubit.LineQubit}
\title{LineQubit}
\usage{
\method{print}{cirq.devices.line_qubit.LineQubit}(x, ...)
}
\arguments{
\item{x}{an object used to select a method.}
\item{...}{further arguments passed to or from other methods.}
}
\description{
LineQubit
}
|
e8862f8f492ac0bd630bfe124c2a863c93ee7ccc
|
910a9f85f4712cfb05be5b6a8e0c9c36096aeea6
|
/diuretic_control_gout_GWAS_setup_all_controls.R
|
9905ef4aef8ff7d044f1deb90a0d099641b73ba5
|
[] |
no_license
|
lizhihao1990/Cadzow2017_Ukbiobank_Gout
|
8345f7edbd2f0142f4eb08ebb2d3233b1eb85d4c
|
15eefd10ba5b58cbeb7d4f785971e4c96d6616f3
|
refs/heads/master
| 2020-05-09T15:29:25.186983
| 2017-08-15T22:24:42
| 2017-08-15T22:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,599
|
r
|
diuretic_control_gout_GWAS_setup_all_controls.R
|
# Murray Cadzow
# University of Otago
# 21 April 2016
# script for creating normal gout GWAS using all cases for each gout criteria and all controls
#control criteria: diuretics
#gout criteria: all, self report, self report + ULT, winnard, hospital
# run ukbiobank_gout.R first to create required affection columns
# only change affection status based on gout/control criteria, everything else is -9 and
# plink will handle subsetting during analysis
#GWAS_all_controls/{controls_no_diuretics,controls_diuretics,controls}/{all,winnard,hosp,all_male,hosp_male,self,self_ult}/{adjusted,unadjusted}
# Mount point for the shared storage; all input/output paths hang off this.
xsan <- '/media/xsan/'
# read in fam file for sample filtered plink files
data_dir <- paste0(xsan,"/staff_groups/merrimanlab/Documents/Murray/ukbiobank_util/data/")
scratch_dir <- paste0(xsan,"/scratch/merrimanlab/murray/working_dir/UkBio/")
# This variant of the script uses diuretic-using controls only.
control_cond <- 'controls_diuretics'
# load() is expected to bring a data frame named `genotyped` into scope
# (phenotypes keyed by f.eid) -- see field key below. TODO confirm contents.
load(paste0(data_dir,"ukbiobank_genotyped2016-04-26.RData"))
# Waist (f.48) over height (f.50), per the field key below.
genotyped$waist_to_height_ratio <- genotyped$f.48.0.0 / genotyped$f.50.0.0
fam_file <- read.table(paste0(scratch_dir,"chr1impv1.fam"), header=FALSE, stringsAsFactors = FALSE)
colnames(fam_file) <- c("FID","IID","PID","MID","SEX","AFF")
# Record the original row order so it can be restored after merge().
fam_file$sort <- as.numeric(rownames(fam_file))
# f.210000.0.0 = ethnicity
# f.31.0.0 = sex
# f.21003.0.0 = age
# f.21001.0.0 = bmi
# f.48.0.0 = waist
# f.50.0.0 = height
# f.22001.0.0 = genetic sex
#merge
# Left-join phenotypes onto the .fam samples; the grep('22009') columns are
# the genetic principal components (named PCA1..15 when written out below).
new_fam_file <- merge(fam_file, genotyped[,c("f.eid", "f.21000.0.0", "f.31.0.0", "f.21003.0.0","f.21001.0.0",'waist_to_height_ratio', 'f.48.0.0', 'f.50.0.0','f.22001.0.0', "goutaff", "control", "goutwinnard","goutself", "gouthosp","gout_winnard_self","goutall", "goutult","gout_self_ult", 'diuretics', colnames(genotyped)[grep('22009',colnames(genotyped))] )], by.x = "IID", by.y = "f.eid", all.x=TRUE)
# Restore the original .fam row order (merge() reorders rows by the join
# key), using the `sort` index captured before merging.
new_fam_file <- new_fam_file[order(new_fam_file$sort),]
# Fail fast if the merge changed the number of rows: every .fam written
# below must stay row-aligned with the genotype files. (The original merely
# printed TRUE/FALSE to the console and continued regardless.)
stopifnot(nrow(fam_file) == nrow(new_fam_file))
# Section pattern (repeated for every gout definition below):
#   1. reset AFF/SEX to -9 (plink missing code),
#   2. recode sex (plink wants 1 = male, 2 = female),
#   3. mark cases (AFF = 2) and diuretic-using controls (AFF = 1),
#   4. blank out non-white ethnicities,
#   5. write the .fam for this gout definition.
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 2
# affstat for all gout
gout_cond <- 'all'
new_fam_file[new_fam_file$goutall == 1 & !is.na(new_fam_file$goutall), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
# Console cross-tab of affection status by ethnicity (sanity check).
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
# (codes 1001/1002/1003 -- presumably the UK Biobank white ethnicity codes; verify)
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
### all_male
# Male-only variant: females get SEX = NA.
# NOTE(review): NA is written literally as "NA" in the .fam -- confirm
# downstream tooling treats that as missing.
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- NA
# affstat for all gout
gout_cond <- 'all_male'
new_fam_file[new_fam_file$goutall == 1 & !is.na(new_fam_file$goutall), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
#### fam gout hospital
# Same pattern as above; cases are the hospital-diagnosed gout definition.
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 2
# affstat for hosp gout
gout_cond <- 'hosp'
new_fam_file[new_fam_file$gouthosp == 1 & !is.na(new_fam_file$gouthosp), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
#### fam gout hospital male
# Male-only variant of the hospital definition (females get SEX = NA).
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- NA
# affstat for hosp gout
gout_cond <- 'hosp_male'
new_fam_file[new_fam_file$gouthosp == 1 & !is.na(new_fam_file$gouthosp), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
#### fam gout winnard
# Same pattern; cases are the winnard gout definition.
#reset sex and affstat
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 2
# affstat for winnard gout
gout_cond <- 'winnard'
new_fam_file[new_fam_file$goutwinnard == 1 & !is.na(new_fam_file$goutwinnard), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
#### fam gout self
# Same pattern; cases are self-reported gout.
#reset sex and affstat
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 2
# affstat for self reported gout
gout_cond <- 'self'
new_fam_file[new_fam_file$goutself == 1 & !is.na(new_fam_file$goutself), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
### self defined or ULT
# Same pattern; cases are self-reported gout OR urate-lowering therapy (ULT).
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 2
# affstat for self + ULT gout
gout_cond <- 'self_ult'
new_fam_file[new_fam_file$gout_self_ult == 1 & !is.na(new_fam_file$gout_self_ult), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
### only self, not classified any other way
# Cases: self-reported gout, excluding anyone also meeting the winnard
# (includes ULT) or hospital definitions (those are set to NA, i.e. dropped).
# NOTE(review): AFF = NA is written literally as "NA" in the .fam -- confirm
# downstream tooling treats that as missing.
#reset sex and affstat
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 2
# affstat for self reported gout
gout_cond <- 'self_only'
new_fam_file[new_fam_file$goutself == 1 & !is.na(new_fam_file$goutself), "AFF"] <- 2
new_fam_file[new_fam_file$goutwinnard == 1 & !is.na(new_fam_file$goutwinnard), "AFF"] <- NA # includes ULT too
new_fam_file[new_fam_file$gouthosp == 1 & !is.na(new_fam_file$gouthosp), "AFF"] <- NA
# BUG FIX: every other section of this diuretic-control script restricts
# controls to diuretic users (diuretics == 1); this section omitted that
# condition and used all controls. Apply the same restriction here.
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
### ULT
# Same pattern; cases are people on urate-lowering therapy (ULT).
#reset case/control as precaution
new_fam_file$AFF <- -9
new_fam_file$SEX <- -9
#in ukbio males are coded as female = 0, males = 1
new_fam_file[new_fam_file$f.31.0.0 == 1 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 1
new_fam_file[new_fam_file$f.31.0.0 == 0 & !is.na(new_fam_file$f.31.0.0), "SEX"] <- 2
# affstat for self + ULT gout
gout_cond <- 'ult'
new_fam_file[new_fam_file$goutult == 1 & !is.na(new_fam_file$goutult), "AFF"] <- 2
new_fam_file[new_fam_file$control == 1 & !is.na(new_fam_file$control) & new_fam_file$diuretics == 1 & !is.na(new_fam_file$diuretics),"AFF"] <- 1
table(new_fam_file$AFF, new_fam_file$f.21000.0.0, exclude=NULL)
# blank out non-white ethnicities
new_fam_file[!(!is.na(new_fam_file$f.21000.0.0) & (new_fam_file$f.21000.0.0 == 1001 | new_fam_file$f.21000.0.0 == 1002 | new_fam_file$f.21000.0.0 == 1003)) , "AFF"] <- -9
write.table(new_fam_file[,c("FID","IID","PID","MID","SEX","AFF")], file = paste0(scratch_dir,"GWAS_all_controls/",control_cond,'/',gout_cond,'/chrallimpv1.fam_',gout_cond), col.names=FALSE, row.names=FALSE, quote=FALSE, sep = ' ')
# only need to create this once and needs to include everyone
# Covariate file shared by all analyses: age, BMI, waist measures, and the
# genetic principal components (f.22009.* columns renamed PCA1..PCA15).
# NOTE(review): the rename assumes exactly 15 f.22009 columns were merged
# above -- colnames() errors if the count differs; verify.
co_var_file <- new_fam_file[,c("FID","IID","f.21003.0.0", "f.21001.0.0", 'waist_to_height_ratio', 'f.48.0.0', colnames(new_fam_file)[grep('22009', colnames(new_fam_file))])]
colnames(co_var_file) <- c("FID","IID","AGE","BMI", "WaistHeightRatio", 'Waist', paste0('PCA',1:15))
write.table(co_var_file, file = paste0(scratch_dir,"GWAS_all_controls/chrallimpv1.covar"), col.names=TRUE, row.names=FALSE, quote=FALSE, sep = ' ')
|
975d7e9e44d87b19dedef5a4aeb573cf3f53e62e
|
0c139ccd885ec95ba4da22f890270ba46fdaebbb
|
/code/fame_analysis.R
|
1552432a2e16dcb0f5ebfeec28a0ab80e1b0b360
|
[] |
no_license
|
demichelislab/FaME
|
0bf0cb0d00c07ab595bbb0b44fd505293772e69f
|
7ddb3bc1748a958b3f03cdb6f73eebb29f523853
|
refs/heads/main
| 2023-04-07T03:49:15.210084
| 2021-09-21T15:00:46
| 2021-09-21T15:00:46
| 346,786,494
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,828
|
r
|
fame_analysis.R
|
# Attach required packages (librarian installs them if missing).
librarian::shelf(
  parallel
)

# Locate the directory containing this script from the --file= argument that
# Rscript passes, then walk upwards until the project root (the first ancestor
# holding a 'code' directory) is found. fame_core.R is sourced from there and
# the original working directory is restored afterwards.
starting_dir <- getwd()
file_args <- grep('^-?-f(?:ile)=', commandArgs(trailingOnly = FALSE), value = TRUE, perl = TRUE)
if (length(file_args) == 0) {
  stop('Cannot determine source directory')
}
script_dir <- sub('^-[^=]+=(.*?)/?[^/]+$', '\\1', file_args[[1]])
print(paste('Changing directory to', script_dir))
setwd(script_dir)
repeat {
  if (dir.exists('code')) {
    break
  }
  setwd('..')
  if (getwd() == '/') {
    stop('Cannot find the root folder of the project')
  }
}
project_path <- getwd()
setwd(starting_dir)
source(paste0(project_path, '/code/fame_core.R'))
# Used below to parallelize run_tests across datasets.
num_cores <- detectCores()
# Reads all the files with genomic data from the specified folder into one
# data.table; samples are kept only if at least one gene has a determined
# allele-specific copy-number call (as_cn_disc != 'nd').
# NOTE: pipeline({...}) is a helper defined in code/fame_core.R; it appears to
# thread the result of each expression into the next (magrittr-like, one step
# per line) -- see fame_core.R for its exact semantics.
data_per_gene <- pipeline({
list.files('data/genomic', full.names = TRUE)
map_dfr(fread)
group_by(sample_id)
filter(any(as_cn_disc != 'nd'))
ungroup
as.data.table
})
# All the possible combinations of these genes will be tested (cancer-related
# and druggable gene symbols, one per row in column 'gene').
interest_genes <- pipeline({
fread('data/resources/gene_sets/cancer_and_druggable_genes.tsv.gz')
pull(gene)
})
# The genomic data of the interest genes: a list of data.tables, one per
# dataset, named by dataset and ordered by dataset name, each sorted by gene
# symbol (hugo) then sample_id.
interest_genes_data <- pipeline({
data_per_gene[hugo %in% interest_genes]
nest(data = -dataset)
arrange(stri_order(dataset))
(setNames(.$data, .$dataset))
map( ~ pipeline({
.x
arrange(hugo, sample_id)
as.data.table
})
)
})
# This matrix contains the allele specific data of the loaded datasets: one
# gene x sample table of discretized calls per dataset (NA where no call).
as_cn_matrix <- pipeline({
interest_genes_data
map(~ dcast(.x, hugo ~ sample_id, value.var = 'as_cn_disc', fill = NA_character_))
})
# Binary gene x sample aberration matrices, one per dataset and per aberration
# type, derived from the allele-specific call matrices: an entry is 1 when the
# sample carries that aberration at that gene, 0 otherwise
# (build_aberration_matrix is defined in code/fame_core.R).
# Homozygous deletions.
# NOTE(review): homo_del_matrix and amp_matrix are built but never referenced
# by the comparisons below -- possibly kept for interactive use.
homo_del_matrix <- pipeline({
as_cn_matrix
map(~ build_aberration_matrix(.x, 'homo_del'))
})
# Hemizygous deletions.
hemi_del_matrix <- pipeline({
as_cn_matrix
map(~ build_aberration_matrix(.x, 'hemi_del'))
})
# Copy-neutral LOH.
cnnl_matrix <- pipeline({
as_cn_matrix
map(~ build_aberration_matrix(.x, 'cnnl'))
})
# Amplifications.
amp_matrix <- pipeline({
as_cn_matrix
map(~ build_aberration_matrix(.x, 'amp'))
})
# Deleterious SNVs: 1 if the sample carries at least one deleterious SNV in
# the gene (sign() collapses the per-gene count to 0/1), 0 otherwise; first
# column (gene symbol) becomes the row names of the numeric matrix.
snv_matrix <- pipeline({
interest_genes_data
map(~ {
pipeline({
dcast(.x, hugo ~ sample_id, value.var = 'count_snvs_deleterious', fill = 0L, fun.aggregate = function (x) as.integer(sign(x)))
})
})
map(~ as.matrix(.x[, -1], rownames.value = .x[[1]]))
})
# Sorted, self-named vector of all dataset identifiers.
all_datasets <- pipeline({
data_per_gene
(dataset)
unique
stri_sort
setNames(.)
})
# Named list with the simple (single-lesion) aberrations to test.
aberrations_bases <- list(
'hemi_del' = hemi_del_matrix,
'cnnl' = cnnl_matrix,
'snv' = snv_matrix
)
# Named list with the combined aberrations to test. In this case the
# aberrations are combined with an OR operation: for each dataset the member
# matrices are merged element-wise via combine_mats (code/fame_core.R), so a
# sample/gene counts as aberrant if any component lesion is present.
aberration_combinations <- pipeline({
list(
hemi_cnnl = c('hemi_del', 'cnnl'),
hemi_cnnl_snv = c('hemi_del', 'cnnl', 'snv')
)
map(~ {
selected_abs <- aberrations_bases[.x]
map(all_datasets, function (nn) {
cc <- map(selected_abs, ~ .x[[nn]])
reduce(cc, combine_mats)
})
})
})
# Simple and combined aberrations together, keyed by aberration name.
aberrations <- c(
aberrations_bases,
aberration_combinations
)
# This combines the per-dataset matrices of each aberration into a unique
# matrix (samples from all datasets side by side) in order to test all the
# datasets in a pan-cancer fashion.
pancancer_aberrations <- pipeline({
aberrations
map(~ list(pancancer = reduce(.x, cbind)))
})
# Generates all the unordered pairs of aberration types to compare
# (a1 <= a2 keeps one representative per unordered pair, self-pairs
# included). FAME's efficiency makes testing many combinations feasible.
aberration_comparisons <- pipeline({
c(
'hemi_del',
'cnnl',
'hemi_cnnl',
'hemi_cnnl_snv',
'snv'
)
(expand.grid(
a2 = .,
a1 = .,
stringsAsFactors = FALSE
))
filter(
a1 <= a2
)
select(a1, a2)
as.data.table
})
# Tests all the aberration pairs on all the pairs of genes, separately for
# each dataset: for every (a1, a2) pair the per-dataset contingency counts
# are built (compute_counts) and the association tests (run_tests) are run on
# each dataset in parallel over num_cores, then the per-dataset result rows
# are stacked. compute_counts and run_tests come from code/fame_core.R; the
# `(? expr)` steps appear to run expr (gc) for side effects without altering
# the piped value -- see fame_core.R.
results_per_project <- pipeline({
aberration_comparisons
pmap(c)
map_dfr(~ {
cat(.x, '\n')
pipeline({
aberrations[.x]
map(~ { .x[all_datasets] })
pmap(list)
map(~ compute_counts(.x[1], .x[2]))
mcmapply(names(.), FUN = function (dd, nn) {
print(nn)
res <- run_tests(dd)
gc()
res
}, mc.cores = num_cores, SIMPLIFY = FALSE)
bind_rows(.id = 'dataset')
mutate(
a1 = .x[[1]],
a2 = .x[[2]]
)
select(a1, a2, everything())
as.data.table
})
})
(? gc())
})
fwrite(results_per_project, 'data/result_pairs.tsv', sep = '\t')
# Same tests on the pancancer dataset (all the datasets' samples combined
# into one matrix per aberration).
results_pancancer <- pipeline({
aberration_comparisons
pmap(c)
map_dfr(~ {
cat(.x, '\n')
pipeline({
pancancer_aberrations[.x]
pmap(list)
map(~ compute_counts(.x[1], .x[2]))
mcmapply(names(.), FUN = function (dd, nn) {
print(nn)
res <- run_tests(dd)
gc()
res
}, mc.cores = num_cores, SIMPLIFY = FALSE)
bind_rows(.id = 'dataset')
mutate(
a1 = .x[[1]],
a2 = .x[[2]]
)
select(a1, a2, everything())
as.data.table
(? gc())
})
})
(? gc())
})
fwrite(results_pancancer, 'data/result_pairs_pancancer.tsv', sep = '\t')
|
6fc5e67bfef1bbe76a2bab4614139c995932dce5
|
d899a92e376c20f1426889565917d90c0dca22a3
|
/man/no_whitespace.Rd
|
0e419de027ad95ca1efa29f67ba2d301ce721d1e
|
[] |
no_license
|
gmoyerbrailean/clickme
|
e6779cdabf78c4ed206cf0b9314f6549a51fccc1
|
8d9eaff51d7c38a13129d2aa76b69e288d87e5f2
|
refs/heads/master
| 2021-01-12T19:32:30.183248
| 2013-09-14T00:48:32
| 2013-09-14T00:48:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
rd
|
no_whitespace.Rd
|
\name{no_whitespace}
\alias{no_whitespace}
\title{Remove whitespace from a string}
\usage{
no_whitespace(str)
}
\arguments{
\item{str}{A character string from which all whitespace is removed.}
}
\description{
Remove whitespace from a string
}
|
5f546d118bb952f0ca29f684832f736913e44edc
|
11577d5ab6897ad8ca94cc7a82f1259b9b9b6994
|
/datasets.R
|
e6fbde7f4acbe41f859e8b41fa53d1bcdb1cedf8
|
[] |
no_license
|
nbest937/thesis
|
33a39d104c23561d2c2a736ff4efcfd8df0581a0
|
3490178bef857253de34e0c0b40c69ea3137cfa4
|
refs/heads/master
| 2018-12-31T21:16:54.384049
| 2012-03-22T22:15:06
| 2012-03-22T22:15:06
| 717,267
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,921
|
r
|
datasets.R
|
###################################################
### chunk number 1: initialize
###################################################
#line 17 "/home/nbest/thesis/datasets.Rnw"
# load helper functions
# code will appear in appendix
# NOTE: this file is Sweave-tangled (the #line directives point back into
# datasets.Rnw). Helpers used throughout (mlctList, mlctReclass, peelMap,
# ggplotRaster, coverMaps, my.ggsave, peelClasses, ...) come from the two
# sourced files below.
source("~/thesis/code/peel.R")
source("~/thesis/code/maps.R")
setwd( "~/thesis/datasets")
# Global switches: recompute cached rasters / re-render figures.
overwriteRasters <- FALSE
overwriteFigures <- TRUE
###################################################
### chunk number 2: thumb
###################################################
#line 102 "/home/nbest/thesis/datasets.Rnw"
# setwd() returns the PREVIOUS directory, so texWd keeps the tangled-output
# directory while dataWd records the data directory just entered.
texWd <- setwd("../data")
dataWd <- getwd()
## this works but it's slow
##
## thumb <- crop( raster("2001_lct1.tif"),
##               extent(-83.5, -(82+25/60), 42+55/60, 44+5/60))
##
## these are subsets exported from GRASS
# MODIS land cover primary class, secondary class, and confidence rasters for
# the Michigan "thumb" detail area.
thumb <- mlctList( "thumb_2001_lct1.tif",
                   "thumb_2001_lct1_sec.tif",
                   "thumb_2001_lct1_pct.tif")
# IGBP class colors, dropping the black (nodata) entry.
igbpLegend <- thumb$pri@legend@colortable
igbpLegend <- igbpLegend[ igbpLegend != "#000000"]
## just in case, save these for later
## paste( deparse( igbpLegend), collapse="")
## igbpLegend <- c("#2041B3", "#006A0F", "#007C25", "#00A25B", "#00A125",
##                 "#069228", "#9E9668", "#C1C48F", "#85AA5B", "#B1B741",
##                 "#A4D07E", "#73ABAE", "#CCD253", "#D90000", "#9DE36E",
##                 "#B6B5C2", "#949494")
###################################################
### chunk number 3: mlct-reclass
###################################################
#line 202 "/home/nbest/thesis/datasets.Rnw"
# Reclassify the IGBP classes into the coarser scheme defined in peel.R, then
# map the primary class, secondary class, and confidence (%) layers.
thumb <- mlctReclass( thumb, mlctReclassMatrix, overwrite= overwriteRasters)
if( overwriteFigures) {
  thumbPlots <- list( pri= peelMap( thumb$pri, 0.4),
                      sec= peelMap( thumb$sec, 0.4))
  thumbPlots$pct <- ggplotRaster( thumb$pct, 0.4) +
    scale_fill_gradientn( "% conf",
                          colours= rev( brewer.pal( 7, "YlGn")),
                          limits= c( 100, 0),
                          breaks= seq( 100, 0, by= -20))
}
###################################################
### chunk number 4: fig_thumb_pri_reclass
###################################################
#line 224 "/home/nbest/thesis/datasets.Rnw"
# Save the reclassified primary-cover map for the thumb area.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_pri_reclass.png",
             plot= thumbPlots$pri)
}
###################################################
### chunk number 5: fig_thumb_sec_reclass
###################################################
#line 250 "/home/nbest/thesis/datasets.Rnw"
# Save the reclassified secondary-cover map for the thumb area.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_sec_reclass.png",
             plot= thumbPlots$sec)
}
###################################################
### chunk number 6: fig_thumb_pct
###################################################
#line 282 "/home/nbest/thesis/datasets.Rnw"
# Save the classification-confidence map for the thumb area.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_pct.png",
             plot= thumbPlots$pct)
}
###################################################
### chunk number 7: fig_thumb_pri_facet
###################################################
#line 320 "/home/nbest/thesis/datasets.Rnw"
# Same primary-cover map, faceted one panel per cover class.
# NOTE: opts() is from a pre-0.9.2 ggplot2 (replaced by theme() later).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_pri_facet.png",
             plot= thumbPlots$pri +
               facet_wrap(~ values) +
               opts( legend.position= "none"))
}
###################################################
### chunk number 8: fig_thumb_sec_facet
###################################################
#line 341 "/home/nbest/thesis/datasets.Rnw"
# Same secondary-cover map, faceted one panel per cover class.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_sec_facet.png",
             plot= thumbPlots$sec +
               facet_wrap(~ values) +
               opts( legend.position= "none"))
}
###################################################
### chunk number 9: mlct_reclass
###################################################
#line 382 "/home/nbest/thesis/datasets.Rnw"
## repeat for cUSA
# Same load/reclassify/map sequence as chunks 2-3, but for the full
# conterminous-USA rasters (much larger, hence the small 16e-4 sampling
# fraction passed to the mapping helpers).
setwd( dataWd)
mlct <- mlctList( "2001_lct1.tif",
                  "2001_lct1_sec.tif",
                  "2001_lct1_pct.tif")
mlct <- mlctReclass( mlct, mlctReclassMatrix, overwrite= overwriteRasters, datatype="INT1U", progress="text")
if( overwriteFigures) {
  mlctPlots <- list( pri= peelMap( mlct$pri, 16e-4),
                     sec= peelMap( mlct$sec, 16e-4))
  mlctPlots$pct <- ggplotRaster( mlct$pct, 16e-4) +
    scale_fill_gradientn( "% conf",
                          colours= rev( brewer.pal( 7, "YlGn")),
                          limits= c( 100, 0),
                          breaks= seq( 100, 0, by= -20))
}
###################################################
### chunk number 10: fig_mlct_pri_reclass
###################################################
#line 408 "/home/nbest/thesis/datasets.Rnw"
# Save the cUSA primary-cover map; ImageMagick's convert writes a
# margin-trimmed copy alongside.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_mlct_pri_reclass.png",
             plot= mlctPlots$pri, width=7.5)
  system( sprintf( "convert -trim %s/fig_mlct_pri_reclass.png %s/fig_mlct_pri_reclass_trim.png",
                   texWd, texWd))
}
###################################################
### chunk number 11: fig_mlct_sec_reclass
###################################################
#line 427 "/home/nbest/thesis/datasets.Rnw"
# Save the cUSA secondary-cover map (trimmed copy alongside).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_mlct_sec_reclass.png",
             plot= mlctPlots$sec, width=7.5)
  system( sprintf( "convert -trim %s/fig_mlct_sec_reclass.png %s/fig_mlct_sec_reclass_trim.png",
                   texWd, texWd))
}
###################################################
### chunk number 12: fig_mlct_pct
###################################################
#line 447 "/home/nbest/thesis/datasets.Rnw"
# Save the cUSA confidence map (trimmed copy alongside).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_mlct_pct.png",
             plot= mlctPlots$pct, width=7.5)
  system( sprintf( "convert -trim %s/fig_mlct_pct.png %s/fig_mlct_pct_trim.png",
                   texWd, texWd))
}
###################################################
### chunk number 13: fig_mlct_pri_facet
###################################################
#line 467 "/home/nbest/thesis/datasets.Rnw"
# cUSA primary cover, one facet per class (first five classes).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_mlct_pri_facet.png",
             plot= peelMap( mlct$pri, 16e-4,
                            classes= names( peelClasses)[1:5]) +
               facet_grid( values ~ .) +
               opts( legend.position= "none"),
             width=4.5, height=8)
}
###################################################
### chunk number 14: fig_mlct_pri_facet2
###################################################
#line 488 "/home/nbest/thesis/datasets.Rnw"
# cUSA primary cover, facets for the remaining classes (6-9).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_mlct_pri_facet2.png",
             plot= peelMap( mlct$pri, 16e-4,
                            classes= names( peelClasses)[6:9]) +
               facet_grid( values ~ .) +
               opts( legend.position= "none"),
             width=4.5, height=8)
}
###################################################
### chunk number 15: fig_mlct_sec_facet
###################################################
#line 510 "/home/nbest/thesis/datasets.Rnw"
# cUSA secondary cover, facets for classes 1-5.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_mlct_sec_facet.png",
             plot= peelMap( mlct$sec, 16e-4,
                            classes= names( peelClasses)[1:5]) +
               facet_grid( values ~ .) +
               opts( legend.position= "none"),
             width=4.5, height=8)
}
###################################################
### chunk number 16: fig_mlct_sec_facet2
###################################################
#line 531 "/home/nbest/thesis/datasets.Rnw"
# cUSA secondary cover, facets for classes 6-9.
# NOTE(review): this chunk samples at 8e-3 while its siblings use 16e-4 --
# confirm the denser sampling is intentional.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_mlct_sec_facet2.png",
             plot= peelMap( mlct$sec, 8e-3,
                            classes= names( peelClasses)[6:9]) +
               facet_grid( values ~ .) +
               opts( legend.position= "none"),
             width=4.5, height=8)
}
###################################################
### chunk number 17: thumbPlots
###################################################
#line 607 "/home/nbest/thesis/datasets.Rnw"
setwd( dataWd)
## calculate cover fractions and aggregate for detail area
# Two variants: 'thumb' with minimum primary-cover area Amin=0.5 and 'thumb1'
# with Amin=1.0.
# NOTE(review): thumb1 is branched from the already-updated thumb object
# (after the Amin=0.5 call) -- confirm primaryFraction() only adds elements
# so the two variants remain independent. Also note thumbPlots is reassigned
# here, discarding the chunk-3 maps (already saved above).
thumb <- primaryFraction( thumb, Amin=0.5,
                          overwrite= overwriteRasters, progress= "text")
thumb1 <- primaryFraction( thumb, Amin=1.0,
                           overwrite= overwriteRasters, progress= "text")
thumb <- coverFractions( thumb,
                         overwrite= overwriteRasters, progress= "text")
thumb1 <- coverFractions( thumb1,
                          overwrite= overwriteRasters, progress= "text")
thumb <- aggregateFractions( thumb,
                             overwrite= overwriteRasters, progress= "text")
thumb1 <- aggregateFractions( thumb1,
                              overwrite= overwriteRasters, progress= "text")
if( overwriteFigures) {
  thumbPlots <- list( fracs= coverMaps( thumb$fracs, 0.4),
                      agg= coverMaps( thumb$agg, 1))
  thumbPlots1 <- list( fracs= coverMaps( thumb1$fracs, 0.4),
                       agg= coverMaps( thumb1$agg, 1))
}
###################################################
### chunk number 18: fig_thumb_fracs
###################################################
#line 651 "/home/nbest/thesis/datasets.Rnw"
# Save the cover-fraction maps for the thumb area (Amin=0.5 variant).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_fracs.png",
             plot= thumbPlots$fracs)
}
###################################################
### chunk number 19: fig_thumb1_fracs
###################################################
#line 676 "/home/nbest/thesis/datasets.Rnw"
# Save the cover-fraction maps for the thumb area (Amin=1.0 variant).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb1_fracs.png",
             plot= thumbPlots1$fracs)
}
###################################################
### chunk number 20: fig_thumb1_agg
###################################################
#line 708 "/home/nbest/thesis/datasets.Rnw"
# Save the aggregated cover maps (Amin=1.0 variant).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb1_agg.png",
             plot= thumbPlots1$agg)
}
###################################################
### chunk number 21: fig_thumb_agg
###################################################
#line 726 "/home/nbest/thesis/datasets.Rnw"
# Save the aggregated cover maps (Amin=0.5 variant).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_agg.png",
             plot= thumbPlots$agg)
}
###################################################
### chunk number 22: thumbAggDiff
###################################################
#line 755 "/home/nbest/thesis/datasets.Rnw"
setwd( dataWd)
# Per-cell difference between the two aggregation variants (Amin=0.5 minus
# Amin=1.0); recomputed only when overwriteRasters is TRUE, otherwise read
# back from the cached GeoTIFF.
thumbAggDiff <-
  if( overwriteRasters) {
    overlay( thumb$agg, thumb1$agg,
             fun= function( t, t1) t -t1,
             filename= "thumb_agg_diff.tif",
             overwrite= TRUE)
  } else brick( "thumb_agg_diff.tif")
layerNames( thumbAggDiff) <- layerNames( thumb$agg)
if( overwriteFigures) {
  # Diverging brown-green palette centered on zero, clamped to +/-0.1.
  thumbAggDiffPlot <- coverMaps( thumbAggDiff) +
    scale_fill_gradientn( "diff", colours= rev( brewer.pal( 11, "BrBG")),
                          limits= c( 0.1, -0.1),
                          breaks= seq( 0.1, -0.1, by= -0.02))
}
###################################################
### chunk number 23: fig_thumb_agg_diff
###################################################
#line 793 "/home/nbest/thesis/datasets.Rnw"
# Save the aggregation-difference map.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_agg_diff.png",
             plot= thumbAggDiffPlot)
}
###################################################
### chunk number 24: mlct_agg
###################################################
#line 824 "/home/nbest/thesis/datasets.Rnw"
setwd( dataWd)
# cUSA analogue of chunk 17: fraction/aggregation pipeline at Amin=0.5, then
# again at Amin=1.0.
# NOTE(review): unlike chunk 17, mlct1 is branched from mlct AFTER
# coverFractions/aggregateFractions have run on it -- confirm the ordering
# difference versus the thumb processing is intentional.
mlct <- primaryFraction( mlct, Amin= 0.5,
                         overwrite= overwriteRasters,
                         progress="text")
mlct <- coverFractions( mlct,
                        overwrite= overwriteRasters,
                        progress="text")
mlct <- aggregateFractions( mlct,
                            overwrite= overwriteRasters,
                            progress="text")
mlct1 <- primaryFraction( mlct, Amin=1.0,
                          overwrite= overwriteRasters,
                          progress="text")
mlct1 <- coverFractions( mlct1,
                         overwrite= overwriteRasters,
                         progress="text")
mlct1 <- aggregateFractions( mlct1,
                             overwrite= overwriteRasters,
                             progress="text")
###################################################
### chunk number 25: thumbNomos
###################################################
#line 913 "/home/nbest/thesis/datasets.Rnw"
setwd( dataWd)
# Decompose the mosaic cover class into component covers for both thumb
# variants, map them, and compute/cache the per-cell difference between the
# variants (clamped to +/-0.3).
thumb <- decomposeMosaic( thumb, overwrite= overwriteRasters, progress= "text")
thumb1 <- decomposeMosaic( thumb1, overwrite= overwriteRasters, progress= "text")
if( overwriteFigures) {
  thumbPlots$nomos <- coverMaps( thumb$nomos)
  thumbPlots1$nomos <- coverMaps( thumb1$nomos)
}
thumbNomosDiff <-
  if( overwriteRasters) {
    overlay( thumb$nomos, thumb1$nomos,
             fun= function( t, t1) t -t1,
             filename= "thumb_nomos_diff.tif",
             overwrite= TRUE)
  } else brick( "thumb_nomos_diff.tif")
layerNames( thumbNomosDiff) <- layerNames( thumb$nomos)
if( overwriteFigures) {
  thumbNomosDiffPlot <- coverMaps( thumbNomosDiff) +
    scale_fill_gradientn( "diff", colours= rev( brewer.pal( 11, "BrBG")),
                          limits= c( 0.3, -0.3),
                          breaks= seq( 0.3, -0.3, by= -0.06))
}
###################################################
### chunk number 26: fig_thumb1_nomos
###################################################
#line 945 "/home/nbest/thesis/datasets.Rnw"
# Save the decomposed-mosaic maps (Amin=1.0 variant).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb1_nomos.png",
             plot= thumbPlots1$nomos)
}
###################################################
### chunk number 27: fig_thumb_nomos
###################################################
#line 964 "/home/nbest/thesis/datasets.Rnw"
# Save the decomposed-mosaic maps (Amin=0.5 variant).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_nomos.png",
             plot= thumbPlots$nomos)
}
###################################################
### chunk number 28: fig_thumb_nomos_diff
###################################################
#line 986 "/home/nbest/thesis/datasets.Rnw"
# Save the mosaic-decomposition difference map.
if( overwriteFigures ) {
  my.ggsave( texWd, "fig_thumb_nomos_diff.png",
             plot= thumbNomosDiffPlot)
}
###################################################
### chunk number 29: mlct_nomos
###################################################
#line 1004 "/home/nbest/thesis/datasets.Rnw"
setwd( dataWd)
# Mosaic decomposition for the cUSA rasters (both Amin variants).
mlct <- decomposeMosaic( mlct, overwrite= overwriteRasters, progress="text")
mlct1 <- decomposeMosaic( mlct1, overwrite= overwriteRasters, progress="text")
## might be useful to cross-tabulate the primary and secondary
## frequencies for the cUSA
## table(thumbDf@data$pri, thumbDf@data$sec)
###################################################
### chunk number 30: thumb_nlcd
###################################################
#line 1078 "/home/nbest/thesis/datasets.Rnw"
# Load the NLCD land-cover raster for the thumb area.
setwd( dataWd)
setwd( "nlcd")
nlcdWd <- getwd()
thumbNlcd <- list( pri=raster( "thumbNlcd.tif"))
###################################################
### chunk number 31: thumb_nlcd_reclass
###################################################
#line 1157 "/home/nbest/thesis/datasets.Rnw"
# Reclassify NLCD codes with the NLCD-specific matrix and map the result.
setwd( nlcdWd)
thumbNlcd <- mlctReclass( thumbNlcd, nlcdReclassMatrix,
                          overwrite= overwriteRasters,
                          progress="text")
if( overwriteFigures) {
  thumbNlcdPlot <- peelMap(thumbNlcd$pri, 0.1)
}
###################################################
### chunk number 32: fig_thumb_nlcd_reclass
###################################################
#line 1176 "/home/nbest/thesis/datasets.Rnw"
# Save the reclassified NLCD map for the thumb area.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_nlcd_reclass.png",
             plot= thumbNlcdPlot, height= 5, width= 5)
}
###################################################
### chunk number 33: fig_thumb_nlcd_facet
###################################################
#line 1194 "/home/nbest/thesis/datasets.Rnw"
# Same NLCD map, one facet per cover class.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_nlcd_facet.png",
             plot= thumbNlcdPlot +
               facet_wrap(~ values) +
               opts( legend.position= "none"))
}
###################################################
### chunk number 34: thumb_nlcd_aggr
###################################################
#line 1224 "/home/nbest/thesis/datasets.Rnw"
# Cover fractions and aggregation for the NLCD thumb raster; mosaic=FALSE
# because NLCD has no mosaic class, and Amin is attached to the list for the
# helpers to read.
setwd( nlcdWd)
thumbNlcd$Amin <- 1
thumbNlcd <-
  coverFractions( thumbNlcd, mosaic=FALSE,
                  overwrite= overwriteRasters,
                  progress= "text")
thumbNlcd <-
  aggregateFractions( thumbNlcd,
                      overwrite= overwriteRasters,
                      progress="text")
if( overwriteFigures) {
  thumbNlcdAggPlot <- coverMaps( thumbNlcd$agg, 1)
}
###################################################
### chunk number 35: fig_thumb_nlcd_agg
###################################################
#line 1247 "/home/nbest/thesis/datasets.Rnw"
# Save the aggregated NLCD cover maps.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_nlcd_agg.png",
             plot= thumbNlcdAggPlot)
}
###################################################
### chunk number 36: nlcd
###################################################
#line 1265 "/home/nbest/thesis/datasets.Rnw"
# Stack the pre-exported 5-arc-minute NLCD fraction rasters, one layer per
# cover class (class 8 excluded), each named after its class.
setwd( dataWd)
nlcd <- stack( sapply( names( peelClasses[ -8]),
                       function( cover) {
                         list.files( paste( dataWd, "nlcd", sep="/"),
                                     patt= paste( "nlcd", cover, "5min.tif$", sep="_"),
                                     full.names= TRUE)
                       }))
nlcd <- setMinMax( nlcd)
layerNames(nlcd) <- names( peelClasses[ -8])
###################################################
### chunk number 37: fig_nlcd
###################################################
#line 1285 "/home/nbest/thesis/datasets.Rnw"
# cUSA NLCD fraction maps, classes 1-4.
# NOTE(review): my.ggsave is called without plot=, so it presumably saves the
# last printed plot -- confirm nlcdPlot is what actually gets written.
if( overwriteFigures) {
  nlcdPlot <- coverMaps( nlcd, samp= 0.2,
                         classes= layerNames( nlcd)[ 1:4]) +
    facet_grid( variable ~ .)
  my.ggsave( texWd, "fig_nlcd.png", width=5.5, height=8)
}
###################################################
### chunk number 38: fig_nlcd2
###################################################
#line 1303 "/home/nbest/thesis/datasets.Rnw"
# cUSA NLCD fraction maps, classes 5-8.
if( overwriteFigures) {
  nlcdPlot2 <- coverMaps( nlcd, 0.2,
                          classes= layerNames( nlcd)[ 5:8]) +
    facet_grid( variable ~ .)
  my.ggsave( texWd, "fig_nlcd2.png", width=5.5, height=8)
}
###################################################
### chunk number 39: agland
###################################################
#line 1347 "/home/nbest/thesis/datasets.Rnw"
# Cropland/pasture fraction rasters; crop (or read the cached crop of) the
# thumb-area subset using the same extent as chunk 2.
setwd( dataWd)
setwd( "agland")
agland <- stack( list.files( patt="(cropland|pasture).tif$"))
layerNames(agland) <- c("crop", "pasture")
agland <- setMinMax( agland)
thumbAgland <-
  if( overwriteRasters) {
    crop( agland,
          extent(-83.5, -(82+25/60),
                 42+55/60, 44+5/60),
          filename= "thumbAgland.tif",
          progress="text",
          overwrite= overwriteRasters)
  } else brick( list.files( getwd(),
                            "thumbAgland.tif",
                            full.names= TRUE,
                            recursive= TRUE))
layerNames( thumbAgland) <- c("crop", "pasture")
# crop() returns a brick
if( overwriteFigures) {
  thumbAglandPlot <-
    coverMaps( thumbAgland, 1) +
    facet_grid( variable ~ .)
  aglandPlot <-
    coverMaps( agland, 0.4) +
    facet_grid( variable ~ .)
}
###################################################
### chunk number 40: fig_thumb_agland
###################################################
#line 1390 "/home/nbest/thesis/datasets.Rnw"
# Save the thumb-area cropland/pasture maps.
if( overwriteFigures) {
  my.ggsave( texWd, "fig_thumb_agland.png",
             plot= thumbAglandPlot)
}
###################################################
### chunk number 41: fig_agland
###################################################
#line 1409 "/home/nbest/thesis/datasets.Rnw"
# Save the full-extent cropland/pasture maps (plus a trimmed copy).
if( overwriteFigures) {
  my.ggsave( texWd, "fig_agland.png",
             plot= aglandPlot)
  system( sprintf( "convert -trim %s/fig_agland.png %s/fig_agland_trim.png",
                   texWd, texWd))
}
###################################################
### chunk number 42: 175crops
###################################################
#line 1453 "/home/nbest/thesis/datasets.Rnw"
# Group the Monfreda-2008 175-crop rasters into broader categories, crop them
# to the conterminous USA, sum multi-crop categories, and mask to land.
setwd( dataWd)
cropsWd <- path.expand( "~/see/data/raw/175crops2000/nc")
## list.files( cropsWd, "vrt$")
cropTable <- read.csv( "monfreda2008 table1.csv", header= TRUE)
## For now we consider only herbaceous crops
# NOTE(review): herbNotForage is computed but never used below.
herbNotForage <- cropTable$type=="herbaceous" & cropTable$group != "Forage"
# Assign each crop to a category; order matters -- later rules fill only
# still-unassigned rows via the is.na(cat) guards.
cropTable$cat <- NA
cropTable <- within( cropTable, {
  cat[ map == "maize"] <- "maize"
  cat[ map == "soybean"] <- "soybean"
  cat[ map == "wheat"] <- "wheat"
  cat[ map == "rice"] <- "rice"
  cat[ group == "Cereals" & is.na( cat)] <- "cereals"
  cat[ map == "sugarcane"] <- "sugarcane"
  cat[ type == "herbaceous" & group == "Forage"] <- "forage"
  cat[ type == "herbaceous" & is.na( cat)] <- "field_crop"
  cat[ type == "shrub"] <- "shrub_crop"
  cat[ type == "tree"] <- "tree_crop"
})
# Map names per category.
catLists <- dlply( cropTable, .(cat), function( row) row$map)
# Path of the 5-arc-minute VRT for a crop map name.
mapNcName <- function( map) {
  paste( cropsWd,
         paste( map, "5min.vrt",
                sep="_"),
         sep="/")
}
# First band of each crop raster, stacked per category.
catStacks <- llply( catLists, function( maps) {
  if( length( maps) ==1) {
    subset( brick( mapNcName( maps[ 1])), 1)
  } else {
    do.call( stack, llply( maps, function( map) {
      subset( brick( mapNcName( map)), 1)
    }))
  }})
cusaMask <- raster( "mask_cusa.tif")
cusaExtent <- extent( cusaMask)
# Crop each category stack to the cUSA extent (cached as <cat>_crop.tif).
# Note: the parameter name 'c' shadows base::c inside these callbacks.
catCropped <- llply( names( catStacks), function( c) {
  fn <- paste( c, "crop.tif", sep="_")
  if( overwriteRasters) {
    crop( catStacks[[ c]], cusaExtent,
          filename= fn,
          overwrite= TRUE)
  } else brick( list.files( getwd(), fn, full.names=TRUE))
})
names( catCropped) <- names( catStacks)
# Sum multi-layer categories to a single fraction layer, then mask to the
# cUSA land mask (written as <cat>.tif).
catMasked <- llply( names( catCropped), function( c) {
  r <- if( nlayers( catCropped[[ c]]) ==1) {
    catCropped[[ c]]
  } else overlay( catCropped[[ c]], fun= sum)
  raster::mask( r, cusaMask,
                filename= paste( c, "tif", sep="."),
                overwrite= TRUE)
})
names( catMasked) <- names( catStacks)
###################################################
### chunk number 43: fig_crops
###################################################
#line 1567 "/home/nbest/thesis/datasets.Rnw"
# Crop-category fraction maps, first five categories.
# NOTE(review): like chunk 37, my.ggsave is called without plot=.
if( overwriteFigures) {
  cropsMap <-
    coverMaps( stack( catMasked[ 1:5]), 0.4) +
    facet_grid( variable ~ .)
  my.ggsave( texWd, "fig_crops.png",
             width=5.5, height=8)
}
###################################################
### chunk number 44: fig_crops2
###################################################
#line 1589 "/home/nbest/thesis/datasets.Rnw"
# Crop-category fraction maps, categories 6-10.
if( overwriteFigures) {
  cropsMap2 <-
    coverMaps( stack( catMasked[ 6:10]), 0.4) +
    facet_grid( variable ~ .)
  my.ggsave( texWd, "fig_crops2.png",
             width=5.5, height=8)
}
|
4d79f906921ccb94e25b7cb05f80fbcffade31ac
|
34e2217b2255e5bb192c2c724dbe78ca4c1b3c64
|
/man/document_link_params.Rd
|
bca4929e3df502d3f7edb5134f7d2c51cacdf542
|
[] |
no_license
|
kongdd/languageserver
|
df335d28f97868793b6a56b64b9671a24afa57ce
|
d3ae514ad9b708178217522029e97f087e98b343
|
refs/heads/master
| 2020-08-01T14:40:13.188378
| 2019-09-26T07:09:25
| 2019-09-26T07:09:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 409
|
rd
|
document_link_params.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interfaces.R
\name{document_link_params}
\alias{document_link_params}
\title{parameters for document link requests}
\usage{
document_link_params(uri)
}
\arguments{
\item{uri}{a character, the path to a file as defined by \href{https://tools.ietf.org/html/rfc3986}{RFC 3986}}
}
\description{
parameters for document link requests
}
|
c08e034ce381896b190b6ee3dc832590811d84f4
|
26e26aca4102f40bc848120c4ebc99bb40d4a3c1
|
/R/Archive/August 2020/FPLine decile.R
|
bdf459aa8c55380bd6e64116c01db39d2fce1120
|
[] |
no_license
|
IPRCIRI/IRHEIS
|
ee6c00dd44e1e4c2090c5ef4cf1286bcc37c84a1
|
1be8fa815d6a4b2aa5ad10d0a815c80a104c9d12
|
refs/heads/master
| 2023-07-13T01:27:19.954174
| 2023-07-04T09:14:58
| 2023-07-04T09:14:58
| 90,146,792
| 13
| 6
| null | 2021-12-09T12:08:58
| 2017-05-03T12:31:57
|
R
|
UTF-8
|
R
| false
| false
| 16,622
|
r
|
FPLine decile.R
|
#166-Step 6- FoodBasicNeeds.R
#
# Copyright © 2018:Majid Einian & Arin Shahbazian
# Licence: GPL-3
# Appears to compute per-decile food poverty lines from processed HEIS survey
# data (paths and year range come from Settings.yaml); this preamble clears
# the session and loads dependencies.
# NOTE(review): rm(list=ls()) wipes the calling environment -- acceptable for
# a standalone batch script, but avoid source()-ing this interactively.
rm(list=ls())
starttime <- proc.time()  # timing reference (presumably reported at the end)
cat("\n\n================ Prepare Data =====================================\n")
library(yaml)
Settings <- yaml.load_file("Settings.yaml")  # paths, startyear/endyear, etc.
library(readxl)
library(data.table)
library(ggplot2)
for(year in (Settings$startyear:Settings$endyear)){
cat(paste0("\n------------------------------\nYear:",year,"\n"))
# load data --------------------------------------
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"FINALPOORS.rda"))
PD<-MD[,.(HHID,FinalPoor)]
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"InitialPoorClustered.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"FoodPrices.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"FoodGrams.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"Deciles.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"Total2.rda"))
MD[,Decile:=NULL]
MD<-merge(MD,FoodPrices,all.x=TRUE,by="HHID")
MD<-merge(MD,FoodGrams,all.x=TRUE,by="HHID")
MD<-merge(MD,PD,all.x=TRUE,by="HHID")
MD<-merge(MD,Deciles,all.x=TRUE,by="HHID")
MD<-merge(MD,Total[,.(HHID,`011231`,`011232`,`011441`,`011442`,G01153)],all.x=TRUE,by="HHID")
# y<-MD[,weighted.mean(FoodKCaloriesHH_Per,Weight),by=c("Region","Decile")]
MD[,NewPoor:=InitialPoor]
MD[,OldPoor:=1]
i <- 0
while(MD[(NewPoor-OldPoor)!=0,.N]>0.001*nrow(MD[NewPoor==1]) & i <=15){
# cat(nrow(MD[NewPoor==1]))
i <- i + 1
MD[,ThisIterationPoor:=NewPoor]
MD[,FPLine:=NULL]
MD[,Selected_Group:=ifelse((Region=="Urban" & Decile==3) |
(Region=="Rural" & Decile==2),1,0)]
MDP <- MD[Selected_Group==1,
.(FPLine=0.001*
(weighted.mean(LavashPrice,Weight,na.rm = TRUE)*weighted.mean(BreadGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(Rice_TaromPrice,Weight,na.rm = TRUE)*weighted.mean(GrainGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(MacaroniPrice,Weight,na.rm = TRUE)*weighted.mean(MacaroniGram/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(AdasPrice,Weight,na.rm = TRUE)*weighted.mean((AdasGram+Loobia_ChitiGram+NokhodGram)/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(SibzaminiPrice,Weight,na.rm = TRUE)*weighted.mean(SibzaminiGram/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(Sabzi_KhordanPrice,Weight,na.rm = TRUE)*weighted.mean(VegetableShrubsGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(Banana_CoconutPrice,Weight,na.rm = TRUE)*weighted.mean(TreeFruitsGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(LivestockGrams,Weight,na.rm = TRUE)*weighted.mean(LivestockGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(PoultryMeat_MPrice,Weight,na.rm = TRUE)*weighted.mean(PoultryMeat_MGram/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(Egg_MashinPrice,Weight,na.rm = TRUE)*weighted.mean(Egg_MashinGram/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(Milk_PasteurizedPrice,Weight,na.rm = TRUE)*weighted.mean((MilkproductsGrams+MilkGrams)/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(Oil_NabatiPrice,Weight,na.rm = TRUE)*weighted.mean(Oil_NabatiGram/(EqSizeCalory),Weight*Size,na.rm = TRUE)+
weighted.mean(GhandPrice,Weight,na.rm = TRUE)*weighted.mean(GhandGram/(EqSizeCalory),Weight*Size,na.rm = TRUE))),
by=.(cluster3,Region)]
price<- MD[Selected_Group==1,
.(LavashPrice=weighted.mean(LavashPrice,Weight,na.rm = TRUE),
Rice_TaromPrice=weighted.mean(Rice_TaromPrice,Weight,na.rm = TRUE),
MacaroniPrice=weighted.mean(MacaroniPrice,Weight,na.rm = TRUE),
HobubatPrice=weighted.mean(AdasPrice,Weight,na.rm = TRUE),
SibzaminiPrice=weighted.mean(SibzaminiPrice,Weight,na.rm = TRUE),
Sabzi_KhordanPrice=weighted.mean(Sabzi_KhordanPrice,Weight,na.rm = TRUE),
Banana_CoconutPrice=weighted.mean(Banana_CoconutPrice,Weight,na.rm = TRUE),
LivestockGrams=weighted.mean(LivestockGrams,Weight,na.rm = TRUE),
PoultryMeat_MPrice=weighted.mean(PoultryMeat_MPrice,Weight,na.rm = TRUE),
Egg_MashinPrice=weighted.mean(Egg_MashinPrice,Weight,na.rm = TRUE),
Milk_PasteurizedPrice=weighted.mean(Milk_PasteurizedPrice,Weight,na.rm = TRUE),
Oil_NabatiPrice=weighted.mean(Oil_NabatiPrice,Weight,na.rm = TRUE),
GhandPrice=weighted.mean(GhandPrice,Weight,na.rm = TRUE))]
#by=.(cluster3,Region)]
gram <- MD[Selected_Group==1,
.(BreadGrams=weighted.mean(BreadGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE),
GrainGrams=weighted.mean(GrainGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE),
MacaroniGram=weighted.mean(MacaroniGram/(EqSizeCalory),Weight*Size,na.rm = TRUE),
HobubatGrams=weighted.mean((AdasGram+Loobia_ChitiGram+NokhodGram)/(EqSizeCalory),Weight*Size,na.rm = TRUE),
SibzaminiGram=weighted.mean(SibzaminiGram/(EqSizeCalory),Weight*Size,na.rm = TRUE),
VegetableShrubsGrams=weighted.mean(VegetableShrubsGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE),
TreeFruitsGrams=weighted.mean(TreeFruitsGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE),
LivestockGrams=weighted.mean(LivestockGrams/(EqSizeCalory),Weight*Size,na.rm = TRUE),
PoultryMeat_MGram=weighted.mean(PoultryMeat_MGram/(EqSizeCalory),Weight*Size,na.rm = TRUE),
Egg_MashinGram=weighted.mean(Egg_MashinGram/(EqSizeCalory),Weight*Size,na.rm = TRUE),
MilkproductsGrams=weighted.mean((MilkproductsGrams+MilkGrams)/(EqSizeCalory),Weight*Size,na.rm = TRUE),
Oil_NabatiGram=weighted.mean(Oil_NabatiGram/(EqSizeCalory),Weight,na.rm = TRUE),
GhandGram=weighted.mean(GhandGram/(EqSizeCalory),Weight*Size,na.rm = TRUE))]
# by=.(cluster3,Region)]
gram<-t(gram)
MDP[is.na(MDP)] <- 0
min<-MDP[FPLine>0,min(FPLine)]
MDP[,FPLine:=ifelse(FPLine==0,min,FPLine)]
Bundle <- MD[,
.( BreadGrams=weighted.mean(BreadGrams/EqSizeCalory,Weight,na.rm = TRUE),
BerenjKhareji= weighted.mean((Rice_Khareji1Gram+Rice_Khareji2Gram)/EqSizeCalory,Weight,na.rm = TRUE),
BerenjIrani= weighted.mean((Rice_TaromGram+ Rice_AshGram+
Rice_NonameGram+ Rice_MahaliGram+Rice_DomsiahGram+
Rice_KhoordeGram)/EqSizeCalory,Weight,na.rm = TRUE),
MacaroniGram= weighted.mean(MacaroniGram/EqSizeCalory,Weight,na.rm = TRUE),
HoboobatGram= weighted.mean((AdasGram+Loobia_ChitiGram+NokhodGram)/EqSizeCalory,Weight,na.rm = TRUE),
SibzaminiGram= weighted.mean(SibzaminiGram/EqSizeCalory,Weight,na.rm = TRUE),
VegetableShrubsGrams= weighted.mean(VegetableShrubsGrams/EqSizeCalory,Weight,na.rm = TRUE),
TreeFruitsGrams= weighted.mean(TreeFruitsGrams/EqSizeCalory,Weight,na.rm = TRUE),
CowMeatGram= weighted.mean(CowMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
SheepGrams= weighted.mean(SheepMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
PoultryMeat_MGram= weighted.mean(PoultryMeat_MGram/EqSizeCalory,Weight,na.rm = TRUE),
Egg_MashinGram= weighted.mean(Egg_MashinGram/EqSizeCalory,Weight,na.rm = TRUE),
MilkproductsGrams= weighted.mean((MilkproductsGrams+MilkGrams)/EqSizeCalory,Weight,na.rm = TRUE),
Oil_NabatiGram= weighted.mean((Oil_NabatiGram+Oil_OliveGram+Oil_Nabati_OtherGram)/EqSizeCalory,Weight,na.rm = TRUE),
GhandGram= weighted.mean((GhandGram+ShekarGram)/EqSizeCalory,Weight,na.rm = TRUE))
#,by="Region"
]
Bundle2 <- MD[,
.( BreadGrams=weighted.mean(BreadGrams/EqSizeCalory,Weight,na.rm = TRUE),
BerenjKhareji= weighted.mean((Rice_Khareji1Gram+Rice_Khareji2Gram)/EqSizeCalory,Weight,na.rm = TRUE),
BerenjIrani= weighted.mean((Rice_TaromGram+ Rice_AshGram+
Rice_NonameGram+ Rice_MahaliGram+Rice_DomsiahGram+
Rice_KhoordeGram)/EqSizeCalory,Weight,na.rm = TRUE),
MacaroniGram= weighted.mean(MacaroniGram/EqSizeCalory,Weight,na.rm = TRUE),
HoboobatGram= weighted.mean((AdasGram+Loobia_ChitiGram+NokhodGram)/EqSizeCalory,Weight,na.rm = TRUE),
SibzaminiGram= weighted.mean(SibzaminiGram/EqSizeCalory,Weight,na.rm = TRUE),
VegetableShrubsGrams= weighted.mean(VegetableShrubsGrams/EqSizeCalory,Weight,na.rm = TRUE),
TreeFruitsGrams= weighted.mean(TreeFruitsGrams/EqSizeCalory,Weight,na.rm = TRUE),
CowMeatGram= weighted.mean(CowMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
SheepGrams= weighted.mean(SheepMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
PoultryMeat_MGram= weighted.mean(PoultryMeat_MGram/EqSizeCalory,Weight,na.rm = TRUE),
Egg_MashinGram= weighted.mean(Egg_MashinGram/EqSizeCalory,Weight,na.rm = TRUE),
MilkproductsGrams= weighted.mean((MilkproductsGrams+MilkGrams)/EqSizeCalory,Weight,na.rm = TRUE),
Oil_NabatiGram= weighted.mean((Oil_NabatiGram+Oil_OliveGram+Oil_Nabati_OtherGram)/EqSizeCalory,Weight,na.rm = TRUE),
GhandGram= weighted.mean((GhandGram+ShekarGram)/EqSizeCalory,Weight,na.rm = TRUE))
,by="Region"
]
Bundle3 <- MD[,
.( BreadGrams=weighted.mean(BreadGrams/EqSizeCalory,Weight,na.rm = TRUE),
BerenjKhareji= weighted.mean((Rice_Khareji1Gram+Rice_Khareji2Gram)/EqSizeCalory,Weight,na.rm = TRUE),
BerenjIrani= weighted.mean((Rice_TaromGram+ Rice_AshGram+
Rice_NonameGram+ Rice_MahaliGram+Rice_DomsiahGram+
Rice_KhoordeGram)/EqSizeCalory,Weight,na.rm = TRUE),
MacaroniGram= weighted.mean(MacaroniGram/EqSizeCalory,Weight,na.rm = TRUE),
HoboobatGram= weighted.mean((AdasGram+Loobia_ChitiGram+NokhodGram)/EqSizeCalory,Weight,na.rm = TRUE),
SibzaminiGram= weighted.mean(SibzaminiGram/EqSizeCalory,Weight,na.rm = TRUE),
VegetableShrubsGrams= weighted.mean(VegetableShrubsGrams/EqSizeCalory,Weight,na.rm = TRUE),
TreeFruitsGrams= weighted.mean(TreeFruitsGrams/EqSizeCalory,Weight,na.rm = TRUE),
CowMeatGram= weighted.mean(CowMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
SheepGrams= weighted.mean(SheepMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
PoultryMeat_MGram= weighted.mean(PoultryMeat_MGram/EqSizeCalory,Weight,na.rm = TRUE),
Egg_MashinGram= weighted.mean(Egg_MashinGram/EqSizeCalory,Weight,na.rm = TRUE),
MilkproductsGrams= weighted.mean((MilkproductsGrams+MilkGrams)/EqSizeCalory,Weight,na.rm = TRUE),
Oil_NabatiGram= weighted.mean((Oil_NabatiGram+Oil_OliveGram+Oil_Nabati_OtherGram)/EqSizeCalory,Weight,na.rm = TRUE),
GhandGram= weighted.mean((GhandGram+ShekarGram)/EqSizeCalory,Weight,na.rm = TRUE))
,by="FinalPoor"
]
Bundle4 <- MD[,
.( BreadGrams=weighted.mean(BreadGrams/EqSizeCalory,Weight,na.rm = TRUE),
BerenjKhareji= weighted.mean((Rice_Khareji1Gram+Rice_Khareji2Gram)/EqSizeCalory,Weight,na.rm = TRUE),
BerenjIrani= weighted.mean((Rice_TaromGram+ Rice_AshGram+
Rice_NonameGram+ Rice_MahaliGram+Rice_DomsiahGram+
Rice_KhoordeGram)/EqSizeCalory,Weight,na.rm = TRUE),
MacaroniGram= weighted.mean(MacaroniGram/EqSizeCalory,Weight,na.rm = TRUE),
HoboobatGram= weighted.mean((AdasGram+Loobia_ChitiGram+NokhodGram)/EqSizeCalory,Weight,na.rm = TRUE),
SibzaminiGram= weighted.mean(SibzaminiGram/EqSizeCalory,Weight,na.rm = TRUE),
VegetableShrubsGrams= weighted.mean(VegetableShrubsGrams/EqSizeCalory,Weight,na.rm = TRUE),
TreeFruitsGrams= weighted.mean(TreeFruitsGrams/EqSizeCalory,Weight,na.rm = TRUE),
CowMeatGram= weighted.mean(CowMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
SheepGrams= weighted.mean(SheepMeatGram/EqSizeCalory,Weight,na.rm = TRUE),
PoultryMeat_MGram= weighted.mean(PoultryMeat_MGram/EqSizeCalory,Weight,na.rm = TRUE),
Egg_MashinGram= weighted.mean(Egg_MashinGram/EqSizeCalory,Weight,na.rm = TRUE),
MilkproductsGrams= weighted.mean((MilkproductsGrams+MilkGrams)/EqSizeCalory,Weight,na.rm = TRUE),
Oil_NabatiGram= weighted.mean((Oil_NabatiGram+Oil_OliveGram+Oil_Nabati_OtherGram)/EqSizeCalory,Weight,na.rm = TRUE),
GhandGram= weighted.mean((GhandGram+ShekarGram)/EqSizeCalory,Weight,na.rm = TRUE))
,by="Decile"
]
Bundle5 <- MD[,.( PoultryMeat_MGram= weighted.mean((`011231`+`011232`)/Total_Exp_Month,Weight,na.rm = TRUE),
Egg_MashinGram= weighted.mean((`011441`+`011442`)/Total_Exp_Month,Weight,na.rm = TRUE),
Oil_NabatiGram= weighted.mean(G01153/Total_Exp_Month,Weight,na.rm = TRUE)),by="Decile"]
Bundle6 <- MD[,.( PoultryMeat_MGram= weighted.mean((`011231`+`011232`)/Total_Exp_Month,Weight,na.rm = TRUE),
Egg_MashinGram= weighted.mean((`011441`+`011442`)/Total_Exp_Month,Weight,na.rm = TRUE),
Oil_NabatiGram= weighted.mean(G01153/Total_Exp_Month,Weight,na.rm = TRUE))]
MD <- merge(MD,MDP,by=c("Region","cluster3"))
# print(MDP)
#x<-MD[,.(NewArea,Region,FPLine,InitialPoor)]
MD[,NewPoor:=ifelse(TOriginalFoodExpenditure_Per < FPLine,1,0)]
# print(table(MD[,.(ThisIterationPoor,NewPoor)]))
MD[,OldPoor:=ThisIterationPoor]
}
MD[,FinalFoodPoor:=OldPoor]
# MD <- MD[,.(HHID,HIndivNo,Region,NewArea,NewArea_Name,cluster3,ProvinceCode,Size,HAge,HSex,Month,ServiceExp,
# HLiterate,HEduLevel0,HActivityState,Area,Rooms,MetrPrice,Total_Exp_Month_nondurable,
# Total_Exp_Month_Per_nondurable,TOriginalFoodExpenditure_Per,
# OriginalFoodExpenditure_Per,FPLine,Weight,Percentile,FinalFoodPoor,
# Total_Exp_Month_Per,TFoodKCaloriesHH_Per,TOriginalFoodExpenditure,Total_Exp_Month,
# TFoodExpenditure2,Total_Exp_Month_nondurable2,Total_Exp_Month2,
# Total_Exp_Month_Per2,
# EqSizeOECD,EqSizeCalory,Decile,Bundle_Value)]
save(MD,file=paste0(Settings$HEISProcessedPath,"Y",year,"FinalFoodPoor.rda"))
MD[,weighted.mean(FinalFoodPoor,Weight)]
# MDFinalfood<-MD[,.(HHID,Region,NewArea,cluster3,Percentile,FinalFoodPoor)]
# UrbanFinalfood<-MDFinalfood[Region=="Urban"]
# RuralFinalfood<-MDFinalfood[Region=="Rural"]
# save(UrbanFinalfood, file=paste0(Settings$HEISProcessedPath,"Y",year,"UrbanFinalfood.rda"))
# save(RuralFinalfood, file=paste0(Settings$HEISProcessedPath,"Y",year,"RuralFinalfood.rda"))
#
MD[,weighted.mean(FinalFoodPoor,Weight),by=c("Region","ProvinceCode")][order(Region,ProvinceCode)]
MD[,weighted.mean(FinalFoodPoor,Weight),by=cluster3][order(cluster3)]
# cat(MD[,weighted.mean(FPLine,Weight)])
# cat(MD[cluster3==13,weighted.mean(Calory_Price,Weight)])
#cat(MD[cluster3==1,weighted.mean(TOriginalFoodExpenditure_Per,Weight)])
x<-MD[,weighted.mean(FPLine,Weight),by="cluster3"]
# cat(MD[,weighted.mean(TOriginalFoodExpenditure_Per,Weight)],"\n")
# cat(MD[,weighted.mean(TFoodKCaloriesHH_Per,Weight,na.rm = TRUE)],"\n")
# cat(MD[,weighted.mean(Calory_Price,Weight,na.rm = TRUE)],"\n")
# cat(MD[cluster3==1,weighted.mean(FPLine,Weight,na.rm = TRUE)],"\n")
cat(MD[,weighted.mean(FPLine,Weight,na.rm = TRUE)],"\n")
}
# Report total elapsed wall-clock time for the run.
# Assumes `starttime <- proc.time()` was recorded earlier in the script.
endtime <- proc.time()
cat("\n\n============================\nIt took ")
cat((endtime-starttime)["elapsed"])
cat(" seconds")
|
f46162645e9d1d6b02750689f6c8ff8532565949
|
3d8a30386c98b68d36330212ccfde745ced8cce7
|
/data-raw/process_internal_data.R
|
a5d656fbfc15174b3ab5707178dcca93cc397be2
|
[
"MIT"
] |
permissive
|
robbriers/stationaRy
|
f0c8d915a65f656aab9b189b1fa6d8c9389635d5
|
517d0316057198bcfdb3dce7489f25e428b1c073
|
refs/heads/master
| 2022-11-15T02:43:35.665986
| 2022-10-25T09:12:43
| 2022-10-25T09:12:43
| 194,041,119
| 0
| 0
| null | 2019-06-27T07:04:24
| 2019-06-27T07:04:24
| null |
UTF-8
|
R
| false
| false
| 193
|
r
|
process_internal_data.R
|
# Regenerate the package's internal station-history dataset.
library(stationaRy)
library(sf)
library(usethis)

# Build the station-history table (with time-zone lookup enabled) via the
# package-internal helper, then store it as internal package data, replacing
# any previous version.
history_tbl <- stationaRy:::get_history_tbl(perform_tz_lookup = TRUE)

usethis::use_data(history_tbl, internal = TRUE, overwrite = TRUE)
|
ed82372c0bf51f2e5b8dc171bb7e622100d83c50
|
eacbb8f1937441c570c61679b50b37bbd480fe90
|
/vignettes/MRCIEUGTEx.R
|
6c05dc7bc450fae81ae1c107ff59063afa0e64ab
|
[] |
no_license
|
mbyvcm/MRCIEUGTEx
|
6ae821064d09adc4fb385bd9c1a9e17a4da09736
|
25af5ed685b92711d9612e7480c2f6eabdcfd080
|
refs/heads/master
| 2021-06-17T11:28:29.808399
| 2017-04-21T13:47:00
| 2017-04-21T13:47:00
| 86,478,628
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
r
|
MRCIEUGTEx.R
|
## ---- eval=FALSE---------------------------------------------------------
# # requires devtools
# install.packages('devtools')
# library(devtools)
#
# # install package from github
# install_github("mbyvcm/MRCIEUGTEx", quiet = T)
# library(MRCIEUGTEx)
## ---- eval=FALSE---------------------------------------------------------
# # path to GTEx VCF file
# gtex_vcf_dir <- ""
#
# # path to expression & covariate tar directories downloaded from GTEx Portal
# covariate_matrix_tar <- ""
# expression_matrix_tar <- ""
## ---- eval=F-------------------------------------------------------------
# source('config.R')
## ---- eval=FALSE---------------------------------------------------------
#
|
522cb54143c010b0a7f246702439dda76d0265b2
|
a611bd21c8fbbeae34f1a90013d34655ea817a18
|
/lession7/text_mining_basic.R
|
e7a92af57c116d3c83b8f94d1f886e48c594125f
|
[] |
no_license
|
chunam76/RStudy
|
cec36c3b795588afe087cd470e30c9c81a8df92f
|
7bfca66e4141ad4e7bbae5789d351f130c4db754
|
refs/heads/master
| 2020-04-26T22:12:17.708891
| 2019-03-15T04:50:37
| 2019-03-15T04:50:37
| 173,864,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
text_mining_basic.R
|
## Text data analysis
# 1) Load the text-mining package
library(tm)
# 2) Load the "crude" example corpus shipped with tm
data("crude")
tdm <- TermDocumentMatrix(crude)
tdm
# 3) Inspect the term-document matrix
inspect(tdm)
# 4) Show only terms that occur at least 10 times
findFreqTerms(tdm,lowfreq=10)
# 5) Show terms highly associated with "oil" (correlation >= 0.7)
findAssocs(tdm,"oil",0.7)
# 6) Bar chart of term frequencies
freq <- sort(rowSums(as.matrix(tdm)), decreasing=TRUE)
freq
wf <- data.frame(word=names(freq), freq=freq)
wf
library(ggplot2)
ggplot(subset(wf, freq>20), aes(word, freq))+ geom_bar(stat="identity")+ theme_bw()
|
17c3ed87cc6037c9ba192ab8fb7a97c80ed81760
|
c79fa021f5bb195a4abfcf81d88a49b5ae86ce73
|
/tests/testthat.r
|
8d69e45f5e38f72a8bda00702db677ee7911eb75
|
[
"MIT"
] |
permissive
|
topepo/sparsediscrim
|
7c99e48f9552455c494e6a04ab2baabd4044a813
|
60198a54e0ced0afa3909121eea55321dd04c56f
|
refs/heads/main
| 2021-08-08T17:04:45.633377
| 2021-06-28T00:27:34
| 2021-06-28T00:27:34
| 313,120,774
| 4
| 0
|
NOASSERTION
| 2021-06-28T00:27:34
| 2020-11-15T20:51:32
|
R
|
UTF-8
|
R
| false
| false
| 70
|
r
|
testthat.r
|
# Standard testthat entry point: discover and run the package's test suite.
library(testthat)
library(sparsediscrim)
test_check("sparsediscrim")
|
0b53f13f7ea90a44108eff25c3937c4618abdcb1
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055947-test.R
|
6d91a9c6947992ab3e6c2c8ccd1561288ee4a448
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 704
|
r
|
1610055947-test.R
|
# Auto-generated libFuzzer/valgrind reproducer: replays one captured input
# (arguments a, b, and vector x) against grattan:::anyOutside and prints the
# structure of the result. Do not edit the literal values — they encode the
# crashing/interesting input found by the fuzzer.
testlist <- list(a = -1073741825L, b = 704643071L, x = c(1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, -1195919433L, -1212696654L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212696648L, 1212172360L, 1212696648L, 1224736767L, -1L, -8323073L, -1L, -1L, 543387502L, 1936992767L, -458964L, -16550282L, 1981546321L, -1L, 905081681L, -218959169L, -1L, -13284L, -858993460L, -855638017L, 689656319L, 704642864L, -1L, -1L, -13563137L, -203L, -218959118L, 822804479L, -51726L, -218959169L, -13487361L, -13284L, 822804277L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
a4afefd6a88de60529826a31ac51b97f34d4423d
|
79afffae6d108b1a93aea7c72a55cf1fc7247498
|
/man/Polynomial2.rd
|
0b1fe47bff3e335613ac0c3daed7a54513350ae2
|
[] |
no_license
|
cran/assist
|
efbbad8da52741412f5dc933457774672de90b12
|
866a22f739a0e84d8631044225e3676651c987f2
|
refs/heads/master
| 2023-09-01T13:13:28.031385
| 2023-08-22T07:00:02
| 2023-08-22T07:30:44
| 17,718,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,268
|
rd
|
Polynomial2.rd
|
\name{Polynomial2}
\alias{linear2}
\alias{cubic2}
\alias{quintic2}
\alias{septic2}
\title{
Calculate Reproducing Kernels for Polynomial Splines on [0, T]
}
\description{
Return a matrix evaluating reproducing kernels for polynomial splines at observed points.
}
\usage{
linear2(s, t=s)
cubic2(s, t=s)
quintic2(s, t=s)
septic2(s, t=s)
}
\arguments{
\item{s}{
a vector of non-negative values, at which the kernels are evaluated.
}
\item{t}{
an optional non-negative vector. Default is the same as s.
}
}
\details{
The reproducing kernels implemented in these functions are based on Green functions. The domain is
[0, T], where T is a given positive number.
}
\value{
a matrix whose numbers of rows and columns equal the lengths of s and t, respectively.
The [i, j] element is the reproducing kernel of the linear, cubic, quintic, or septic spline
evaluated at (s[i], t[j]).
}
\references{
Wahba, G. (1990). Spline Models for Observational Data. SIAM, Vol. 59.
}
\author{Chunlei Ke \email{chunlei_ke@yahoo.com} and Yuedong Wang \email{yuedong@pstat.ucsb.edu}}
\seealso{
\code{\link{ssr}}, \code{\link{linear}}, \code{\link{cubic}},
\code{\link{quintic}}, \code{\link{septic}}
}
\examples{
\dontrun{
x<- seq(0, 5, len=10)
linear2(x)
}
}
\keyword{file}
|
15b33a47cf0c82075b580348a3e9d9e0b60b5e71
|
b9897a73fa885f30ef468afa3b9ed0aba18bc9ec
|
/plot4.R
|
e0834f8fede6a5e62f3b19e6629dccc1c60eb9db
|
[] |
no_license
|
anilmuthineni/ExData_Plotting1
|
096b509daf2ffd2eebefb3f58daaecb101fb7431
|
452c6a3b2aadedb675de34fe0f7e60c253dc0536
|
refs/heads/master
| 2021-01-20T11:19:50.517429
| 2016-07-15T19:04:53
| 2016-07-15T19:04:53
| 63,436,760
| 0
| 0
| null | 2016-07-15T16:42:39
| 2016-07-15T16:42:38
| null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of household power-consumption plots for 2007-02-01/02.
# Load power consumption data. The raw file marks missing values with "?";
# declaring them via na.strings keeps the measurement columns numeric
# (without it they are read as character and the plots below fail).
power_consumption_data <- read.table("household_power_consumption.txt",
                                     sep = ";", header = TRUE,
                                     na.strings = "?")
# Combine the date and time strings into a single POSIXlt date-time column.
power_consumption_data$Datetime <- strptime(
  paste(power_consumption_data$Date, power_consumption_data$Time),
  format = "%d/%m/%Y %H:%M:%S")
# Convert date strings into Date objects so they can be range-filtered.
power_consumption_data$Date <- as.Date(power_consumption_data$Date, "%d/%m/%Y")
# Keep only the two days of interest.
exploration_data <- power_consumption_data[
  power_consumption_data$Date >= "2007-02-01" &
  power_consumption_data$Date <= "2007-02-02", ]
# Open the PNG device with the required dimensions.
png(filename = "plot4.png", width = 480, height = 480, bg = "white")
# Draw the four panels.
par(mfrow = c(2, 2))
plot(exploration_data$Datetime, exploration_data$Global_active_power,
     type = 'l', xlab = '', ylab = 'Global Active Power')
plot(exploration_data$Datetime, exploration_data$Voltage,
     type = 'l', xlab = 'datetime', ylab = 'Voltage')
plot(exploration_data$Datetime, exploration_data$Sub_metering_1,
     type = 'l', xlab = '', ylab = 'Energy sub metering')
lines(exploration_data$Datetime, exploration_data$Sub_metering_2, type = 'l', col = 'red')
lines(exploration_data$Datetime, exploration_data$Sub_metering_3, type = 'l', col = 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       col = c('black', 'red', 'blue'), bty = 'n', lty = 1)
plot(exploration_data$Datetime, exploration_data$Global_reactive_power,
     type = 'l', xlab = 'datetime', ylab = 'Global_reactive_power')
dev.off()
|
a5be61181ba5a852738fa73be486cb50c219f11a
|
779215d6b0ac83368f9c71f7a4aff494d64a0835
|
/myIBP/uniqueMatrix.R
|
3d48a36995e2cc13d552e42c1c91c634f960e4e8
|
[] |
no_license
|
luiarthur/byuMsProject
|
a257eccd12addca37b5a289ab47bb80f3fd1aecf
|
0a3c101de8b311639dd60355ff62f562e8399bbe
|
refs/heads/master
| 2021-01-19T08:32:20.541339
| 2015-03-31T16:14:12
| 2015-03-31T16:14:12
| 31,347,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,620
|
r
|
uniqueMatrix.R
|
# Parse a string of the form "v1, v2, ..., vk : nrow ncol" (the serialization
# produced by unique.matrix() below) back into the matrix it encodes.
toMat <- function(s) {
  # Locate the ": nrow ncol" suffix that carries the dimensions.
  dims <- regexpr(": \\d* \\d*", s)
  begin <- as.integer(dims) + 2
  end <- begin + attr(dims, "match.length")
  dims <- substr(s, begin, end)
  pos <- as.integer(regexpr(" ", dims))
  dims <- as.integer(c(substr(dims, 1, pos - 1),
                       substr(dims, pos + 1, nchar(dims))))
  # Everything before " :" is the flattened, comma-separated data.
  mat <- substr(s, 1, begin - 3)
  M <- matrix(0, dims[1], dims[2])
  if (mat > " ") {  # non-blank payload: fill column-major
    vec <- as.integer(strsplit(mat, ",")[[1]])
    M <- matrix(vec, dims[1], dims[2])
  }
  M
}

# Count the unique matrices in a list.
# X: a list of (integer-valued) matrices.
# Returns a list of two elements:
#   [[1]] counts of each distinct matrix, sorted in decreasing order;
#   [[2]] the corresponding unique matrices, in the same order.
# NOTE(review): the name masks the S3 method unique() would dispatch to for
# objects of class "matrix"; kept for backward compatibility with callers.
unique.matrix <- function(X) {
  # Serialize each matrix (values plus dimensions) so equal matrices collapse
  # to equal strings, then tabulate the strings.
  S <- lapply(X, function(x) paste(toString(x), ":", nrow(x), ncol(x), collapse = ","))
  tab <- table(unlist(S))
  counts <- as.integer(tab)
  uniq.M <- lapply(as.list(names(tab)), toMat)
  # Spelled-out argument names (the original used partial matching: decr=T).
  ind <- sort(counts, index.return = TRUE, decreasing = TRUE)$ix
  list(counts[ind], uniq.M[ind])
}
# Apply f to each element of the list L and row-bind the results.
# Assumes f returns a vector of fixed length k for every element; the result
# is a length(L) x k matrix whose row i is f(L[[i]]).
Rapply <- function(L, f) {
  n <- length(L)
  # seq_len() (not 1:n) so an empty list does not index L[[0]]/L[[1]].
  out <- sapply(seq_len(n), function(i) f(L[[i]]))
  t(out)
}
# Relative frequency of matrix m among the matrices in the list Zs.
# Returns count(m in Zs) / length(Zs); 0 if m never occurs.
# Fixes vs. the original: the body referenced undefined globals `x` (now `m`)
# and `X` (now the `Zs` parameter), so it could not run as written.
get.freq <- function(m, Zs) {
  N <- length(Zs)
  # Serialize matrices the same way unique.matrix() does, so equal matrices
  # (values AND dimensions) map to equal strings.
  m.name <- paste(toString(m), ":", nrow(m), ncol(m), collapse = ",")
  S <- lapply(Zs, function(x) paste(toString(x), ":", nrow(x), ncol(x), collapse = ","))
  tab <- table(unlist(S))
  counts <- as.integer(tab)
  mat <- names(tab)
  count <- 0
  if (m.name %in% mat) {
    count <- counts[which(mat == m.name)]
  }
  count / N
}
|
9496b9b253313750ca12e03ab296805156dfacdf
|
a790ee7a53ce16fb0a9340cb848f4ec1fd69cbb7
|
/assist/banditAlgo.R
|
c9f49b2621fb7bc9aab8277c9e6e961bec5ec05f
|
[] |
no_license
|
NetZissou/Bandit
|
27c0045f77b1d81d1c4204cbf3c51461419e38a8
|
9ec387a22f0b51de378dfcac0d12baadcfc1cb89
|
refs/heads/main
| 2023-06-26T18:34:10.546210
| 2021-07-30T18:51:34
| 2021-07-30T18:51:34
| 340,757,733
| 1
| 0
| null | 2021-05-10T16:09:55
| 2021-02-20T21:32:02
|
R
|
UTF-8
|
R
| false
| false
| 5,572
|
r
|
banditAlgo.R
|
# Upper Confidence Bound
# Importing the dataset
library(tidyverse)
# Return the bandit (arm) label that appears most often in a selection
# history. bandit_selected: vector of arm labels, one per round.
get_most_frequent_bandit <- function(bandit_selected) {
  pull_counts <- table(bandit_selected)
  # which.max() returns the first maximum, which matches taking the first
  # element of the counts sorted in decreasing order (sort is stable).
  as.numeric(names(pull_counts)[which.max(pull_counts)])
}
# Simulated click data: 10,000 Bernoulli draws per ad with ad-specific
# success probabilities (Ad.9 is best at p = 0.6). Entry [i, j] is the reward
# the agent would observe by showing ad j in round i. The logical draws are
# converted to 0/1 numerics for the reward arithmetic below.
dataset <-
  tibble(
    Ad.1 = rbernoulli(10000, p = 0.3),
    Ad.2 = rbernoulli(10000, p = 0.2),
    Ad.3 = rbernoulli(10000, p = 0.15),
    Ad.4 = rbernoulli(10000, p = 0.35),
    Ad.5 = rbernoulli(10000, p = 0.3),
    Ad.6 = rbernoulli(10000, p = 0.4),
    Ad.7 = rbernoulli(10000, p = 0.5),
    Ad.8 = rbernoulli(10000, p = 0.55),
    Ad.9 = rbernoulli(10000, p = 0.6),
    Ad.10 = rbernoulli(10000, p = 0.45),
  ) %>%
  mutate_if(is.logical, as.numeric)
# Implementing Random selection
# Baseline strategy: pick an ad uniformly at random in every round.
# data: reward table (rows = rounds, cols = ads); defaults to the global
#       `dataset` built above. seed: RNG seed for reproducibility.
# Prints the total reward and (new) returns it invisibly; the original
# returned cat()'s NULL. Existing callers that ignored the result still work.
rnd_selection <- function(data = dataset, seed = 1024) {
  set.seed(seed)
  N <- nrow(data)
  d <- ncol(data)
  ads_selected <- integer(N)  # preallocated instead of grown with append()
  total_reward <- 0
  for (i in seq_len(N)) {     # seq_len() is safe when N == 0
    # sample(d, 1) draws from 1:d for scalar d — same RNG stream as before.
    ad <- sample(d, 1)
    ads_selected[i] <- ad
    total_reward <- total_reward + data[[i, ad]]
  }
  cat("Total Reward: ", total_reward)
  invisible(total_reward)
}
# UCB Random selection
# Upper Confidence Bound (UCB1) selection.
# data: reward table (rows = rounds, cols = ads). seed kept for interface
# compatibility (the algorithm itself is deterministic).
# Fixes vs. the original: N and d are taken from the `data` argument (the
# original read the global `dataset` regardless of what was passed), and the
# post-initialization rewards are read from row d + i — the current round —
# instead of re-reading rows 1..N-d. Prints totals and (new) returns the
# total reward invisibly.
ucb_selection <- function(data = dataset, seed = 1024) {
  # ---------------- Initialization: play each arm once --------------------
  set.seed(seed)
  N <- nrow(data)                     # total number of rounds
  d <- ncol(data)                     # number of bandits (ads)
  ads_selected <- integer(0)          # arm chosen at each round
  numbers_of_selections <- rep(0, d)  # pulls per arm
  sums_of_rewards <- rep(0, d)        # cumulative reward per arm
  total_reward <- 0
  for (i in seq_len(d)) {
    ads_selected <- c(ads_selected, i)
    numbers_of_selections[i] <- numbers_of_selections[i] + 1
    sums_of_rewards[i] <- sums_of_rewards[i] + data[[i, i]]
    total_reward <- total_reward + data[[i, i]]
  }
  # ---------------- Remaining rounds: play the max-UCB arm -----------------
  for (i in seq_len(N - d)) {
    max_ucb_ad <- 0
    max_upper_bound <- 0
    for (j in seq_len(d)) {
      avg_reward <- sums_of_rewards[j] / numbers_of_selections[j]
      # UCB1 exploration bonus: sqrt(3/2 * log(round) / pulls_j).
      delta <- sqrt(3 / 2 * log(i) / numbers_of_selections[j])
      upper_bound <- avg_reward + delta
      if (upper_bound > max_upper_bound) {
        max_ucb_ad <- j
        max_upper_bound <- upper_bound
      }
    }
    current_row <- d + i  # this round's row in `data`
    ads_selected <- c(ads_selected, max_ucb_ad)
    numbers_of_selections[max_ucb_ad] <- numbers_of_selections[max_ucb_ad] + 1
    sums_of_rewards[max_ucb_ad] <- sums_of_rewards[max_ucb_ad] + data[[current_row, max_ucb_ad]]
    total_reward <- total_reward + data[[current_row, max_ucb_ad]]
  }
  cat("Total Reward", total_reward, "\n")
  cat("Best Bandit Discovered: ", get_most_frequent_bandit(ads_selected))
  invisible(total_reward)
}
# Thomphson Sampling Strategy
# return: param logs
# Thompson Sampling for Bernoulli bandits.
# data: reward table (rows = rounds, cols = ads).
# Maintains a Beta(alpha + 1, beta + 1) posterior per arm, draws one sample
# from each posterior every round, and plays the arm with the largest draw.
# Returns a list of per-round parameter logs (wide tibbles `alpha`, `beta`).
# Fix vs. the original: the per-arm count names are built from 1:d (the
# actual number of arms) instead of the hard-coded 1:10, so the function
# works for any number of columns.
tps_sampling <- function(data = dataset) {
  N <- nrow(data)
  d <- ncol(data)
  ads_selected <- integer(0)
  # Per-arm success (alpha) and failure (beta) counts.
  numbers_of_reward_1 <-
    rep(0, d) %>% set_names(paste0("alpha_", 1:d))
  numbers_of_reward_0 <-
    rep(0, d) %>% set_names(paste0("beta_", 1:d))
  # One-row wide tibbles that accumulate the counts after every round.
  alpha_logs <- tibble(
    bandit = paste0("alpha_", 1:d),
    init = 0
  ) %>%
    pivot_wider(
      bandit, names_from = bandit,
      values_from = init
    )
  beta_logs <- tibble(
    bandit = paste0("beta_", 1:d),
    init = 0
  ) %>%
    pivot_wider(
      bandit, names_from = bandit,
      values_from = init
    )
  total_reward <- 0
  # Prior of theta: theta_i ~ Beta(1, 1) / Uniform(0, 1)
  # Likelihood: y|theta_i ~ Bernoulli(theta_i)
  # Posterior: theta_i|Y = y ~ Beta(number of successes + 1, number of failures + 1)
  for (i in 1:N) {
    max_beta_ad <- 0
    max_random_beta <- 0
    # Draw one sample from each arm's posterior; keep the argmax.
    for (j in 1:d) {
      random_beta <- rbeta(1,
                           shape1 = numbers_of_reward_1[j] + 1, # alpha
                           shape2 = numbers_of_reward_0[j] + 1  # beta
      )
      if (random_beta > max_random_beta) {
        max_random_beta <- random_beta
        max_beta_ad <- j
      }
    }
    ads_selected <- c(ads_selected, max_beta_ad)
    reward <- data[[i, max_beta_ad]]
    if (reward == 1) {
      numbers_of_reward_1[max_beta_ad] <- numbers_of_reward_1[max_beta_ad] + 1
    } else {
      numbers_of_reward_0[max_beta_ad] <- numbers_of_reward_0[max_beta_ad] + 1
    }
    # NOTE: row-binding inside the loop is O(N^2) in total; kept so the
    # returned log format is unchanged.
    alpha_logs <- alpha_logs %>%
      bind_rows(
        numbers_of_reward_1
      )
    beta_logs <- beta_logs %>%
      bind_rows(
        numbers_of_reward_0
      )
    total_reward <- total_reward + reward
  }
  cat("Total Reward", total_reward, "\n")
  cat("Best Bandit Discovered: ", get_most_frequent_bandit(ads_selected))
  param_logs <- list(
    alpha = alpha_logs,
    beta = beta_logs
  )
  return(param_logs)
}
# Plot the trace of the posterior-mean estimate theta = alpha / (alpha + beta)
# for one bandit across rounds.
# param_logs: the list returned by tps_sampling() (wide tibbles `alpha`,
#             `beta`, one row per round).
# bandit: selector passed to pull() for BOTH tibbles; since their column
#         names differ ("alpha_j" vs "beta_j"), a numeric column position is
#         presumably expected here rather than a name — TODO confirm.
# Returns a ggplot object (line plot of theta over rounds).
plot_param_trace <- function(param_logs, bandit) {
  trace_data <- tibble(
    alpha = param_logs$alpha %>% pull(bandit),
    beta = param_logs$beta %>% pull(bandit)
  ) %>%
    mutate(
      theta = alpha/(alpha + beta),
      step = row_number()
    )
  trace_plot <- trace_data %>%
    ggplot(aes(x = step, y = theta)) +
    geom_line()
  return(trace_plot)
}
|
ae956f2f23389729b66af70cbb891f73871b4d99
|
5e85df6e3edead3eca4a2a4730f1705d1228c23d
|
/unsorted_code/confint regressioni.R
|
ead7c6f8211624c483b95e7abb48f793fc3609d2
|
[
"MIT"
] |
permissive
|
giorgioarcara/R-code-Misc
|
125ff2a20531b2fbbc9536554042003b4e121766
|
decb68d1120e43df8fed29859062b6a8bc752d1d
|
refs/heads/master
| 2022-11-06T14:44:02.618731
| 2022-10-26T07:34:09
| 2022-10-26T07:34:09
| 100,048,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 650
|
r
|
confint regressioni.R
|
# Simulate bivariate data and draw pointwise confidence bands for the mean
# response of a simple linear regression.
x=rnorm(100)
y=x+rnorm(length(x),sd=1)
confidence.level=.99
mod.lm=lm(y~x)
#### Compute the residual standard error of the model (two equivalent forms)
residual.error=sqrt(deviance(mod.lm)/df.residual(mod.lm))
residual.error=sqrt(sum(resid(mod.lm)^2)/df.residual(mod.lm))
dat=data.frame(x=x,y=y)
# Grid of x values at which the band is evaluated.
xref=seq(-4,4,0.5)
# Half-width of the band at each xref.
# NOTE(review): qt(confidence.level, df) is a one-sided quantile; a two-sided
# band would use qt(1 - (1 - confidence.level)/2, df) — confirm intent.
# NOTE(review): the leverage term uses length(xref) and sums over xref,
# whereas the textbook formula uses the observed x values — confirm.
conf=qt(confidence.level, mod.lm$df.residual)*residual.error*sqrt((1/length(xref))+(((xref-mean(x))^2)/sum((xref-mean(x))^2)))
# ymean is computed on a coarser grid and not used below.
ymean=predict(mod.lm, newdata=list(x=seq(-4,4,1)))
conf.upper=predict(mod.lm, newdata=list(x=xref))+conf
conf.lower=predict(mod.lm, newdata=list(x=xref))-conf
# Scatter plot with the fitted line and the two band boundaries.
plot(x,y)
lines(xref,conf.upper)
lines(xref,conf.lower)
abline(mod.lm)
|
922959e522c5e4cdfffa76d7b86c4462b1949dfc
|
7f09a3ac9f6e8b4d36f56e779fc0da3602288b85
|
/Rcode/dyncpsimu.r
|
2f330e528d0b102cb605da79f10a94a417c28d6f
|
[] |
no_license
|
singlesp/TVDN
|
707d0467205369d4c398ee5f41bd63d95ec6fa9a
|
3f33488084df84de6207450c2312e6061b94229c
|
refs/heads/master
| 2023-08-28T17:29:34.046832
| 2021-10-14T02:14:01
| 2021-10-14T02:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,194
|
r
|
dyncpsimu.r
|
# Simulation script: fit a switching linear-ODE model to a sample fMRI
# recording, then simulate noisy trajectories from it (saved at the bottom).
rm(list = ls())
library(fda)
library(MASS)
library(glmnet)
library(mvtnorm)
library(R.matlab)
# Load the sample fMRI time series (matrix: regions x time points).
fMRI = readMat('../data/fMRI_sample.mat')
fMRI = fMRI$time.series
time = seq(0, 2, length.out = 180)
set.seed(2021) ##6chg6rank 2021
step = diff(time)[1]
dfMRI = fMRI
# B-spline basis and its evaluations; set up here, though the derivative
# below is actually computed with smooth.spline().
basis = create.bspline.basis(range(0, 3), nbasis = 15, norder = 4)
ebase = eval.basis(basis, time)
Lebase = eval.basis(basis, time, 1)
# First derivative of each region's smoothed signal.
for(i in 1:nrow(fMRI)){
  dfMRI[i, ] = predict(smooth.spline(time, fMRI[i, ], lambda = 0.001), deriv = 1)$y
}
# Change-point scan: slide a window of 2*nI points; at each centre i compare
# the residual sum of squares of a single linear-dynamics fit over the whole
# window (ssRfull) against separate fits on the left/right halves
# (ssRleft + ssRright). A large positive difference suggests the dynamics
# matrix changes near i.
nI = 20
dXmat = dfMRI
Xmat = fMRI
scan = rep(0, length(time))
for(i in nI:(length(time) - nI)){
  ix1 = (i - nI + 1) : i
  ix2 = ( i + 1) : (i + nI)
  ix = (i-nI + 1) : (i + nI)
  # Least-squares estimate of A in dX = A X over each index set:
  # A = dX X' (X X')^+ using the Moore-Penrose pseudoinverse (MASS::ginv).
  fullA = dXmat[, ix] %*% t(Xmat[, ix] ) %*% ginv(Xmat[, ix] %*% t(Xmat[, ix]) )
  leftA = dXmat[, ix1] %*% t(Xmat[, ix1] ) %*% ginv(Xmat[, ix1] %*% t(Xmat[, ix1]))
  rightA = dXmat[, ix2] %*% t(Xmat[, ix2] ) %*% ginv(Xmat[, ix2] %*% t(Xmat[, ix2]))
  temp = as.vector(dXmat[, ix] - fullA %*% Xmat[, ix])
  ssRfull = t(temp) %*% temp
  temp = as.vector(dXmat[, ix1] - leftA %*% Xmat[, ix1])
  ssRleft = t(temp) %*% temp
  temp = as.vector(dXmat[, ix2] - rightA %*% Xmat[, ix2])
  ssRright = t(temp) %*% temp
  scan[i] = ssRfull - ssRleft - ssRright
}
# Keep strict local maxima of the scan statistic as change-point candidates,
# then pad with the two series endpoints.
candlist = NULL
for(i in nI:(length(time) - nI)){
  if(scan[i] == max(scan[(i - nI + 1): (i + nI)]) & scan[i] > min(scan[(i - nI + 1): (i + nI)]))
    candlist = c(candlist, i)
}
candlist = c(0, candlist, length(time))
# Estimate a dynamics matrix on every candidate segment and record its
# eigenvalues (stored column-wise in svdA).
fullA = array(NA, c(nrow(fMRI), nrow(fMRI), length(candlist)-1))
svdA = matrix(NA, nrow(fMRI), length(candlist)-1)
for(j in 1:(length(candlist) -1)){
  left = candlist[j] + 1
  right = candlist[j + 1]
  ix = left :right
  fullA[, , j] = dXmat[, ix] %*% t(Xmat[, ix] ) %*% ginv(Xmat[, ix] %*% t(Xmat[, ix]))
  svdA[, j] = (eigen(fullA[, , j])$values)
}
# Rank-r (r = 6) approximation of the summed segment matrices; its
# eigenvectors U and eigenvalues V define the simulation model below.
AA = apply(fullA, c(1, 2), sum)
AA = svd(AA)
r = 6
six = 1:r
AA = AA$u[, six] %*% diag(AA$d[six]) %*% t(AA$v[, six])
temp = eigen(AA)
U = temp$vector
V = temp$values
V = V #* c(1 , 1, 0.5, 0.5, 0.2, 0.2, rep(0, length(V) - length(six)))
# Ground-truth change points and per-segment eigenvalue scalings used for the
# simulated data; the hand-picked candlist0 overrides the detected candlist.
candlist0 = candlist
candlist0 =c(0, 50, 99, 144, 180) #candlist0[-2]# round(c(0.1, x0.23, 0.40, 0.65, 0.76, 0.91) * 180)#36, 99, 144
#candlist0 = c(0, 180)
ratio = seq(-7, 8, length.out = 12)
#ratio = ratio[-c(1:3)]
#ratio = rep(ratio[1:(r/2)], each = 2)
svdA = matrix(NA, nrow(Xmat), length(candlist0)-1)
for(j in 1:(length(candlist0)-1)){
  svdA[, j] = V * ratio[j]/10
}
# Zero out everything beyond the first r eigenvalues (rank-r truth).
svdA[(r + 1):nrow(svdA), ] = 0
# Persist the simulation model (eigenvectors + per-segment eigenvalues).
Res <- list()
Res[[1]] <- U
Res[[2]] <- svdA
save(Res, file="SimuEigen.RData")
# Simulate nsim noisy trajectories from the piecewise-linear ODE
# dX1 = U diag(svdA[, k]) U^{-1} X1 * step, where the segment index k
# advances at each change point in candlist0; errV mixes fresh Gaussian
# noise into each observed column.
numchg =3
nsim = 100
chg = matrix(NA, nsim, numchg )
mestU = array(NA, c(nrow(U), ncol(U), nsim))
truematrix = datamatrix = vector('list')
k = 1
# Sparse (10% nonzero) noise-mixing matrix; assumes 90 regions — TODO
# confirm this matches nrow(fMRI).
errV = U %*% diag(svdA[, k]/10) %*%solve(U) * matrix(rbinom(90 *90, 1, 0.1), 90, 90)#
Xmat =Xmat1= dXmat1 = dXmat = matrix(NA, nrow(dfMRI), length(time))
Xmat1[, 1] =Xmat[, 1] = matrix(rnorm(nrow(Xmat), 0, (step)/8), nrow(Xmat),1)
for(itr in 1:nsim){
  time = seq(0, 2, length.out = 180)
  step = diff(time)[1]
  # Noisy observation of the shared initial state.
  Xmat[, 1] = Xmat1[, 1] + errV %*% matrix(rnorm(nrow(Xmat), 0, (step)/8), nrow(Xmat),1)
  k = 1
  dXmat1[, 1] = U %*% diag(svdA[, 1]) %*%solve(U)%*% Xmat1[, 1] * step
  #Xmat[, 1] = Xmat1[, 1] + errV %*% matrix(rnorm(nrow(Xmat), 0, (step)/8), nrow(Xmat),1)
  for(j in 2:length(time)){
    # errV = U %*% diag(svdA[, k]/4) %*%t(V)
    # Advance to the next dynamics segment at each change point.
    if(j %in% candlist0 & j != length(time)){
      k = k + 1
    }
    # Euler step for the noiseless trajectory, then add observation noise.
    Xmat1[, j]= Xmat1[, j-1] + dXmat1[, j-1]
    Xmat[, j] = Xmat1[, j] + errV %*% matrix(rnorm(nrow(Xmat), 0, (step)/8), nrow(Xmat),1)
    dXmat1[, j] = U [,six]%*% diag(svdA[six, k] * step) %*% solve(U)[six, ]%*% Xmat1[, j]
    # print(svdA[, k])
  }
  Xmat = Re(Xmat)
  # Quick visual sanity check of the simulated signals.
  plot(Xmat[1, ]~time, col = 1, type = 'l', lwd = 0.8, ylab = 'Signal', xlab = 'Time', ylim = range(Xmat), xlim = range(time))
  for(i in 1:90){
    lines (Xmat[i, ]~time, col = i, lwd = 0.8, ylab = 'Signal', xlab = 'Time')
  }
  dev.off()
  # Store the noisy (datamatrix) and noiseless (truematrix) trajectories.
  datamatrix[[itr]]= Re(Xmat)
  truematrix[[itr]] = Re(Xmat1)
  print(itr)
}
trueU = U
save(datamatrix, truematrix, candlist0, trueU, file = 'datamatrix3chg6rank.Rdata')
|
47bdff4c92b1a56ca85278e560be35f1d9441225
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/12597_0/rinput.R
|
e75ffab880b04e98af7f3f420a07f94169ba4e4a
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot the phylogenetic tree in 12597_0.txt and write it back out for
# downstream codeml processing.
library(ape)
phylo_tree <- read.tree("12597_0.txt")
tree_no_root <- unroot(phylo_tree)
write.tree(tree_no_root, file="12597_0_unrooted.txt")
|
ba344c354026732566111c1a6c3c90eb6f8bf18b
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961646-test.R
|
79f19c964b45b0878b2b7a6ed8f1e28411d8bc71
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
1609961646-test.R
|
# libFuzzer-derived regression input for diffrprojects:::dist_mat_absolute.
# x is the crash-triggering integer vector; y is deliberately empty.
testlist <- list(x = c(-1L, -1L, -256L, 0L, 16777215L, -1537L, -687865865L, -2097153L, -1895825409L, -42L, 439346687L, -2049L, -536870913L, -134225962L, 439353164L, 520093695L, -2745809L, -1L, -2686977L, -134225921L, -1L, -1L, -704643072L, -268435457L, 5046271L, -449314817L, -54964L, -701287629L, 872374298L, 805306112L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
# Invoke the internal routine with the recorded arguments; under valgrind
# this run checks for memory errors rather than a specific return value.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
# Print the structure of the result so the run produces observable output.
str(result)
|
d7f0621d598d749cc6aac55b5720bbdcf419d151
|
9aaa5cbb46e412971a8d70fbab1894f86177564c
|
/R/nearth.R
|
3b14d5a91641e980878ff9122367334a7732efbd
|
[] |
no_license
|
BigelowLab/nearth
|
06c73292a27024fbbf810b0372481d74e462becb
|
e4956a12cb82926602c809eccdca6ad607bef011
|
refs/heads/master
| 2021-01-24T02:59:02.912855
| 2019-01-23T14:42:26
| 2019-01-23T14:42:26
| 49,924,500
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,258
|
r
|
nearth.R
|
#' Convert a 4-element bbox vector to a matrix of two columns (x and y)
#'
#' @export
#' @param x a 4-element numeric vector of left, right, bottom, top coordinates
#' @param close logical, if TRUE the polygon ring is closed so that the first
#'   and last vertices coincide
#' @return a matrix of 2 columns and either 5 rows (closed) or 4 rows (open)
bbox_to_matrix <- function(x = c(-72,-63,39,46), close = TRUE){
  # Corner order: bottom-left, bottom-right, top-right, top-left.
  xs <- c(x[1], x[2], x[2], x[1])
  ys <- c(x[3], x[3], x[4], x[4])
  if (close) {
    # Repeat the first vertex to close the ring.
    xs <- c(xs, x[1])
    ys <- c(ys, x[3])
  }
  matrix(c(xs, ys), ncol = 2)
}
#' Convert a 4-element bbox vector to a SpatialPolygons object
#'
#' @export
#' @param bb a 4-element numeric vector of left, right, bottom, top coordinates
#' @param proj_string a proj4string suitable to pass to \code{sp::CRS()}
#' @return a SpatialPolygons object
bbox_to_SpatialPolygons <- function(bb = c(-72,-63,39,46),
  proj_string = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"){
  # Single rectangular ring with the fixed id "bb".
  ring <- sp::Polygon(bbox_to_matrix(bb))
  ring_set <- sp::Polygons(list(ring), "bb")
  sp::SpatialPolygons(list(ring_set), proj4string = sp::CRS(proj_string))
}
#' Convert a 4-element bbox vector to a SpatialPolygonsDataFrame object
#'
#' @export
#' @param bb a 4-element numeric vector of left, right, bottom, top coordinates
#' @param ... further arguments for \code{bbox_to_SpatialPolygons}
#' @return a SpatialPolygonsDataFrame object
bbox_to_SpatialPolygonsDataFrame <- function(bb = c(-72,-63,39,46),...){
  polys <- bbox_to_SpatialPolygons(bb, ...)
  ids <- names(polys)
  # Attach a minimal attribute table keyed by the polygon ids.
  sp::SpatialPolygonsDataFrame(polys,
    data = data.frame(ID = ids, row.names = ids))
}
# Clip polygons (shapefile data) to a bounding box
#
# @export
# @param PP Spatial* object
# @param bb the 4 element bounding box to clip to [left, right, bottom, top]
# @return the clipped Spatial* object
#' Test for the existence of a Natural Earth path in R's \code{options()}
#'
#' @export
#' @param what character, either 'vector' (default) or 'raster'
#' @return named logical, TRUE if the path exists. When the option is set the
#'   result is named by the path itself (callers such as \code{nearth_path}
#'   rely on this); when unset it is named by the option key.
has_nearth <- function(what = c('vector', 'raster')[1]){
  # Resolve which option key stores the requested path. Use what[1]
  # consistently (the original mixed tolower(what) and tolower(what[1])).
  opt_name <- switch(tolower(what[1]),
                     'raster' = 'NEARTH_RASTER_PATH',
                     'NEARTH_VECTOR_PATH')
  # getOption() is the idiomatic replacement for options(opt_name)[[1]].
  opt_value <- getOption(opt_name)
  if (is.null(opt_value)){
    msg <- switch(tolower(what[1]),
                  'raster' = 'please set options to include NEARTH_RASTER_PATH',
                  'please set options to include NEARTH_VECTOR_PATH')
    cat(msg, "\n")
    x <- FALSE
    names(x) <- opt_name
  } else {
    # Name the result by the path so callers can recover it via names().
    names(opt_value) <- opt_value
    x <- vapply(opt_value, file.exists, logical(1))
  }
  x
}
#' Retrieve the Natural Earth path
#'
#' @export
#' @param what character, either 'vector' (default) or 'raster'
#' @return the Natural Earth path, or "" when it is not configured
nearth_path <- function(what = c('vector', 'raster')[1]){
  found <- has_nearth(what = what[1])
  # has_nearth() names its result by the path itself when the option is set.
  if (!found[1]) {
    ""
  } else {
    names(found)[1]
  }
}
#' Find the full path for a vector dataset
#'
#' @export
#' @param name character, one or more names to find
#' @param ext character the file extension to seek, by default '.shp'
#' @param path character the path to the Natural Earth vector datasets
#' @return a named character vector; a missing file yields an empty result
find_nearth_vectors <- function(name = 'ne_50m_coastline',
                                ext = '.shp',
                                path = nearth_path(what = 'vector') ){
  stopifnot(has_nearth(what='vector'))
  # Turn each dataset name into a filename glob, then a regex.
  patterns <- glob2rx(paste0("*", name, ext))
  names(patterns) <- name
  find_one <- function(pattern) {
    list.files(path, pattern = pattern, full.names = TRUE, recursive = TRUE)
  }
  sapply(patterns, find_one)
}
#' Find the full path for a raster dataset
#'
#' @export
#' @param name character, one or more names to find
#' @param ext character the file extension to seek, by default '.tif'
#' @param path character the path to the Natural Earth raster datasets
#' @return a named character vector; a missing file yields an empty result
find_nearth_rasters <- function(name = 'GRAY_50M_SR_O',
                                ext = '.tif',
                                path = nearth_path(what = 'raster')){
  stopifnot(has_nearth(what='raster'))
  # Turn each dataset name into a filename glob, then a regex.
  patterns <- glob2rx(paste0("*", name, ext))
  names(patterns) <- name
  find_one <- function(pattern) {
    list.files(path, pattern = pattern, full.names = TRUE, recursive = TRUE)
  }
  sapply(patterns, find_one)
}
#' Strip the extension (e.g. ".ext") off one or more filenames
#'
#' @export
#' @param x character one or more filenames
#' @param sep character, separator between name and extension
#' @return named character vector; names without a separator are unchanged
strip_extension <- function(x, sep = "."){
  drop_ext <- function(fname, sep = '.'){
    # Locate every occurrence of the separator; truncate before the last one.
    hits <- gregexpr(sep, fname[1], fixed = TRUE)[[1]]
    last <- length(hits)
    # gregexpr reports match.length -1 when there is no match at all.
    if (attr(hits, "match.length")[last] > 0) {
      fname <- substring(fname, 1, hits[last] - 1)
    }
    fname
  }
  sapply(x, drop_ext, sep = sep)
}
#' Read one vector data set using \code{rgdal::readOGR()}
#'
#' @export
#' @param filename character, one file name
#' @param bb optional bounding box to clip to [left, right, bottom, top]
#' @param ... further arguments for \code{rgdal::readOGR()}
#' @return Spatial* class object, or NULL when the read fails
read_nearth_vector <- function(filename, bb = NULL, ...){
  stopifnot(requireNamespace("rgdal", quietly = TRUE))
  stopifnot(has_nearth(what='vector'))
  stopifnot(file.exists(filename[1]))
  # readOGR wants the directory and the bare layer name separately.
  layer_name <- strip_extension(basename(filename[1]))
  obj <- try(rgdal::readOGR(dsn = dirname(filename[1]),
                            layer = layer_name,
                            ...))
  if (inherits(obj, "try-error")) {
    return(NULL)
  }
  if (!is.null(bb)) {
    # Clip to the bounding box, reusing the layer's own projection.
    clip_box <- bbox_to_SpatialPolygonsDataFrame(bb, proj_string = sp::proj4string(obj))
    obj <- raster::crop(obj, clip_box)
  }
  obj
}
#' Read one raster data set using \code{raster::raster()}, \code{raster::stack()}
#' or \code{raster::brick()}
#'
#' @export
#' @param filename character, one file name
#' @param form character one of "raster", "stack", "brick" (default)
#' @param bb optional bounding box to clip to [left, right, bottom, top]
#' @param ... further arguments for the chosen raster constructor
#' @return Raster* class object, or NULL when the read fails
read_nearth_raster <- function(filename,
                               form = c("raster", "stack", "brick")[3],
                               bb = NULL,
                               ...){
  stopifnot(requireNamespace("raster", quietly = TRUE))
  stopifnot(has_nearth(what='raster'))
  stopifnot(file.exists(filename[1]))
  # Dispatch to the requested constructor; an unknown form yields NULL,
  # matching the original behaviour.
  obj <- switch(tolower(form[1]),
                "raster" = try(raster::raster(filename[1], ...)),
                "stack"  = try(raster::stack(filename[1], ...)),
                "brick"  = try(raster::brick(filename[1], ...)) )
  if (inherits(obj, "try-error")) {
    return(NULL)
  }
  if (!is.null(bb)) {
    # Clip to the bounding box, reusing the raster's own projection.
    clip_box <- bbox_to_SpatialPolygonsDataFrame(bb, proj_string = sp::proj4string(obj))
    obj <- raster::crop(obj, clip_box)
  }
  obj
}
#' Read one or more Natural Earth data files.
#'
#' @export
#' @param name character, one or more names to find, must point to either
#'   vectors or rasters but not a mix of the two. You can also provide the
#'   full filepath but providing just the name can be easier.
#' @param what character either 'vector' (default) or 'raster'
#' @param ... further arguments for \code{nearth::read_nearth_raster()} or
#'   \code{nearth::read_nearth_vector()}
#' @return a list of Spatial* or Raster* objects, one per name; NULL for
#'   names that could not be located
read_nearth <- function(name = 'ne_50m_coastline',
                        what = c('vector', 'raster')[1],
                        ...){
  stopifnot(has_nearth(what=what[1]))
  name <- strip_extension(basename(name))
  # Select the finder and reader once; the two branches of the original
  # duplicated the whole lapply() body.
  if (tolower(what[1]) == "raster"){
    ff <- find_nearth_rasters(name)
    read_one <- read_nearth_raster
  } else {
    ff <- find_nearth_vectors(name)
    read_one <- read_nearth_vector
  }
  X <- lapply(ff,
              function(f, ...){
                # list.files() can return character(0) for a missing file;
                # the length check avoids an error in `if (nchar(f) > 0)`.
                if (length(f) == 1 && nchar(f) > 0){
                  read_one(f, ...)
                } else {
                  NULL
                }
              },
              ...)
  invisible(X)
}
|
80b3468f16964db6bd310f0af628649ee998f6f3
|
3a6fa2e7370f06fefc35b327a157e11cb40fb7a7
|
/man/bisonR-package.Rd
|
4a6c462de0a995da00b232e362a9b11c305042b9
|
[
"MIT"
] |
permissive
|
JHart96/bisonR
|
70e5294ea3cc08d80e8815d9a9ee64100cda53db
|
f1d1b0731fe63c4c6e01f877e6040f313cdfabb5
|
refs/heads/main
| 2023-08-18T03:18:45.911681
| 2023-07-28T18:06:39
| 2023-07-28T18:06:39
| 471,447,630
| 4
| 1
|
NOASSERTION
| 2023-07-28T18:06:41
| 2022-03-18T16:52:44
|
R
|
UTF-8
|
R
| false
| true
| 274
|
rd
|
bisonR-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bisonR-package.R
\docType{package}
\name{bisonR-package}
\alias{bisonR-package}
\alias{bisonR}
\title{The 'bisonR' package.}
\description{
An R package for Bayesian Inference of Social Networks
}
|
51f071ff475d74fa7b9c6b31f18a3cdd64e54b5a
|
cbdba435e722691c8dc1b4fdaf7bad8eac798d12
|
/R/custom.period.summary.R
|
b0f9191a17fd1030048a722a5fbb4ddd6d4687d6
|
[] |
no_license
|
PALkitchen/activPAL
|
3d02eb612d3b8e4f09f1d329e94133c8e2a321ed
|
353266e8822db94ae097b675ba8420f42c73e972
|
refs/heads/master
| 2023-07-13T08:34:16.664201
| 2023-07-04T19:12:44
| 2023-07-04T19:12:44
| 195,839,458
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,592
|
r
|
custom.period.summary.R
|
# Build one wide summary row per custom period for a single participant.
#
# Combines walk-test peak stepping (30 s / 2 min / 6 min / 12 min windows),
# time-in-bed breaks, non-wear time, sedentary / upright / travel summaries,
# median cadence bands, MVPA and overall activity totals.
#
# Args:
#   input_folder, file_name: unused in this body; kept for interface
#     compatibility with callers. NOTE(review): confirm before removing.
#   id: participant identifier, forwarded to cadence processing.
#   events_file_data: period-annotated events (uid/period_name/period_date).
#   full_events_file: raw events used for the walk-test windows.
#   custom_periods: period definitions; column 5 is the uid, column 2 keeps
#     its original name (expected to be period_date, used as a join key).
# Returns: a data frame with one row per period; NA cells replaced by 0.
custom.period.summary <-
  function(input_folder,file_name,id,events_file_data,full_events_file,custom_periods){
    # Peak stepping over sliding windows (window length in seconds).
    walk_test_30_s <- activpal.stepping.process.file.by.period(full_events_file,30,86400,custom_periods)
    walk_test_2_min <- activpal.stepping.process.file.by.period(full_events_file,120,86400,custom_periods)
    walk_test_6_min <- activpal.stepping.process.file.by.period(full_events_file,360,86400,custom_periods)
    walk_test_12_min <- activpal.stepping.process.file.by.period(full_events_file,720,86400,custom_periods)
    # Period skeleton: uid, (date), name, start, end.
    observation_summary <- custom_periods[,c(5,1,2,3,4)]
    colnames(observation_summary)[c(1,3:5)] <- c("uid","period_name","period_start","period_end")
    if(nrow(walk_test_30_s) == 0){
      # No stepping bouts at all: report zero peaks for every window length.
      walk_test_summary <- observation_summary
      walk_test_summary$peak_steps_30_seconds <- 0
      walk_test_summary$peak_steps_2_minute <- 0
      walk_test_summary$peak_steps_6_minute <- 0
      walk_test_summary$peak_steps_12_minute <- 0
    }else{
      walk_test_30_s <- format.walk.test.by.period(walk_test_30_s, "30_seconds")
      walk_test_2_min <- format.walk.test.by.period(walk_test_2_min, "2_minute")
      walk_test_6_min <- format.walk.test.by.period(walk_test_6_min, "6_minute")
      walk_test_12_min <- format.walk.test.by.period(walk_test_12_min, "12_minute")
      # BUG FIX: the original call passed walk_test_12_min a second time as a
      # positional argument, which inner_join() silently absorbed via its
      # `copy` parameter. The duplicate has been removed.
      walk_test_summary <- dplyr::inner_join(observation_summary,
        dplyr::inner_join(dplyr::inner_join(dplyr::inner_join(
          walk_test_30_s,walk_test_2_min, by = c("period_name","period_date")),
          walk_test_6_min, by = c("period_name","period_date")),
          walk_test_12_min, by = c("period_name","period_date")),
        by = c("period_name","period_date"))
    }
    # Breaks in time in bed.
    lying_time_breaks <- process.breaks.in.time.in.bed.by.period(events_file_data)
    lying_time_breaks <- lying_time_breaks %>% dplyr::filter(!is.na(period_name))
    colnames(lying_time_breaks)[4] <- "Time in Bed Breaks"
    # Non-wear duration, rounded to 2 decimal places.
    non_wear_data <- build.non.wear.summary.by.period(events_file_data)
    non_wear_data <- non_wear_data %>% dplyr::select(uid, period_name, period_date, bout_duration)
    non_wear_data$bout_duration <- round(non_wear_data$bout_duration,2)
    colnames(non_wear_data)[4] <- "Non Wear"
    # Sedentary time, widened to one column per bout-length category.
    sedentary_data <- build.sedentary.summary.by.period(events_file_data)
    sedentary_data <- sedentary_data[,-c(5)] %>%
      dplyr::filter(!is.na(period_name)) %>%
      tidyr::pivot_wider(names_from = "bout_length", names_expand = TRUE, values_from = "bout_duration")
    sedentary_data[is.na(sedentary_data)] <- 0
    sedentary_data[,c(4:ncol(sedentary_data)),] <- round(sedentary_data[,c(4:ncol(sedentary_data)),],3)
    upright_data <- build.upright.summary.by.period(events_file_data)
    upright_data <- format.upright.data.by.period(upright_data)
    upright_data[,c(4:ncol(upright_data)),] <- round(upright_data[,c(4:ncol(upright_data)),],3)
    # NOTE(review): stepping_data and time_first_step_data are computed but
    # never joined below -- kept to preserve the original behaviour.
    stepping_data <- build.stepping.summary.by.period(events_file_data)
    stepping_data <- stepping_data %>% dplyr::filter(!is.na(period_name))
    travel_data <- build.travel.summary.by.period(events_file_data)
    travel_data <- format.travel.data.by.period(travel_data)
    travel_data[,c(4:ncol(travel_data)),] <- round(travel_data[,c(4:ncol(travel_data)),],3)
    median_cadence_data <- median.cadence.bands.by.period(events_file_data,id, upright_bout = FALSE)
    median_cadence_data <- format.median.cadence.by.period(median_cadence_data)
    median_cadence_data[,c(4:ncol(median_cadence_data)),] <- round(median_cadence_data[,c(4:ncol(median_cadence_data)),],1)
    mvpa_data <- build.stepping.intensity.summary.by.period(events_file_data)
    mvpa_data <- format.mvpa.data.by.period(mvpa_data)
    mvpa_data[,c(4:ncol(mvpa_data)),] <- round(mvpa_data[,c(4:ncol(mvpa_data)),],1)
    time_first_step_data <- build.time.to.first.step.summary.by.period(events_file_data)
    time_first_step_data <- time_first_step_data %>% dplyr::filter(!is.na(period_name))
    # Total steps per period/date.
    daily_stepping_data <- events_file_data %>%
      dplyr::group_by(.data$uid, .data$period_name, .data$period_date) %>%
      dplyr::summarise(Steps = sum(.data$steps))
    daily_stepping_data$period_date <- as.Date(daily_stepping_data$period_date, origin = "1970-01-01")
    activity_data <- build.activity.summary.by.period(events_file_data, custom_periods)
    # Assemble the wide summary; left joins keep every defined period.
    observation_summary <- dplyr::left_join(observation_summary, sedentary_data, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, lying_time_breaks, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, daily_stepping_data, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, upright_data, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, non_wear_data, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, mvpa_data, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, median_cadence_data, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, travel_data, by = c("uid","period_name","period_date"))
    observation_summary <- dplyr::left_join(observation_summary, walk_test_summary, by = c("uid","period_name","period_date","period_start","period_end"))
    observation_summary <- dplyr::left_join(observation_summary, activity_data, by = c("uid","period_name","period_date"))
    observation_summary[is.na(observation_summary)] <- 0
    return(observation_summary)
  }
# Reduce a walk-test table to its key columns and rename the step count to
# a window-specific peak column (e.g. peak_steps_30_seconds).
format.walk.test.by.period <-
  function(walk_test, period){
    peak_col <- paste("peak_steps", period, sep = "_")
    out <- dplyr::select(walk_test, period_name, period_date, steps)
    colnames(out)[3] <- peak_col
    out
  }
# Widen upright-bout durations into one column per bout-length category.
# Cycling bouts are dropped; NA cells (categories absent for a period) are 0.
format.upright.data.by.period <-
function(upright_data){
# Fix the level order so pivot_wider(names_expand = TRUE) always emits all
# four columns in this order, even when a category is absent.
upright_data$bout_length <- factor(upright_data$bout_length,
levels = c("Quiet Standing","Stepping (< 1 minute)","Stepping (1 - 10 minutes)","Stepping (10 minutes +)"))
# Column 6 is dropped positionally -- NOTE(review): fragile if the upstream
# builder changes its column layout.
upright_summary <- upright_data[,-c(6)] %>%
dplyr::filter(!is.na(period_name)) %>%
dplyr::filter(bout_length != "Cycling") %>%
tidyr::pivot_wider(names_from = "bout_length", values_from = "bout_duration", names_expand = TRUE)
upright_summary[is.na(upright_summary)] <- 0
return(upright_summary)
}
# Widen travel-bout durations into one column per bout-length category;
# categories absent for a period become 0.
format.travel.data.by.period <-
  function(travel_data){
    wide_travel <- travel_data[,-c(5)] %>%
      dplyr::filter(!is.na(period_name)) %>%
      tidyr::pivot_wider(names_from = "bout_length", values_from = "bout_duration")
    wide_travel[is.na(wide_travel)] <- 0
    wide_travel
  }
# Widen stepping-intensity data into one column per (intensity, duration)
# combination, e.g. "MPA (75 - 100 spm)_short (< 60s)".
format.mvpa.data.by.period <-
function(mvpa_data){
# Fix level order so names_expand = TRUE emits every combination, in order.
mvpa_data$category <- factor(mvpa_data$category,
levels = c("LPA (< 75 spm)", "MPA (75 - 100 spm)",
"MVPA (100 - 125 spm)", "VPA (> 125 spm)"))
mvpa_data$duration <- factor(mvpa_data$duration, levels = c("short (< 60s)","long (>= 60s)"))
# names_from/values_from are positional (columns 4-6) -- NOTE(review):
# fragile if the upstream builder changes its column layout.
mvpa_summary <- mvpa_data %>%
dplyr::filter(!is.na(period_name)) %>%
tidyr::pivot_wider(names_from = c(4,5), values_from = 6, names_expand = TRUE)
return(mvpa_summary)
}
# Widen median cadence values into one labelled column per stepping-duration
# group (e.g. "Median Cadence < 1 minute").
# FIX: `mutate` is now namespace-qualified (dplyr::mutate) for consistency
# with the rest of the file, so the function also works when dplyr is not
# attached to the search path.
format.median.cadence.by.period <-
  function(median_cadence_data){
    median_cadence_data <- median_cadence_data %>%
      dplyr::filter(!is.na(group)) %>%
      dplyr::mutate(group = paste("Median Cadence ",group,sep=""))
    # Fix the level order so names_expand = TRUE emits all three columns.
    median_cadence_data$group <- factor(median_cadence_data$group,
      levels = c("Median Cadence < 1 minute","Median Cadence 1 - 10 minutes","Median Cadence 10 minutes +"))
    median_cadence_data <- median_cadence_data %>%
      tidyr::pivot_wider(names_from = "group", values_from = "median_cadence", names_expand = TRUE)
    return(median_cadence_data)
  }
|
820505e0e1b8616d7feebaa3521e623917d21c1a
|
c2abe804dca918b233df380b23a1acc3e2fa9315
|
/GG_plot.R
|
47f8d602ec7b4e489dc3a9fc6bd7b638612d8059
|
[] |
no_license
|
anumaryjacob/git_demo_ipalnt
|
ab44effacf58763a1abea9c80cec66d587daac3a
|
478df4c77f60187fd7e16a7fdecea61e5cbf7222
|
refs/heads/master
| 2021-01-25T03:18:49.056313
| 2015-02-23T00:07:02
| 2015-02-23T00:07:02
| 31,173,359
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,309
|
r
|
GG_plot.R
|
# Learning ggplot2 -- scatter plots, facets, boxplots and brewer palettes.
# February 22, 2015
# NOTE(review): install.packages() calls left in place as in the original
# tutorial script; comment them out after the first run.
install.packages("ggplot2", dependencies = TRUE)
install.packages("plyr")
install.packages("ggthemes")
install.packages("reshape2")
head(iris)
library("ggplot2")
library("reshape2")
library("plyr")
library("ggthemes")
# Basic scatter plot of sepal dimensions.
myplot <- ggplot(data = iris, aes(x = Sepal.Length, y = Sepal.Width)) +
  geom_point()
myplot + geom_point(size = 3)
# Colour/shape by species, one panel per species via facet_grid.
myplot <- ggplot(data = iris, aes(x = Sepal.Length, y = Sepal.Width, color = Species)) +
  geom_point(aes(shape = Species), size = 3) +
  facet_grid( . ~ Species)
myplot
myplot + geom_point(size = 3)
# Same plot using facet_wrap instead of facet_grid.
myplot <- ggplot(data = iris, aes(x = Sepal.Length, y = Sepal.Width, color = Species)) +
  geom_point(aes(shape = Species), size = 3) +
  facet_wrap( ~ Species)
myplot
dim(diamonds)
# BUG FIX: dim(diamonds) returns c(nrow, ncol); `1:dim(diamonds)` used only
# the first element and emitted a warning.  Use seq_len(nrow(...)).
sample(seq_len(nrow(diamonds)), 1000)
d2 <- diamonds[sample(seq_len(nrow(diamonds)), 1000), ]
head(d2)
ggplot(data = d2, aes(x = carat, y = price, color = color)) +
  geom_point(size = 3)
myplot + geom_point(size = 3)
library(MASS)
# Boxplot of birth weight by race from the MASS birthwt data.
myplot <- ggplot(birthwt, aes(factor(race), bwt)) + geom_boxplot()
summary(myplot)
### Colors
library(RColorBrewer)
display.brewer.all()
# Grouped bar chart of the iris measurements with a brewer palette.
df <- melt(iris, id.vars = "Species")
ggplot(df, aes(Species, value, fill = variable)) +
  geom_bar(stat = "identity", position = "dodge") +
  scale_fill_brewer(palette = "Set1")
|
a9d5d41a6d8881ca5f9e46467d2a7f57bb90d5b7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/svWidgets/examples/Img.Rd.R
|
d8534c51236881f20c19ba1e0425a7272c2193cd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,047
|
r
|
Img.Rd.R
|
# Auto-extracted example script for svWidgets' Img help page.  The Tk demo
# below is wrapped in a "Not run" guard (##D lines) because it needs an
# interactive session with tcltk; only the library() call executes here.
library(svWidgets)
### Name: Img
### Title: Manipulate image resources for the GUIs
### Aliases: imgAdd imgDel imgGet imgNames imgType imgRead imgReadPackage
###   print.guiImg
### Keywords: utilities
### ** Examples
## Not run:
##D ## These cannot be run by examples() but should be OK when pasted
##D ## into an interactive R session with the tcltk package loaded
##D
##D imgNames()
##D myImg <- imgAdd(system.file("gui", "logoSciViews.gif", package = "svWidgets"))
##D myImg # Note that $Tk. is prepended to the name!
##D imgNames()
##D imgType(myImg)
##D ## Place that logo in a Tk window
##D timg <- winAdd("timg", title = "A Tk window with image", pos ="-40+20")
##D labImg <- tklabel(timg, image = imgGet(myImg), bg = "white")
##D tkpack(labImg)
##D ## When the image resource is deleted, it is not displayed any more (no error)
##D imgDel(myImg)
##D imgNames()
##D winDel("timg")
##D ## To read all image resources at once (place this in .Lib.first())
##D imgReadPackage("svWidgets")
##D imgNames()
##D rm(myImg)
## End(Not run)
|
71f844a817c1efd3d0d7f44be77c1b3fd9c871f7
|
cd5e312b4260bf3a40ed0df893617bdbde7ec47c
|
/man/chitest.plot2.Rd
|
dd4299f8b4fcd9ff75c9e8e7619045ebae0e3fc3
|
[] |
no_license
|
tjssu/ssutat
|
bbc4ae9146153c0d7605ef57998fac6eb68bfb0f
|
c7cc4ba7e3d62e146f80c50e33800bac9c9cf79d
|
refs/heads/master
| 2022-11-28T21:53:29.239850
| 2020-08-12T01:58:32
| 2020-08-12T01:58:32
| 285,228,403
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,075
|
rd
|
chitest.plot2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch12-fn.R
\name{chitest.plot2}
\alias{chitest.plot2}
\title{Plot the Chi-square Test}
\usage{
chitest.plot2(stat, df, alp = 0.05, side = "two", pup = 0.999, dig = 4,
ppt = 50)
}
\arguments{
\item{stat}{Chi-square test statistic}
\item{df}{Degree of freedom}
\item{alp}{Level of significance, Default: 0.05}
\item{side}{Type of the alternative hypothesis, Default: 'two'}
\item{pup}{Maximum probability for the range of x-axis, Default: 0.999}
\item{dig}{Number of digits below the decimal point, Default: 4}
\item{ppt}{Number of plot points in the critical region, Default: 50}
}
\value{
None.
}
\description{
Plot the Result of Chi-square Test.
}
\examples{
# Goodness-of-fit Test
x = c(31,26,22,18,13,10)
(ct = chisq.test(x))
chitest.plot2(stat=ct$stat, df=ct$para, side="up")
# Test of Homogeneity
x = c(20,16,29,21,14, 14,22,26,25,13, 18,24,32,18, 8, 8,18,33,16,25)
x = matrix(x, nrow=4, ncol=5, byrow=TRUE)
(ct = chisq.test(x))
chitest.plot2(stat=ct$stat, df=ct$para, side="up")
}
|
694b8974072739d07d93e2d93a12d2a5d9fc714d
|
28a2590fb4e6f6bb331d8537e6d4a94eff362d09
|
/r/coviz-ponge/app/app.R
|
355e594aeefc6623a71b591942a8b51c4c864080
|
[] |
no_license
|
timueh/sars-cov2-modelling-initiative
|
b611c75b4f14510c87c58cde8263742def72e1a2
|
c47f6c8c8b8f1975bed43608f6e753f499d244db
|
refs/heads/master
| 2022-12-06T16:23:46.112057
| 2020-08-27T10:34:15
| 2020-08-27T10:34:15
| 255,846,969
| 8
| 6
| null | 2020-05-04T11:17:11
| 2020-04-15T08:03:15
|
HTML
|
UTF-8
|
R
| false
| false
| 11,822
|
r
|
app.R
|
## COVID-19 German forecasting tool
## Johannes Ponge, Till sahlmüller European Research Center for Information Systems (ERCIS) at Muenster University (johannes.ponge@uni-muenster.de), March 2020
## includes code adapted from the following sources:
#https://github.com/eparker12/nCoV_tracker/
# load required packages
library(shiny)
library(geojsonio)
library(leaflet)
library(shinyWidgets)
library(shinydashboard)
library(shinyjs)
library(shinythemes)
library(dplyr)
library(rgdal)
library(ggplot2)
library(lubridate)
library(rmapshaper)
library(sp)
### COLORING ###
# Accent colour used for the cumulative case plot.
covid_col = "#cc4c02"
### APP STATE VARIABLES ###
# NOTE(review): `<<-` at top level assigns into the global environment.
projection_running <<- FALSE # flag to indicate that a projection was started
### DATA PROCESSING ###
# load data (defines covid_cases) -- see data_loader.R
source("data_loader.R")
# load projection model (defines init_projection_at_date / run_projection)
#source("graph.R")
source("model.R")
# load map
# load simplified geojson of German districts (Landkreise)
german_districts <- geojson_read("data/geo/json/landkreise-in-germany_small.geojson", what = "sp")
# Per-date national totals; `active` = infections minus recoveries.
# NOTE(review): the cum_* names suggest cumulative sums, but this groups by
# date -- confirm covid_cases carries cumulative counts per reporting date.
daily_cases = covid_cases %>%
group_by(date) %>%
summarize(cum_infections = sum(new_infections), cum_recoveries = sum(new_recoveries)) %>%
mutate(active = cum_infections - cum_recoveries)
# Date range of the observed data (used to bound UI date inputs).
casesMinDate = min(daily_cases$date)
casesMaxDate = max(daily_cases$date)
### MAP FUNCTIONS ###
# Plot the cumulative case counts up to and including plot_date.
cumulative_plot = function(daily_cases, plot_date) {
  cases_to_date = subset(daily_cases, date<=plot_date)
  p = ggplot(cases_to_date, aes(x = date, y = cum_infections, color = covid_col, group = 1)) +
    geom_line() +
    geom_point(size = 1, alpha = 0.8) +
    ylab("Daily Cumulative Cases") +
    theme_bw() +
    scale_colour_manual(values=c(covid_col)) +
    # Show the y axis in thousands ("K").
    scale_y_continuous(labels = function(l) {trans = l / 1000; paste0(trans, "K")}) +
    theme(legend.title = element_blank(), legend.position = "", plot.title = element_text(size=10),
          plot.margin = margin(5, 12, 5, 5))
  p
}
# Base leaflet map for the reported (RKI) infection view; zoom control is
# disabled and the view is centred on Germany.
rki_basemap <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>%
setView(lng = 8, lat = 50, zoom = 6) %>%
addTiles()
# Base leaflet map for the model-projection view (same extent and options).
projection_basemap <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>%
setView(lng = 8, lat = 50, zoom = 6) %>%
addTiles()
# Redraw the district polygons of the map identified by map_id, filled by
# the cumulative infections per district derived from `cases`.
update_map <- function(map_id, cases){
# retrieve the live map object through a leaflet proxy (no full re-render)
map <- leafletProxy(map_id)
# total infections per district (ags = official district key)
district_infections = cases %>%
group_by(ags) %>%
summarize(cum_infections = sum(new_infections)) %>%
mutate(ags = as.character(ags))
# align the counts with the polygon order of german_districts; districts
# with no reported cases get 0
viz_district = as.data.frame(german_districts$cca_2) %>%
mutate(ags = as.character(german_districts$cca_2)) %>%
select(ags) %>%
left_join(district_infections) %>%
mutate(cum_infections = replace(cum_infections, is.na(cum_infections), 0)) # replace N/A values with 0
# compute bins for the choropleth fill colour: aim for binsNo bins over the
# sorted unique counts
binsNo <- 7
uniqueCounts <- sort(unique(viz_district$cum_infections))
uniqueNo <- length(uniqueCounts)
r <- uniqueNo %% binsNo
s <- floor(uniqueNo / binsNo)
# pick row indices of the pivotal values that delimit the bins; the first r
# bins get one extra value so all uniqueNo values are covered
if(uniqueNo <= binsNo){
rows <- c(1:uniqueNo)
} else {
rows <- unique(c(seq(0, (s + 1) * r, s + 1), seq((s + 1) * r, uniqueNo, s)))
}
# keep only the pivotal counts as bin boundaries
bins <- data.frame(bin = uniqueCounts) %>%
filter(row_number() %in% rows)
if(uniqueNo <= binsNo)
bins = data.frame(bin = uniqueCounts)
# colour palette; 0 and 1 are always boundaries so zero-case districts get
# their own colour
pal <- colorBin(palette = "Reds", domain = viz_district$cum_infections, bins = unique(c(0,1,bins$bin)))
# remove existing shapes & controls, then redraw polygons and legend
map %>%
clearShapes() %>%
clearControls() %>%
addPolygons(data = german_districts, stroke = FALSE, smoothFactor = 0.3, fillOpacity = 0.6,
label = ~paste0(name_2, " (", viz_district$cum_infections, " cases)"), color = ~pal(viz_district$cum_infections)) %>%
addLegend(pal = pal, values = viz_district$cum_infections, opacity = 0.7, title = "Infections per District",
position = "bottomright")
}
### PROJECTION FUNCTIONS ###
# Run the spread projection starting from the reported RKI situation on
# `start_date` and render the result on the projection map. The model
# parameters (beta, mu, fractions, delta_t, days) are passed straight
# through to run_projection().
project_spread <- function(start_date, commutes_frac, air_frac, beta, mu, delta_t, days){
  # seed the model with the reported situation on the chosen date
  scenario = init_projection_at_date(covid_cases, start_date)
  # simulate, then translate simulated day offsets into calendar dates
  projection = run_projection(scenario, commutes_frac, air_frac, beta, mu, delta_t, days) %>%
    mutate(date = start_date + days(day))
  update_map("projection_map", projection)
}
# Shiny UI: a collapsible navbar with three tabs — the reported-cases map,
# the projection map with model controls, and a static About page.
ui <- navbarPage(theme = shinytheme("flatly"), collapsible = TRUE,
  "CoViz Germany 1.0", id="nav",
  # Reported Cases: full-screen leaflet map of RKI data with a draggable
  # info panel (cumulative-cases plot + mapping-date slider)
  tabPanel(
    title = "Reported Cases",
    value = "reported",
    div(class="outer",
      tags$head(includeCSS("styles.css")),
      leafletOutput("rki_map", width="100%", height="100%"),
      absolutePanel(id = "reported_controls", class = "panel panel-default",
        top = 80, left = 20, width = 250, fixed=TRUE,
        draggable = TRUE, height = "auto",
        h3("Quick Info", align = "left"),
        plotOutput("cumulative_plot", height="130px", width="100%"),
        # slider bounds come from the observed date range of the RKI data
        sliderInput("plot_date",
          label = h5("Select Mapping Date"),
          min = casesMinDate,
          max = casesMaxDate,
          value = casesMaxDate,
          timeFormat = "%d %b"
        ),
        h5("Project spread from here..."),
        actionButton("switch_tab_projection", "Switch to Projection")
      )
    )
  ),
  # Projected Cases: projection map plus model parameter controls.
  # shinyjs is initialised here so the server can enable/disable inputs.
  tabPanel(shinyjs::useShinyjs(),
    title = "Projection",
    value = "projected",
    div(class="outer",
      tags$head(includeCSS("styles.css")),
      leafletOutput("projection_map", width="100%", height="100%"),
      absolutePanel(id = "projection_controls", class = "panel panel-default",
        top = 80, left = 20, width = 250, fixed=TRUE,
        draggable = TRUE, height = "auto",
        h3("Configuration", align = "left"),
        sliderInput("projection_start_date",
          label = h5("Projection Start Date (Based on RKI Data)"),
          min = casesMinDate,
          max = casesMaxDate,
          value = casesMaxDate,
          timeFormat = "%d %b"
        ),
        sliderInput("projection_duration",
          label = h5("Projection Duration (Days)"),
          min = 0,
          max = 15,
          value = 5
        ),
        sliderInput("beta",
          label = h5("Infection Rate β"),
          min = 0,
          max = 1,
          value = 0.53
        ),
        sliderInput("mu",
          label = h5("Recovery Rate μ"),
          min = 0,
          max = 1,
          value = 0.02
        ),
        sliderInput("commutes_frac",
          label = h5("Fraction of Commutes"),
          min = 0,
          max = 1,
          value = 1
        ),
        sliderInput("air_frac",
          label = h5("Fraction of Air-Travel"),
          min = 0,
          max = 1,
          value = 1
        ),
        sliderInput("delta_t",
          label = h5("Fraction of time at home"),
          min = 0,
          max = 1,
          value = 0.5
        ),
        actionButton("toggle_run_projection", "Run Projection")
      )
    )
  ),
  # About: static markdown page (MathJax enabled for formulas)
  tabPanel(
    title = "About",
    value = "About",
    div(class="about",
      tags$head(includeCSS("styles.css")),
      withMathJax(includeMarkdown("about.md"))
    )
  )
)
# Server logic: renders both maps and the cumulative-cases plot, and wires
# up the buttons and sliders of the two map tabs.
server <- function(input, output, session) {

  # ids of every projection-configuration input; they are locked together
  # while a projection is running and released again on reset
  projection_inputs <- c("projection_start_date", "projection_duration",
                         "beta", "mu", "commutes_frac", "air_frac", "delta_t")

  # Enable or disable all projection controls at once (replaces the
  # previous copy-pasted per-control shinyjs::enable/disable calls).
  set_projection_controls_enabled <- function(enabled) {
    toggle <- if (enabled) shinyjs::enable else shinyjs::disable
    lapply(projection_inputs, toggle)
    invisible(NULL)
  }

  ## MAPS ##
  # map with RKI data
  output$rki_map <- renderLeaflet({
    rki_basemap
  })
  # projection map
  output$projection_map <- renderLeaflet({
    projection_basemap
  })

  ## PLOTS ##
  # plot daily cumulative cases up to the selected mapping date
  output$cumulative_plot <- renderPlot({
    cumulative_plot(daily_cases, input$plot_date)
  })

  ## BUTTONS ##
  # switch to the projection tab, carrying over the currently mapped date
  observeEvent(input$switch_tab_projection, {
    updateTabsetPanel(session, "nav", selected = "projected")
    updateSliderInput(session, "projection_start_date", value = input$plot_date, timeFormat = "%d %b")
  })

  # toggle between "run projection" and "reset projection"
  # NOTE(review): `projection_running` is a global flag assumed to be
  # initialised elsewhere in the app — confirm before reuse.
  observeEvent(input$toggle_run_projection, {
    projection_running <<- !projection_running
    if (projection_running) {
      # lock configuration while the projection runs
      set_projection_controls_enabled(FALSE)
      updateActionButton(session, "toggle_run_projection", label = "Reset Projection")
      # run projection with the configured parameters
      project_spread(start_date = input$projection_start_date,
                     commutes_frac = input$commutes_frac,
                     air_frac = input$air_frac,
                     beta = input$beta,
                     mu = input$mu,
                     delta_t = input$delta_t,
                     days = input$projection_duration)
    }
    else {
      # release controls and restore the run button
      set_projection_controls_enabled(TRUE)
      updateActionButton(session, "toggle_run_projection", label = "Run Projection")
    }
  })

  ## SLIDERS ##
  # redraw the RKI map when the mapping date changes.
  # BUG FIX: the filter now uses `<=` so the map includes cases reported on
  # the selected date itself, matching cumulative_plot()'s `date <= plot_date`.
  observeEvent(input$plot_date, {
    filtered_cases = covid_cases %>% filter(as.POSIXct(date) <= input$plot_date)
    update_map("rki_map", filtered_cases)
  })
  # redraw the projection basemap when the start date changes (same `<=` fix)
  observeEvent(input$projection_start_date, {
    filtered_cases = covid_cases %>% filter(as.POSIXct(date) <= input$projection_start_date)
    update_map("projection_map", filtered_cases)
  })
}
# Launch the Shiny application with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
7c0c870383131b2d0af8492e0a05fef5744f1753
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/GenForImp/R/missing.gen.R
|
089f0d0b46dbe152a27b61f046ca29183570782d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 271
|
r
|
missing.gen.R
|
# Generate a missing-data pattern for `mat` by delegating to missing.gen0()
# (which places `nummiss` missing values at random), redrawing the pattern
# until no row of the result is entirely NA.
#   mat:     numeric matrix to punch missing values into
#   nummiss: number of missing values to generate
# Returns `mat` with NAs inserted, guaranteed to keep at least one observed
# value in every row.
missing.gen <-
function(mat, nummiss){
  p <- ncol(mat)
  repeat{
    mmiss <- missing.gen0(mat, nummiss)
    # Largest number of NAs found in any single row.
    # FIX: replaces the fragile table()/apply()/names() construction, which
    # emitted a warning and returned -Inf when no row contained an NA.
    max.na <- max(rowSums(is.na(mmiss)))
    if(max.na < p) {break}
  }
  mmiss
}
|
abb7ed1f78f178e0ea8474f163b7737ddbafd32b
|
7f83f684b76b225e21f00ef721f846d371025521
|
/Atividade_2_5.R
|
171cc7e0c4cea4f9b6c6eb1570f5d0ef69b8be1c
|
[] |
no_license
|
vladmonteiro/eletiva_analise_de_dados
|
2225771ba7c145742f478d1835fc3852605f6c7d
|
650d283a68148ece6fda730f2fda237ea6c67d04
|
refs/heads/master
| 2023-06-10T13:56:19.802780
| 2021-07-05T02:33:37
| 2021-07-05T02:33:37
| 356,731,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 797
|
r
|
Atividade_2_5.R
|
library(poliscidata)
# Load the `world` data set bundled with poliscidata
banco2 <- world
# List all countries included in the data set
banco2$country
# Flag countries whose HDI is at or below 0.5
banco2$hdi<=0.5
# Show the regime type assigned to each country (keep as a one-column data frame)
banco2[ , "dem_level4", drop = FALSE]
# Flag observations of dem_level4 equal to "Authoritarian"
banco2$dem_level4 == "Authoritarian"
# Flag authoritarian countries located in Asia or in Africa.
# BUG FIX: `&` binds tighter than `|`, so the original expression selected
# (authoritarian AND Asia) OR (any country in Africa); grouping the region
# test with %in% makes the authoritarian condition apply to both regions.
banco2$dem_level4 == "Authoritarian" & banco2$regionun %in% c("Asia", "Africa")
# Flag observations of the religion variable equal to "Muslim".
# BUG FIX: match() returns match positions (1 or NA), not a logical
# indicator; `==` yields the intended TRUE/FALSE vector.
# NOTE(review): the column name is spelled `religoin` in the original code —
# confirm the actual variable name in the poliscidata world data set.
banco2$religoin == "Muslim"
|
84214d0aaad4a66861f30e90caa61b408c2fe0c4
|
ff69d89ee00a965096ca3aba779a527f748e74de
|
/2017.09 DAR analysis TEMPLATE.R
|
712c57eeed18d5222492b0a341227e0257d695aa
|
[] |
no_license
|
jchap14/ATACseq-Analysis
|
5cb1fe63caa0ca2eaaa3969b5d32f494b88a03a1
|
86a5d2896eb2242cd41382a76d7d05d20860bf1c
|
refs/heads/master
| 2021-04-03T06:44:20.542323
| 2018-03-16T06:06:33
| 2018-03-16T06:06:33
| 124,723,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,182
|
r
|
2017.09 DAR analysis TEMPLATE.R
|
##########################################################################################
########### DETERMINE DIFFERENTIALLY ACCESSIBLE (DA) REGIONS and GENERATE FIGS ###########
##########################################################################################
############################# Step 1: Always do this on the cluster ####
## FIX: the cluster-setup lines below are shell commands, not R code; left
## un-commented they make this .R file unparseable. Run them in a terminal
## before starting the R session, then continue with the R code below.
# screen -S R_session
# qlogin -l h_vmem=10G -l h_rt=24:00:00
# module add r
# cd /srv/gsfs0/projects/snyder/chappell/JR/SLO_newATAC/siKLF_ETS_analysis #or CWD
# R
library("DiffBind")
##### READ IN PEAKSETS with *.csv containing metadata
## set experiment title (used as the prefix of every output file below)
Title <- "siKLF_ETS"
metadata <- read.csv(paste(Title, ".metadata.csv", sep='')) #metadata has to match exact format as example
# have to set minOverlap to <2 (default) if wanting to test a lesser overlap
# this may cause a memory failure if qlogin not working well
treatment <- dba(sampleSheet=metadata, minOverlap=3)
## generate a correlation heatmap (initial clustering based on peak occupancy)
pdf(paste(Title,".peaks.correlation.heatmap.pdf", sep=''), width= 8, height= 8, pointsize= 14)
plot(treatment, main= "occupancy.correlation.heatmap")
dev.off()
##### generate plot to determine how many peaks overlap between samples
olap.rate <- dba.overlap(treatment,mode=DBA_OLAP_RATE)
pdf(paste(Title,".Peak-overlap-rate.plot.pdf", sep=''), width= 5, height= 5, pointsize= 14)
plot(olap.rate,type='b',ylab='# peaks',xlab='Overlap at least this many peaksets')
dev.off()
# ##### optional: get a dataframe of the full peakset used for DA testing
# full_overlapped_peaks<- dba.peakset(treatment, peaks= NULL, bRetrieve=T,
# minOverlap=1, DataType= DBA_DATA_FRAME)
## export for annotation
# write.table(full_overlapped_peaks, "SLO.full_peakset", sep="\t", row.names= F)
# export pre-counts DBA object for local manipulation. This may cause issues locally.
# dba.save(treatment, file='SLO_ATAC', dir='.', pre='dba_', ext='RData', bMinimize=FALSE)
##### COUNT READS UNDER MERGED (or CONSENSUS) PEAKSET
treatment <- dba.count(treatment, minOverlap=3, bParallel=T)
## get the full matrix of counts & export them for local manipulation
counts.matrix <- dba.peakset(treatment, bRetrieve=T, DataType=DBA_DATA_FRAME)
write.table(counts.matrix, paste(Title,".counts.matrix",sep=''), sep="\t", row.names= F)
## generate a correlation heatmap (affinity clustering takes read counts into account)
pdf(paste(Title,".affinity.correlation.heatmap.pdf", sep=''), width= 8, height= 8, pointsize = 14)
plot(treatment)
dev.off()
############### DIFFERENTIAL accessibility analysis locally ####
## set experiment title (must match the prefix used for the cluster outputs)
Title <- "siKLF_ETS"
## read in metadata
metadata <- read.delim(paste(Title, ".metadata.csv", sep=''), quote="\"'", sep = ",")
##### Read in the count matrix produced by dba.peakset() above
y <- read.delim(paste(Title, ".counts.matrix", sep=''), quote="\"'")
## ID columns containing count info (one column per sample)
CountCols <- as.character(metadata$SampleID)
## create an integer matrix from the count-containing columns
y.matrix <- as.matrix(y[,CountCols])
class(y.matrix) <- "integer"
# ##### construct a DESeq2 DataSet w/ count matrix for ATAC without blocking batch
require("DESeq2")
dds <- DESeqDataSetFromMatrix(countData= y.matrix, colData= metadata,
design= ~Condition) # ~batch + Condition (if desired)
dds <- dds[ rowSums(counts(dds)) > 1, ] ## pre-filter rows that have only 0 or 1 read
dds <- DESeq(dds) #run differential test
##### perform rlog transform of counts matrix to adjust for PCA/heatmap
rld <- rlog(dds)
log2.rlog.counts <- assay(rld); x <- log2.rlog.counts
## If desired: remove batch effect, merge corrected rLog counts w/ gene names for later incorporation
# NOTE(review): `x.symbols` is referenced later (DA-testing sections) but is
# only created by the commented-out line below — uncomment it before use.
# x <- removeBatchEffect(x, batch= as.character(metadata$Treatment)) #batch by date of prep
# x.symbols <- cbind(y[,c(1:3)], x)
##### PCA: PC2 vs PC1 ####
## assign a numeric matrix to mtrx (rlog-transformed counts from above)
mtrx <- x
## Calculations
require("genefilter")
rv <- rowVars(mtrx)
## pick the most variable peaks for the PCA; change the 10000 inside min()
## (e.g. to 500) to restrict the analysis to fewer top peaks
select <- order(rv, decreasing= T)[seq_len(min(10000, length(rv)))]
pca <- prcomp(t(mtrx[select, ]))
# proportion of total variance explained by each principal component
percentVar <- pca$sdev^2/sum(pca$sdev^2)
# first four PCs joined with the sample metadata for plotting
d <- data.frame(PC1 = pca$x[, 1], PC2 = pca$x[, 2], PC3 = pca$x[, 3], PC4 = pca$x[, 4], metadata)
## specify plot aesthetics (reused by the later PCA plots)
require("ggplot2")
color <- "Condition"
label <- d$SampleID
shape <- "Treatment"
mainTitle <- "PCA"
textSize <- element_text(size= 14)
## Plot PC1 vs PC2 with points labelled by sample id
a <- ggplot(d, aes_string(x= "PC1", y= "PC2", color=color, shape=shape, label=label)) +
geom_point(size= 3) + xlab(paste0("PC1: ", round(percentVar[1] * 100), "% variance")) +
ylab(paste0("PC2: ", round(percentVar[2] * 100), "% variance")) + coord_fixed() +
geom_text(aes(label=label),hjust=0, vjust=0, size= 5) + ggtitle(mainTitle) +
theme(plot.title= element_text(size= 14, face= "bold"), axis.text= textSize,
legend.text= textSize, legend.title= textSize, axis.title= textSize, strip.text= textSize)
plot(a)
## Export to powerpoint (appends a slide to the existing file)
require("export")
graph2ppt(file=paste(Title, ".PCA.pptx",sep=''), width=10, height=7, append=T)
##### Transcriptome PCA: PC3 vs PC2 ####
## Plot PC2 (x) against PC3 (y) using the `d`/`percentVar` objects from the
## previous PCA section.
## BUG FIX: aes_string previously selected PC3/PC4 while the axis labels
## reported percentVar[2]/percentVar[3]; the aesthetics are now aligned with
## the labels and the section's stated intent (PC3 vs PC2).
a <- ggplot(d, aes_string(x= "PC2", y= "PC3", color=color, shape=shape, label=label)) +
geom_point(size= 3) + xlab(paste0("PC2: ", round(percentVar[2] * 100), "% variance")) +
ylab(paste0("PC3: ", round(percentVar[3] * 100), "% variance")) + coord_fixed() +
geom_text(aes(label=label),hjust=0, vjust=0, size= 5) + ggtitle(mainTitle) +
theme(plot.title= element_text(size= 14, face= "bold"), axis.text= textSize,
legend.text= textSize, legend.title= textSize, axis.title=textSize, strip.text=textSize)
plot(a)
## Export to powerpoint (appends a slide to the existing file)
graph2ppt(file=paste(Title, ".PCA.pptx",sep=''), width=10, height=7, append=T)
##### Heatmap of top variable genes ####
# NOTE(review): this overwrites the global `x` with its top-variable subset;
# anything after this point sees only the subset — confirm that is intended.
x <- x[select, ] #set x to the top variable genes from above
## cluster rows and columns by 1 - Pearson correlation, complete linkage
hr <- hclust(as.dist(1-cor(t(x), method='pearson')), method='complete')
hc <- hclust(as.dist(1-cor(x, method='pearson')), method='complete')
## Heatmap2 w/ color bar. h & k modify cutree (k overrides h). Specify margins here as well.
mycl <- cutree(hr, h=max(hr$height)/3, k = 5);
# NOTE(review): sample() without set.seed() makes the side-bar colours
# non-reproducible between runs.
mycol <- sample(rainbow(256)); mycol <- mycol[as.vector(mycl)]
my_palette <- colorRampPalette(c("blue", "black", "yellow"))(n = 299)
## generate heatmap with rows clustered, but not columns
require(gplots)
png('10ktopVarPeaks.heatmap_r.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=NA, dendrogram= c("row"), col=my_palette, scale="row",
density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## generate a heatmap with rows & columns clustered
png('10ktopVarPeaks.heatmap_rc.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), dendrogram= c("both"), col=my_palette,
scale="row", density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## plot & export column dendrograms
require(dendextend)
hc.dend <- hc %>% as.dendrogram #convert to dendrogram
plot(hc.dend, main = "Column Dendrogram")
graph2ppt(file="10ktopVarPeaks.dendrograms.pptx", width=10, height=7, append=T)
## plot & export row dendrograms
hr.dend <- hr %>% as.dendrogram #convert to dendrogram
hr.dend.cut <- cut(hr.dend, h= 1.9) #cut at desired height
plot(hr.dend.cut$upper, horiz = T, main = "Row Dendrogram")
graph2ppt(file="10ktopVarPeaks.dendrograms.pptx", width=10, height=7, append=T)
#################################################################################################
##### DA Testing: LS_vs_ST ####
## differential accessibility between Condition levels LS and ST
res <- results(dds, contrast=c("Condition","LS","ST"))
summary(res)
resMF_df <- as.data.frame(res)
## merge significant results w/ gene names & rlog counts
# NOTE(review): `x.symbols` is only created by a commented-out line in the
# setup section above — it must be defined before this cbind will run.
resMF_counts <- cbind(resMF_df, x.symbols)
res.all <- resMF_counts[order(resMF_counts$padj),] #order the results by the smallest adj pval
DARs.LS_vs_ST.all_res <- res.all
##### CHIPSEEKER PACKAGE TO ANNOTATE PEAKS
# NOTE(review): `txdb` and the ChIPseeker/GenomicRanges functions used below
# are not loaded anywhere in this template — confirm the required library()
# calls and txdb object elsewhere.
FDR <- 0.01 #set desired FDR for remaining analysis here
DARs.LS_vs_ST <- subset(res.all, padj < FDR)
##### convert to gRanges objects
DARs.LS_vs_ST.gRanges <- makeGRangesFromDataFrame(DARs.LS_vs_ST,keep.extra.columns=T)
promoter <- getPromoters(TxDb=txdb, upstream=2500, downstream=500)
##### ANNOTATE DARs, subset BEDs for Motif search, & export
DARs.LS_vs_ST.anno <- annotatePeak(DARs.LS_vs_ST.gRanges, tssRegion=c(-2500, 500), TxDb=txdb, annoDb="org.Hs.eg.db")
DARs.LS_vs_ST.anno.df <- as.data.frame(DARs.LS_vs_ST.anno)
## calculate average read counts for LS & ST
# NOTE(review): the hard-coded column indices here and below assume a fixed
# annotatePeak() output layout plus the sample-count columns — verify after
# any change to the metadata or annotation settings.
DARs.LS_vs_ST.anno.df$LS_avg <- rowMeans(DARs.LS_vs_ST.anno.df[,c(12:14)])
DARs.LS_vs_ST.anno.df$ST_avg <- rowMeans(DARs.LS_vs_ST.anno.df[,c(17:20)])
## reorder columns for easy interpretation
DARs.LS_vs_ST.anno.df <- DARs.LS_vs_ST.anno.df[,c(1:4,7,11,31,29,33:34,12:13,15:16,19,14,17,18,20,26,22:28,30,32)]
## export annotated DARs (FDR < 0.01 set above)
write.table(DARs.LS_vs_ST.anno.df, "DARs.LS_vs_ST.FDR1.txt", sep="\t", row.names=F)
##### Subset DARs by LS- or ST- enrichment & make BEDs for Motif search
## LS-enriched DARs (positive log2 fold change, LS over ST)
DARs.LS_vs_ST.LS_enriched <- subset(DARs.LS_vs_ST.anno.df, log2FoldChange > 0)
DARs.LS_vs_ST.LS_enriched.bed <- DARs.LS_vs_ST.LS_enriched[,c(1:3)]
## add a name_distToTSS column to bed for ID after motif finding
DARs.LS_vs_ST.LS_enriched.bed$namedist <- paste(DARs.LS_vs_ST.LS_enriched$SYMBOL,
DARs.LS_vs_ST.LS_enriched$distanceToTSS, sep='_')
write.table(DARs.LS_vs_ST.LS_enriched.bed, "DARs.LS_vs_ST.LS_enriched.bed", sep="\t", row.names=F, col.names=F, quote=F)
## ST-enriched DARs (note: `< -0` is simply `< 0`)
DARs.LS_vs_ST.ST_enriched <- subset(DARs.LS_vs_ST.anno.df, log2FoldChange < -0)
DARs.LS_vs_ST.ST_enriched.bed <- DARs.LS_vs_ST.ST_enriched[,c(1:3)]
## add a name_distToTSS column to bed for ID after motif finding
DARs.LS_vs_ST.ST_enriched.bed$namedist <- paste(DARs.LS_vs_ST.ST_enriched$SYMBOL,
DARs.LS_vs_ST.ST_enriched$distanceToTSS, sep='_')
write.table(DARs.LS_vs_ST.ST_enriched.bed, "DARs.LS_vs_ST.ST_enriched.bed", sep="\t", row.names=F, col.names=F, quote=F)
##### Subset DARs by Proximal or Distal (>3kb) & make BEDs for Motif search
## subset TSS_proximal (within -/+ 3kb) DARs
## (regions at exactly 3000 bp fall into neither subset)
DARs.LS_vs_ST.LS_enriched.TSSprox <- subset(DARs.LS_vs_ST.LS_enriched, abs(distanceToTSS) < 3000)
DARs.LS_vs_ST.LS_enriched.TSSdist <- subset(DARs.LS_vs_ST.LS_enriched, abs(distanceToTSS) > 3000)
DARs.LS_vs_ST.ST_enriched.TSSdist <- subset(DARs.LS_vs_ST.ST_enriched, abs(distanceToTSS) > 3000)
DARs.LS_vs_ST.ST_enriched.TSSprox <- subset(DARs.LS_vs_ST.ST_enriched, abs(distanceToTSS) < 3000)
##### VISUALIZE GENOMIC ANNOTATION
## create LS- & ST- annotated objects from the enrichment BEDs built above
DARs.LS_vs_ST.LS.gRanges <- makeGRangesFromDataFrame(DARs.LS_vs_ST.LS_enriched.bed)
DARs.LS_vs_ST.ST.gRanges <- makeGRangesFromDataFrame(DARs.LS_vs_ST.ST_enriched.bed)
DARs.LS_vs_ST.LS.anno <- annotatePeak(DARs.LS_vs_ST.LS.gRanges, tssRegion=c(-2500, 500),
TxDb=txdb, annoDb="org.Hs.eg.db")
DARs.LS_vs_ST.ST.anno <- annotatePeak(DARs.LS_vs_ST.ST.gRanges, tssRegion=c(-2500, 500),
TxDb=txdb, annoDb="org.Hs.eg.db")
## make a VennPie graphic of the genomic-feature annotation for each set
vennpie(DARs.LS_vs_ST.LS.anno) ## generates genome annotation vennpie chart
graph2ppt(file="DARs.LS_vs_ST.LS_genome_anno.ppt", width=7, height=7, append=T); dev.off()
vennpie(DARs.LS_vs_ST.ST.anno) ## generates genome annotation vennpie chart
graph2ppt(file="DARs.LS_vs_ST.ST_genome_anno.ppt", width=7, height=7, append=T); dev.off()
##### COMPARISON OF MULTIPLE PEAKSETS
## named list so the plots below label the LS and ST sets
fileslist <- list(DARs.LS_vs_ST.LS=DARs.LS_vs_ST.LS.gRanges,
DARs.LS_vs_ST.ST=DARs.LS_vs_ST.ST.gRanges)
# note: redefines `promoter` with a wider +/-3kb window than the earlier one
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrixList <- lapply(fileslist, getTagMatrix, windows=promoter)
##### AVERAGE PROFILES around the TSS for both peaksets
pdf('DARs.LS_vs_ST.avg_binding.pdf', width= 10, height= 6, pointsize= 14)
plotAvgProf(tagMatrixList, xlim=c(-3000, 3000))
dev.off()
##### PEAK HEATMAPS around the TSS
png('DARs.LS_vs_ST.binding_heatmaps.png', width= 6, height= 10, units= "in", res= 300, pointsize= 14)
tagHeatmap(tagMatrixList, xlim=c(-3000, 3000), color=NULL)
dev.off()
##### PEAK ANNOTATION COMPARISION (feature distribution & distance to TSS)
peakAnnoList <- lapply(fileslist, annotatePeak, TxDb=txdb, tssRegion=c(-3000, 3000), verbose=F)
#
plotAnnoBar(peakAnnoList)
graph2ppt(file="DARs.LS_vs_ST.annotate_compare_bar.ppt", width=10, height=6, append=T)
dev.off()
#
plotDistToTSS(peakAnnoList)
graph2ppt(file="DARs.LS_vs_ST.annotate_compare_bar.ppt", width=10, height=6, append=T)
dev.off()
##### DAR Heatmap
# NOTE(review): overwrites the global `x` with the DAR count columns
# (hard-coded indices 11:19 — verify against the reordered anno.df layout).
x <- as.matrix(DARs.LS_vs_ST.anno.df[,c(11:19)])
## cluster rows and columns by 1 - Pearson correlation, complete linkage
hr <- hclust(as.dist(1-cor(t(x), method='pearson')), method='complete')
hc <- hclust(as.dist(1-cor(x, method='pearson')), method='complete')
## Heatmap2 w/ color bar. h & k modify cutree (k overrides h). Specify margins here as well.
mycl <- cutree(hr, h=max(hr$height)/3, k = 5);
# NOTE(review): unseeded sample() makes the side-bar colours non-reproducible
mycol <- sample(rainbow(256)); mycol <- mycol[as.vector(mycl)]
my_palette <- colorRampPalette(c("blue", "black", "yellow"))(n = 299)
## generate heatmap with rows clustered, but not columns
png('DARs.LS_vs_ST.heatmap_r.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=NA, dendrogram= c("row"), col=my_palette, scale="row",
density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## generate a heatmap with rows & columns clustered
png('DARs.LS_vs_ST.heatmap_rc.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), dendrogram= c("both"), col=my_palette,
scale="row", density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## plot & export column dendrograms
hc.dend <- hc %>% as.dendrogram #convert to dendrogram
plot(hc.dend, main = "Column Dendrogram")
graph2ppt(file="DARs.LS_vs_ST.dendrograms.pptx", width=10, height=7, append=T)
## plot & export row dendrograms
hr.dend <- hr %>% as.dendrogram #convert to dendrogram
hr.dend.cut <- cut(hr.dend, h= 1.9) #cut at desired height
plot(hr.dend.cut$upper, horiz = T, main = "Row Dendrogram")
graph2ppt(file="DARs.LS_vs_ST.dendrograms.pptx", width=10, height=7, append=T)
##### DA Testing: LS_vs_OS ####
## Differential-accessibility test LS vs OS using the DESeq2 object `dds`
## (built earlier in the file); `x.symbols` carries gene names + rlog counts.
res <- results(dds, contrast=c("Condition","LS","OS"))
summary(res)
resMF_df <- as.data.frame(res)
## merge significant results w/ gene names & rlog counts
resMF_counts <- cbind(resMF_df, x.symbols)
res.all <- resMF_counts[order(resMF_counts$padj),] #order the results by the smallest adj pval
DARs.LS_vs_OS.all_res <- res.all
##### CHIPSEEKER PACKAGE TO ANNOTATE PEAKS
FDR <- 0.01 #set desired FDR for remaining analysis here
DARs.LS_vs_OS <- subset(res.all, padj < FDR)
##### convert to gRanges objects
DARs.LS_vs_OS.gRanges <- makeGRangesFromDataFrame(DARs.LS_vs_OS,keep.extra.columns=T)
promoter <- getPromoters(TxDb=txdb, upstream=2500, downstream=500)
##### ANNOTATE DARs, subset BEDs for Motif search, & export
DARs.LS_vs_OS.anno <- annotatePeak(DARs.LS_vs_OS.gRanges, tssRegion=c(-2500, 500), TxDb=txdb, annoDb="org.Hs.eg.db")
DARs.LS_vs_OS.anno.df <- as.data.frame(DARs.LS_vs_OS.anno)
# calculate average read counts for LS & OS
# NOTE(review): columns 12:14 / 15:16 are assumed to be the LS / OS rlog-count
# columns of the annotated frame — verify if the upstream cbind changes.
DARs.LS_vs_OS.anno.df$LS_avg <- rowMeans(DARs.LS_vs_OS.anno.df[,c(12:14)])
DARs.LS_vs_OS.anno.df$OS_avg <- rowMeans(DARs.LS_vs_OS.anno.df[,c(15:16)])
#reorder columns for easy interpretation (hard-coded positions; brittle if anno changes)
DARs.LS_vs_OS.anno.df <- DARs.LS_vs_OS.anno.df[,c(1:4,7,11,31,29,33:34,12:13,15:16,19,14,17,18,20,26,22:28,30,32)]
#export annotated DARs (FDR < 0.01, tab-separated)
write.table(DARs.LS_vs_OS.anno.df, "DARs.LS_vs_OS.FDR1.txt", sep="\t", row.names=F)
##### Subset DARs by LS- or OS- enrichment & make BEDs for Motif search
## LS-enriched DARs
DARs.LS_vs_OS.LS_enriched <- subset(DARs.LS_vs_OS.anno.df, log2FoldChange > 0)
DARs.LS_vs_OS.LS_enriched.bed <- DARs.LS_vs_OS.LS_enriched[,c(1:3)]
## add a name_distToTSS column to bed for ID after motif finding
DARs.LS_vs_OS.LS_enriched.bed$namedist <- paste(DARs.LS_vs_OS.LS_enriched$SYMBOL,
DARs.LS_vs_OS.LS_enriched$distanceToTSS, sep='_')
write.table(DARs.LS_vs_OS.LS_enriched.bed, "DARs.LS_vs_OS.LS_enriched.bed", sep="\t", row.names=F, col.names=F, quote=F)
## OS-enriched DARs
DARs.LS_vs_OS.OS_enriched <- subset(DARs.LS_vs_OS.anno.df, log2FoldChange < -0)
DARs.LS_vs_OS.OS_enriched.bed <- DARs.LS_vs_OS.OS_enriched[,c(1:3)]
## add a name_distToTSS column to bed for ID after motif finding
DARs.LS_vs_OS.OS_enriched.bed$namedist <- paste(DARs.LS_vs_OS.OS_enriched$SYMBOL,
DARs.LS_vs_OS.OS_enriched$distanceToTSS, sep='_')
write.table(DARs.LS_vs_OS.OS_enriched.bed, "DARs.LS_vs_OS.OS_enriched.bed", sep="\t", row.names=F, col.names=F, quote=F)
##### Subset DARs by Proximal or Distal (>3kb) & make BEDs for Motif search
## subset TSS_proximal (within -/+ 3kb) DARs
DARs.LS_vs_OS.LS_enriched.TSSprox <- subset(DARs.LS_vs_OS.LS_enriched, abs(distanceToTSS) < 3000)
DARs.LS_vs_OS.LS_enriched.TSSdist <- subset(DARs.LS_vs_OS.LS_enriched, abs(distanceToTSS) > 3000)
DARs.LS_vs_OS.OS_enriched.TSSdist <- subset(DARs.LS_vs_OS.OS_enriched, abs(distanceToTSS) > 3000)
DARs.LS_vs_OS.OS_enriched.TSSprox <- subset(DARs.LS_vs_OS.OS_enriched, abs(distanceToTSS) < 3000)
##### VISUALIZE GENOMIC ANNOTATION
## ChIPseeker visualizations of where the LS-/OS-enriched DARs fall in the
## genome (vennpie), their average profile around TSS, and annotation bars.
## create LS- & OS- annotated objects
DARs.LS_vs_OS.LS.gRanges <- makeGRangesFromDataFrame(DARs.LS_vs_OS.LS_enriched.bed)
DARs.LS_vs_OS.OS.gRanges <- makeGRangesFromDataFrame(DARs.LS_vs_OS.OS_enriched.bed)
DARs.LS_vs_OS.LS.anno <- annotatePeak(DARs.LS_vs_OS.LS.gRanges, tssRegion=c(-2500, 500),
TxDb=txdb, annoDb="org.Hs.eg.db")
DARs.LS_vs_OS.OS.anno <- annotatePeak(DARs.LS_vs_OS.OS.gRanges, tssRegion=c(-2500, 500),
TxDb=txdb, annoDb="org.Hs.eg.db")
## make a VennPie graphic
vennpie(DARs.LS_vs_OS.LS.anno) ## generates genome annotation vennpie chart
graph2ppt(file="DARs.LS_vs_OS.LS_genome_anno.ppt", width=7, height=7, append=T); dev.off()
vennpie(DARs.LS_vs_OS.OS.anno) ## generates genome annotation vennpie chart
graph2ppt(file="DARs.LS_vs_OS.OS_genome_anno.ppt", width=7, height=7, append=T); dev.off()
##### COMPARISON OF MULTIPLE PEAKSETS
fileslist <- list(DARs.LS_vs_OS.LS=DARs.LS_vs_OS.LS.gRanges,
DARs.LS_vs_OS.OS=DARs.LS_vs_OS.OS.gRanges)
## NOTE(review): `promoter` is rebuilt here with +/-3kb, silently replacing the
## +2500/-500 window defined in the previous section.
promoter <- getPromoters(TxDb=txdb, upstream=3000, downstream=3000)
tagMatrixList <- lapply(fileslist, getTagMatrix, windows=promoter)
##### AVERAGE PROFILES
pdf('DARs.LS_vs_OS.avg_binding.pdf', width= 10, height= 6, pointsize= 14)
plotAvgProf(tagMatrixList, xlim=c(-3000, 3000))
dev.off()
##### PEAK HEATMAPS
png('DARs.LS_vs_OS.binding_heatmaps.png', width= 6, height= 10, units= "in", res= 300, pointsize= 14)
tagHeatmap(tagMatrixList, xlim=c(-3000, 3000), color=NULL)
dev.off()
##### PEAK ANNOTATION COMPARISON
peakAnnoList <- lapply(fileslist, annotatePeak, TxDb=txdb, tssRegion=c(-3000, 3000), verbose=F)
## stacked bar of genomic-feature annotation per peakset
plotAnnoBar(peakAnnoList)
graph2ppt(file="DARs.LS_vs_OS.annotate_compare_bar.ppt", width=10, height=6, append=T)
dev.off()
## distribution of peak distance to TSS per peakset
plotDistToTSS(peakAnnoList)
graph2ppt(file="DARs.LS_vs_OS.annotate_compare_bar.ppt", width=10, height=6, append=T)
dev.off()
##### DAR Heatmap
## Row-scaled heatmaps of the rlog counts for the LS_vs_OS DARs, plus exported
## dendrograms. Columns 11:19 index the sample-count columns of the REORDERED
## annotated frame (see column reorder above).
x <- as.matrix(DARs.LS_vs_OS.anno.df[,c(11:19)])
## cluster rows by pearson, complete
hr <- hclust(as.dist(1-cor(t(x), method='pearson')), method='complete')
hc <- hclust(as.dist(1-cor(x, method='pearson')), method='complete')
## Heatmap2 w/ color bar. h & k modify cutree (k overrides h). Specify margins here as well.
mycl <- cutree(hr, h=max(hr$height)/3, k = 5);
## NOTE(review): sample() here is not seeded, so the row-side colors differ on
## every run — set.seed() first if reproducible figures are required.
mycol <- sample(rainbow(256)); mycol <- mycol[as.vector(mycl)]
my_palette <- colorRampPalette(c("blue", "black", "yellow"))(n = 299)
## generate heatmap with rows clustered, but not columns
png('DARs.LS_vs_OS.heatmap_r.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=NA, dendrogram= c("row"), col=my_palette, scale="row",
density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## generate a heatmap with rows & columns clustered
png('DARs.LS_vs_OS.heatmap_rc.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), dendrogram= c("both"), col=my_palette,
scale="row", density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## plot & export column dendrograms
hc.dend <- hc %>% as.dendrogram #convert to dendrogram
plot(hc.dend, main = "Column Dendrogram")
graph2ppt(file="DARs.LS_vs_OS.dendrograms.pptx", width=10, height=7, append=T)
## plot & export row dendrograms
hr.dend <- hr %>% as.dendrogram #convert to dendrogram
hr.dend.cut <- cut(hr.dend, h= 1.9) #cut at desired height
plot(hr.dend.cut$upper, horiz = T, main = "Row Dendrogram")
graph2ppt(file="DARs.LS_vs_OS.dendrograms.pptx", width=10, height=7, append=T)
##### DA Testing: ST_vs_OS # finds no DARs with FDR=10% ####
res <- results(dds, contrast=c("Condition","ST","OS"))
summary(res) # finds no DARs with FDR=10%, thus stop here for now
##### Comparisons of the DARs found in LS_vs_ST & LS_vs_OS: DARs.LS_vs_dF.intersection ####
## Re-import the two exported DAR tables and overlap-join them on genomic
## coordinates with data.table::foverlaps (nomatch=0 => inner overlap join).
## Read them in again to avoid confusion
DARs.LS_vs_ST.df <- as.data.table(read.delim('DARs.LS_vs_ST.FDR1.txt', quote="\"'", sep = "\t"))
DARs.LS_vs_OS.df <- as.data.table(read.delim('DARs.LS_vs_OS.FDR1.txt', quote="\"'", sep = "\t"))
## Set DAR coords as key
setkey(DARs.LS_vs_ST.df, seqnames, start, end)
setkey(DARs.LS_vs_OS.df, seqnames, start, end)
## intersect DARs
DARs.LS_vs_dF.intersection <- foverlaps(DARs.LS_vs_ST.df, DARs.LS_vs_OS.df, type="any", nomatch=0)
## NOTE(review): filename uses lowercase "df" while every other artifact uses
## "dF" — confirm downstream readers expect this exact name.
write.table(DARs.LS_vs_dF.intersection, "DARs.LS_vs_df.intersection.txt", sep="\t", row.names=F, col.names=T, quote=F)
##### draw a venn diagram to illustrate overlap
## NOTE(review): the three areas are hard-coded from a previous run; they will
## not update if the input data change — consider computing them from the
## tables above.
draw.pairwise.venn(area1=4178 , area2=1283 , cross.area= 1187)
graph2ppt(file="DARs.LS_vs_dF.intersection.venn.pptx", width=3, height=3.15, append=T)
dev.off()
##### scatterplot fold changes & linear regress
## i.log2FoldChange is the LS_vs_ST fold change (foverlaps "i." prefix for the
## left table); log2FoldChange is LS_vs_OS.
ggplot(DARs.LS_vs_dF.intersection, aes(x=DARs.LS_vs_dF.intersection$log2FoldChange,
y=DARs.LS_vs_dF.intersection$i.log2FoldChange)) + geom_point(shape=1) +
geom_smooth(method=lm) + xlab("LS vs OS log2FC") + ylab("LS vs ST log2FC") +
ggtitle("DA of LS vs dF intersection") + theme(plot.title= element_text(size= 14,
face= "bold"), axis.text= element_text(size= 14), legend.text= element_text(size= 14),
legend.title= element_text(size= 14), axis.title= element_text(size= 14),
strip.text= element_text(size= 14))
graph2ppt(file="LS_vs_dF.intersection.scatterplot.ppt", width=3, height=3.15, append=T)
## fit linear model, get r2 (r is Pearson's correlation coefficient)
summary(lm(DARs.LS_vs_dF.intersection$i.log2FoldChange ~ DARs.LS_vs_dF.intersection$log2FoldChange))
##### make a heatmap
## Row-scaled heatmap of the per-sample counts (cols 11:19) of the
## intersection table, with exported dendrograms.
x <- as.matrix(DARs.LS_vs_dF.intersection[,c(11:19)])
## cluster rows by pearson, complete
hr <- hclust(as.dist(1-cor(t(x), method='pearson')), method='complete')
hc <- hclust(as.dist(1-cor(x, method='pearson')), method='complete')
## Heatmap2 w/ color bar. h & k modify cutree (k overrides h). Specify margins here as well.
mycl <- cutree(hr, h=max(hr$height)/3, k = 5);
## NOTE(review): unseeded sample() makes the row-side color assignment
## non-reproducible across runs.
mycol <- sample(rainbow(256)); mycol <- mycol[as.vector(mycl)]
my_palette <- colorRampPalette(c("blue", "black", "yellow"))(n = 299)
## generate a heatmap with rows & columns clustered
png('DARs.LS_vs_dF.intersection.heatmap_rc.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), dendrogram= c("both"), col=my_palette,
scale="row", density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## plot & export column dendrograms
hc.dend <- hc %>% as.dendrogram #convert to dendrogram
plot(hc.dend, main = "Column Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.intersection.dendrograms.pptx", width=10, height=7, append=T)
## plot & export row dendrograms
hr.dend <- hr %>% as.dendrogram #convert to dendrogram
hr.dend.cut <- cut(hr.dend, h= 1.9) #cut at desired height
plot(hr.dend.cut$upper, horiz = T, main = "Row Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.intersection.dendrograms.pptx", width=10, height=7, append=T)
##### Make a condition-averaged heatmap from DARs.LS_vs_dF.intersection
## make a heatmap from LS_vs_dF
## NOTE(review): columns 9, 10 and 38 are assumed to be the three per-condition
## average columns of the joined table — verify against the join output.
x <- as.matrix(DARs.LS_vs_dF.intersection[,c(9,10,38)])
## cluster rows by pearson, complete
hr <- hclust(as.dist(1-cor(t(x), method='pearson')), method='complete')
hc <- hclust(as.dist(1-cor(x, method='pearson')), method='complete')
## Heatmap2 w/ color bar. h & k modify cutree (k overrides h). Specify margins here as well.
mycl <- cutree(hr, h=max(hr$height)/3, k = 5);
mycol <- sample(rainbow(256)); mycol <- mycol[as.vector(mycl)]
my_palette <- colorRampPalette(c("blue", "black", "yellow"))(n = 299)
## generate a heatmap with rows & columns clustered
png('DARs.LS_vs_dF.intersection.avg.heatmap_rc.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), dendrogram= c("both"), col=my_palette,
scale="row", density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## plot & export column dendrograms
hc.dend <- hc %>% as.dendrogram #convert to dendrogram
plot(hc.dend, main = "Column Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.intersection.avg.dendrograms.pptx", width=10, height=7, append=T)
## plot & export row dendrograms
hr.dend <- hr %>% as.dendrogram #convert to dendrogram
hr.dend.cut <- cut(hr.dend, h= 1.9) #cut at desired height
plot(hr.dend.cut$upper, horiz = T, main = "Row Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.intersection.avg.dendrograms.pptx", width=10, height=7, append=T)
##### Split DARs.LS_vs_dF.intersection by LS or dF enriched for MOTIF finding ####
## Sign convention: log2FoldChange > 0 => more accessible in LS; < 0 => dF.
##### LS-enriched
DARs.LS_vs_dF.intersection.LS_enriched <- subset(DARs.LS_vs_dF.intersection, log2FoldChange > 0)
DARs.LS_vs_dF.intersection.LS_enriched.bed <- DARs.LS_vs_dF.intersection.LS_enriched[,c(1:3,7,8)]
## add a name_distToTSS column to bed for ID after motif finding
DARs.LS_vs_dF.intersection.LS_enriched.bed$namedist <- paste(DARs.LS_vs_dF.intersection.LS_enriched$SYMBOL,
DARs.LS_vs_dF.intersection.LS_enriched$distanceToTSS, sep='_')
## export coords + name column only (BED4-style, no header/quotes)
write.table(DARs.LS_vs_dF.intersection.LS_enriched.bed[,c(1:3,6)], "DARs.LS_vs_dF.intersection.LS_enriched.bed", sep="\t", row.names=F, col.names=F, quote=F)
##### dF-enriched
DARs.LS_vs_dF.intersection.dF_enriched <- subset(DARs.LS_vs_dF.intersection, log2FoldChange < 0)
DARs.LS_vs_dF.intersection.dF_enriched.bed <- DARs.LS_vs_dF.intersection.dF_enriched[,c(1:3,7,8)]
## add a name_distToTSS column to bed for ID after motif finding
DARs.LS_vs_dF.intersection.dF_enriched.bed$namedist <- paste(DARs.LS_vs_dF.intersection.dF_enriched$SYMBOL,
DARs.LS_vs_dF.intersection.dF_enriched$distanceToTSS, sep='_')
write.table(DARs.LS_vs_dF.intersection.dF_enriched.bed[,c(1:3,6)], "DARs.LS_vs_dF.intersection.dF_enriched.bed", sep="\t", row.names=F, col.names=F, quote=F)
##### Comparisons of the DARs found in either LS_vs_ST or LS_vs_OS: DARs.LS_vs_dF.union ####
## Stack the two DAR tables (coords + sample-count columns) and de-duplicate.
DARs.LS_vs_ST.df_sub <- DARs.LS_vs_ST.df[,c(1:3,11:19)]
DARs.LS_vs_OS.df_sub <- DARs.LS_vs_OS.df[,c(1:3,11:19)]
DARs.LS_vs_dF.union <- unique(rbind(DARs.LS_vs_ST.df_sub,DARs.LS_vs_OS.df_sub))
## calculate average values for each condition
## NOTE(review): the column positions below encode which samples belong to
## LS / OS / ST — verify against the sample order of the exported tables.
DARs.LS_vs_dF.union$LS_avg <- rowMeans(DARs.LS_vs_dF.union[,c(4,5,9)])
DARs.LS_vs_dF.union$OS_avg <- rowMeans(DARs.LS_vs_dF.union[,c(6,7)])
DARs.LS_vs_dF.union$ST_avg <- rowMeans(DARs.LS_vs_dF.union[,c(8,10:12)])
## make a heatmap (per-sample counts, row-scaled)
x <- as.matrix(DARs.LS_vs_dF.union[,c(4:12)])
## cluster rows by pearson, complete
hr <- hclust(as.dist(1-cor(t(x), method='pearson')), method='complete')
hc <- hclust(as.dist(1-cor(x, method='pearson')), method='complete')
## Heatmap2 w/ color bar. h & k modify cutree (k overrides h). Specify margins here as well.
mycl <- cutree(hr, h=max(hr$height)/3, k = 5);
## NOTE(review): unseeded sample() => non-reproducible row-side colors.
mycol <- sample(rainbow(256)); mycol <- mycol[as.vector(mycl)]
my_palette <- colorRampPalette(c("blue", "black", "yellow"))(n = 299)
## generate a heatmap with rows & columns clustered
png('DARs.LS_vs_dF.union.heatmap_rc.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), dendrogram= c("both"), col=my_palette,
scale="row", density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## plot & export column dendrograms
hc.dend <- hc %>% as.dendrogram #convert to dendrogram
plot(hc.dend, main = "Column Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.union.dendrograms.pptx", width=10, height=7, append=T)
## plot & export row dendrograms
hr.dend <- hr %>% as.dendrogram #convert to dendrogram
hr.dend.cut <- cut(hr.dend, h= 1.9) #cut at desired height
plot(hr.dend.cut$upper, horiz = T, main = "Row Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.union.dendrograms.pptx", width=10, height=7, append=T)
##### Make a condition-averaged heatmap from DARs.LS_vs_dF.union
## make a heatmap from LS_vs_dF (cols 13:15 are the *_avg columns added above)
x <- as.matrix(DARs.LS_vs_dF.union[,c(13:15)])
## cluster rows by pearson, complete
hr <- hclust(as.dist(1-cor(t(x), method='pearson')), method='complete')
hc <- hclust(as.dist(1-cor(x, method='pearson')), method='complete')
## Heatmap2 w/ color bar. h & k modify cutree (k overrides h). Specify margins here as well.
mycl <- cutree(hr, h=max(hr$height)/3, k = 5);
mycol <- sample(rainbow(256)); mycol <- mycol[as.vector(mycl)]
my_palette <- colorRampPalette(c("blue", "black", "yellow"))(n = 299)
## generate a heatmap with rows & columns clustered
png('DARs.LS_vs_dF.union.avg.heatmap_rc.png', width= 7, height= 7, units= "in", res= 300, pointsize= 14)
heatmap.2(x,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc), dendrogram= c("both"), col=my_palette,
scale="row", density.info="none", trace="none", RowSideColors=mycol, margins= c(20, 5)) #margins c(height,width)
dev.off()
## plot & export column dendrograms
hc.dend <- hc %>% as.dendrogram #convert to dendrogram
plot(hc.dend, main = "Column Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.union.avg.dendrograms.pptx", width=10, height=7, append=T)
## plot & export row dendrograms
hr.dend <- hr %>% as.dendrogram #convert to dendrogram
hr.dend.cut <- cut(hr.dend, h= 1.9) #cut at desired height
plot(hr.dend.cut$upper, horiz = T, main = "Row Dendrogram")
graph2ppt(file="DARs.LS_vs_dF.union.avg.dendrograms.pptx", width=10, height=7, append=T)
##### Generate the union of DARs to test some correlations ####
## Build one table with both comparisons' fold changes/padj per peak.
## NOTE(review): the column names "CHR","START","END" are assumed to come from
## x.symbols via the earlier cbind — confirm.
## import DA test results & fix names
LS_v_ST.fullDAtest <- DARs.LS_vs_ST.all_res[,c("CHR","START","END","log2FoldChange","padj")]
colnames(LS_v_ST.fullDAtest)[4:5] <- c("log2FC.LS_vs_ST","padj.LS_vs_ST")
LS_v_ST.fullDAtest$coords <- paste(LS_v_ST.fullDAtest$CHR, LS_v_ST.fullDAtest$START, LS_v_ST.fullDAtest$END)
## import DA test results, fix names, subset to necessary columns
LS_v_OS.fullDAtest <- DARs.LS_vs_OS.all_res[,c("CHR","START","END","log2FoldChange","padj")]
colnames(LS_v_OS.fullDAtest)[4:5] <- c("log2FC.LS_vs_OS","padj.LS_vs_OS")
LS_v_OS.fullDAtest$coords <- paste(LS_v_OS.fullDAtest$CHR, LS_v_OS.fullDAtest$START, LS_v_OS.fullDAtest$END)
## merge the 2 (inner join on the coordinate string: only peaks tested in both)
union2 <- merge(LS_v_ST.fullDAtest, LS_v_OS.fullDAtest, by.x= "coords", by.y= "coords")
## subset to DARs from either LS vs dF comparison
## NOTE(review): subset() drops rows where both padj are NA, which also removes
## peaks DESeq2 filtered out of one test — confirm this is intended.
DARs.union.df <- subset(union2, padj.LS_vs_ST < 0.1 | padj.LS_vs_OS < 0.1)
##### make correlation plots from the union of DARs ####
##### Test/plot correlation between DARs: LS vs ST & LS vs OS ####
## Scatterplot + linear fit of the two log2 fold-change columns in
## DARs.union.df (built above), exported to PowerPoint via graph2ppt.
Title <- "Fold Change of Union DARs" #set title for exported filenames
df <- DARs.union.df #set dataframe of interest to df
dfy <- df$log2FC.LS_vs_ST #set y feature
y_axis <- "LS_vs_ST (log2FC)" #set y axis
dfx <- df$log2FC.LS_vs_OS #set x feature
x_axis <- "LS_vs_OS (log2FC)" #set x axis (fixed: was mislabeled "log2FC (log2FC)")
Size <- element_text(size= 14) #set text size for plot
## scatterplot with best fit line
ggplot(df, aes(x=dfx, y=dfy)) + geom_point(shape=1) + geom_smooth(method=lm) + xlab(x_axis)+
ylab(y_axis) + ggtitle(Title) + theme(plot.title= element_text(size= 14, face= "bold"),
axis.text= Size, legend.text= Size, legend.title= Size,
axis.title= Size, strip.text= Size)
## fit linear model, get r2 (r is Pearson's correlation coefficient)
summary(lm(dfy ~ dfx))
graph2ppt(file=paste(Title,".correlations.pptx",sep=''), width=3, height=3.15, append=T)
##### Install/load required libraries ####
## One-time installation commands kept commented out for reference:
# source("http://bioconductor.org/biocLite.R")
# biocLite(pkgs = c("DESeq2","data.table","pasilla","DESeq","limma","ReportingTools",
# "GenomicRanges"))
# install.packages(pkgs= c("rJava","ReporteRs","ReporteRsjars","ggplot2","rtable","xtable",
# "VennDiagram","taRifx","devtools","dplyr","dendextend"))
# devtools::install_github('tomwenseleers/export',local=F)
## NOTE(review): sourcing a script over plain http executes remote code without
## integrity checks — consider vendoring my.colorFct.R into the project.
source("http://faculty.ucr.edu/~tgirke/Documents/R_BioCond/My_R_Scripts/my.colorFct.R")
zzz<-c("pheatmap","grid","gplots","ggplot2","export","devtools","DESeq2","pasilla","Biobase",
"EBSeq","dplyr","data.table", "genefilter","FactoMineR","VennDiagram","DOSE","ReactomePA",
"org.Hs.eg.db","clusterProfiler","pathview","DiffBind","dendextend","limma","ReportingTools",
"TxDb.Hsapiens.UCSC.hg19.knownGene","GO.db","ChIPseeker","GenomicRanges")
## NOTE(review): require() returns FALSE silently on a missing package; check
## the returned logical vector (or use library()) so failures are not missed.
lapply(zzz, require, character.only= T)
## shorthand handle to the hg19 transcript database used throughout the script
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
|
22daa6818a90b95f967130b27b96d0b39a929acf
|
5b730d892bbfb255a0de73255f244895109b5c6f
|
/inst/unitTests/test_ssgsea.R
|
595364ba84a4144cf85f797c18e420aae51400d1
|
[] |
no_license
|
rcastelo/GSVA
|
8a8079d44b5cbb96951cca0a38844acad6412830
|
0870c685c354a8c83af2563854bb8bfe37cf86e9
|
refs/heads/devel
| 2023-08-08T23:20:50.711327
| 2023-07-28T17:30:29
| 2023-07-28T17:30:29
| 102,104,624
| 136
| 46
| null | 2023-09-13T14:40:51
| 2017-09-01T11:02:06
|
R
|
UTF-8
|
R
| false
| false
| 1,790
|
r
|
test_ssgsea.R
|
## RUnit test for GSVA's ssGSEA method.
## Builds a seeded random 10-gene x 30-sample matrix with set1 genes shifted up
## in the last 15 samples, then checks: (1) ssGSEA score means/SDs match
## previously recorded values, (2) gene-set index mapping is stable, and
## (3) the fast random-walk implementation matches the reference one.
## RNG-order sensitive: do not reorder the set.seed()/rnorm() statements.
test_ssgsea <- function() {
p <- 10 ## number of genes
n <- 30 ## number of samples
nGrp1 <- 15 ## number of samples in group 1
nGrp2 <- n - nGrp1 ## number of samples in group 2
## consider three disjoint gene sets
geneSets <- list(set1=paste("g", 1:3, sep=""),
set2=paste("g", 4:6, sep=""),
set3=paste("g", 7:10, sep=""))
## sample data from a normal distribution with mean 0 and st.dev. 1
## seeding the random number generator for the purpose of this test
set.seed(123)
y <- matrix(rnorm(n*p), nrow=p, ncol=n,
dimnames=list(paste("g", 1:p, sep="") , paste("s", 1:n, sep="")))
## genes in set1 are expressed at higher levels in the last 'nGrp1+1' to 'n' samples
y[geneSets$set1, (nGrp1+1):n] <- y[geneSets$set1, (nGrp1+1):n] + 2
## estimate GSVA enrichment scores for the three sets
es <- gsva(y, geneSets, method="ssgsea", verbose=FALSE)
## regression checks against previously recorded score summaries
checkTrue(max(abs(rowMeans(es) - c(0.22893323, -0.04400744, -0.08289233))) < 1e-08)
checkTrue(max(abs(apply(es, 1,sd) - c(0.2562903, 0.2260589, 0.2268853))) < 1e-07)
## map gene-set symbols to row indices of y
gset.idx.list <- lapply(geneSets,
function(x, y) na.omit(match(x, y)),
rownames(y))
## NOTE(review): fast.gset.idx.list is computed by the exact same expression as
## gset.idx.list, so the checkIdentical below is trivially true — presumably one
## of these was meant to use a different (fast) mapping function; confirm.
fast.gset.idx.list <- lapply(geneSets,
function(x, y) na.omit(match(x, y)),
rownames(y))
checkIdentical(gset.idx.list, fast.gset.idx.list)
## per-sample integer ranks of the expression values
R <- apply(y, 2, function(x ,p) as.integer(rank(x)), p)
alpha <- 0.25
Ra <- abs(R)^alpha
## the fast random walk must numerically match the reference implementation
## for every sample (internal GSVA functions)
for (i in 1:n) {
geneRanking <- order(R[, i], decreasing=TRUE)
frw <- GSVA:::.fastRndWalk(gset.idx.list[[1]], geneRanking, i, Ra)
rw <- GSVA:::.rndWalk(gset.idx.list[[1]], geneRanking, i, R, alpha)
checkEqualsNumeric(rw, frw)
}
}
|
bfc4b871870e813dc61130ef8939dc594d890ad5
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/Unsorted/dec17.R
|
b65d1aee88d42a2605fb37674563a7589eea80cc
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,296
|
r
|
dec17.R
|
## Console-transcript style practice script (paste/dates/factors/aggregation/
## apply-family). Several lines below are deliberate or accidental errors;
## they are flagged with NOTE(review) rather than changed.
## NOTE(review): `se = ":"` does NOT partially match `sep` (named args after
## `...` need full names), so ":" is pasted as an extra element -> "a b :".
paste("a", "b", se = ":")
x= 1:5
y= NULL
is.null(x)
## f ignores `b`; f(2) below works only because `b` is lazily evaluated and
## never used.
f <- function(a, b) a^2
f(2)
paste("a", "b", sep = ":")
paste("a", "b", se = ":")
Sys.Date()
class(as.Date('1970-01-02'))
x = 1:5
## NOTE(review): this errors — apply() needs a MARGIN and an array input;
## sqrt(x) (vectorized) or sapply(x, sqrt) was probably intended.
apply(x, FUN=sqrt)
?apply
data(package = .packages(all.available = TRUE))
paste('Data', 'Science', 'from', 'MUIT', sep='-')
month.abb[1:12]
## NOTE(review): `sd` now shadows stats::sd for the rest of this script.
(sd = paste('01',month.abb[1:6],2016,sep='-'))
sd1 = as.Date(sd,'%d-%b-%Y')
saledate = rep(sd1,times=4)
saledate
dept = rep(c('Dept1','Dept2'), each=12)
(dept = factor(dept, ordered=T, levels=c('Dept2', 'Dept1')))
(city = rep(c('Delhi','Noida'), times=2, each=6))
set.seed(1234)
saleamt = ceiling(runif(24, 100, 200))
set.seed(1234)
advamt = ceiling(runif(24, 25,40))
df = data.frame(saledate, dept=as.factor(dept), city=as.factor(city), saleamt, advamt)
df
with(df, boxplot(saleamt ~ dept))
with(df, boxplot(saleamt ~ dept + city))
agdeptcity = aggregate(df$saleamt, by=list(dept, city), mean)
pie(agdeptcity$x, labels=paste(agdeptcity$Group.1, agdeptcity$Group.2, sep='-'))
(t1 = xtabs(saleamt ~ dept + city, data=df))
margin.table(t1,c(2))
## NOTE(review): `sd` here is the character vector assigned above, not
## stats::sd — this call will not behave as intended.
addmargins(t1,c(1,2), FUN=list(list(sd,mean), sum))
prop.table(t1)*100
## NOTE(review): `city='noida'` names an (ignored) argument instead of
## comparing; no row filtering happens. Likely meant subset=(city=='Noida').
(df2= subset(df, city='noida', select=c(dept, saleamt,advamt)))
plot(y=df2$saleamt, x=df2$advamt)
abline(lm(df2$saleamt ~ df2$advamt))
cor(df2$saleamt, df2$advamt)
df2
## NOTE(review): order() uses the GLOBAL saleamt/advamt vectors, not df2's
## columns — works here only because lengths coincide. Prefer
## df2[order(df2$saleamt, -df2$advamt),].
df2[order(saleamt, -advamt),]
with(df2,plot(x=dept, y=saleamt))
## within() returns the modified copy; df2 itself is unchanged (see next line).
within(df2, profit <- saleamt - advamt)
df2
(mat2 = as.matrix(df2[2:3]))
sweep(df2[2:3],2,1,'+')
reshape2::melt(df2, id='dept')
## airquality has NAs in Ozone (col 1) and Solar.R (col 2); the block below
## inspects them and then mean-imputes both columns.
matrix1 = as.matrix(airquality)
matrix1
colSums(is.na(matrix1))
mean(matrix1[,1],na.rm=T)
colSums(is.na(matrix1))
## NOTE(review): sapply over a matrix iterates over individual ELEMENTS, so
## all(is.na(x)) is tested per cell, not per column.
sapply(matrix1, function(x)all(is.na(x)))
which(is.na(matrix1))
matrix1[which(is.na(matrix1[,1])),1]
length(matrix1[which(is.na(matrix1[,1])),1])
matrix1[which(is.na(matrix1[,2])),2]
length(matrix1[which(is.na(matrix1[,2])),2])
length(matrix1[which(is.na(matrix1[,1]))])
length(matrix1[which(is.na(matrix1[,2]))])
matrix1[which(is.na(matrix1[,2]))]
which(is.na(matrix1[,2]))
which(is.na(matrix1[,2]))
ceiling(mean(matrix1[,1],na.rm=T))
ceiling(mean(matrix1[,2],na.rm=T))
## mean-impute the NAs in columns 1 and 2
matrix1[which(is.na(matrix1[,1])),1] = ceiling(mean(matrix1[,1],na.rm=T))
matrix1[which(is.na(matrix1[,2])),2] = ceiling(mean(matrix1[,2],na.rm=T))
colSums(is.na(matrix1))
matrix1[which(is.na(matrix1[,2])),2]
## linear (element-wise) indexing into the matrix
matrix1[c(5,6,11,27,96,98)]
which(is.na(matrix1[,2]))
apply(matrix1, 2, mean)
## NOTE(review): matrix1[1:10] is the first 10 ELEMENTS (column-major), not the
## first 10 rows.
sapply(matrix1[1:10], log)
matrix1
## NOTE(review): matrix1[1:10] is a plain vector, so MARGIN=2 errors here.
apply(matrix1[1:10], 2, log)
?lapply
# List
(myList = list(nostudents = 1:10, school = 'Data Science',
course=c('PG','MSc')))
length(myList$course)
#myArray
set.seed(1234)
(x=ceiling(rnorm(2*3*5*4,50,10)))
depts=c('D1','D2')
courses=c('C1','C2','C3')
students=c('S1','S2','S3','S4','S5')
subjects=c('Sb1', 'Sb2', 'Sb3', 'Sb4')
## 4-D array: 5 students x 4 subjects x 3 courses x 2 depts
myArray = array(x, dim = c(5,4,3,2), dimnames = list(students, subjects, courses,depts))
myArray
sum(myArray)
## NOTE(review): malformed — lapply has no MARGIN; apply(myArray, 4, mean) was
## presumably intended (cf. the corrected calls below).
lapply(myArray,c(4), mean)
?apply
apply(myArray, c(2), function(x) max(x))
apply(myArray, c(4), function(x) mean(x))
apply(myArray, c(1,3,4), function(x) sum(x))
apply(myArray, c(2), function(x) sd(x))
(myList = list(rollno = 100:120, school = 'Data Science',
course=c('PG','MSc')))
length(myList$rollno)
x = 11:20
## print squares of selected elements in the given (non-sequential) order
for (i in c(5,2,7)) {
print(x[i]^2)
}
## Element-wise square root.
##
## @param x A non-negative numeric vector.
## @return A numeric vector the same length as `x`, containing sqrt(x).
## (idiom fix: top-level assignment with `<-` instead of `=`)
func1 <- function(x) {
  sqrt(x)
}
func1(x)
|
f0e5138632d11bd78cee2fb89df1da0b4b62eddf
|
35f844f6f5145265ffdd48c14522756030263dba
|
/man/TableCells.Rd
|
8c63eb80025797432a39d06d896df503b942a2af
|
[] |
no_license
|
cbailiss/basictabler
|
708aa9e2da21b65ef65f529be868ebcbfee4373c
|
63486f6acd7b163c28839dd607a98f4bd0e7920d
|
refs/heads/master
| 2021-07-16T05:12:03.971848
| 2021-07-01T20:48:46
| 2021-07-01T20:48:46
| 104,359,939
| 33
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 28,952
|
rd
|
TableCells.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TableCells.R
\docType{class}
\name{TableCells}
\alias{TableCells}
\title{R6 class that manages cells in a table.}
\format{
\code{\link{R6Class}} object.
}
\description{
The `TableCells` manages the `TableCell` objects that comprise a
`BasicTable` object.
}
\examples{
# This class should only be created by the table.
# It is not intended to be created outside of the table.
library(basictabler)
tbl <- qtbl(data.frame(a=1:2, b=3:4))
cells <- tbl$cells
cells$setCell(r=4, c=1, cellType="cell", rawValue=5)
cells$setCell(r=4, c=2, cellType="cell", rawValue=6)
tbl$renderTable()
}
\section{Active bindings}{
\if{html}{\out{<div class="r6-active-bindings">}}
\describe{
\item{\code{rowCount}}{The number of rows in the table.}
\item{\code{columnCount}}{The number of columns in the table.}
\item{\code{rows}}{The rows of cells in the table - represented as a list, each
element of which is a list of `TableCell` objects.}
\item{\code{all}}{A list of the cells in the table. Each element in this list is
a `TableCell` object.}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{TableCells$new()}}
\item \href{#method-reset}{\code{TableCells$reset()}}
\item \href{#method-getCell}{\code{TableCells$getCell()}}
\item \href{#method-getValue}{\code{TableCells$getValue()}}
\item \href{#method-getRowValues}{\code{TableCells$getRowValues()}}
\item \href{#method-getColumnValues}{\code{TableCells$getColumnValues()}}
\item \href{#method-setCell}{\code{TableCells$setCell()}}
\item \href{#method-setBlankCell}{\code{TableCells$setBlankCell()}}
\item \href{#method-deleteCell}{\code{TableCells$deleteCell()}}
\item \href{#method-setValue}{\code{TableCells$setValue()}}
\item \href{#method-setRow}{\code{TableCells$setRow()}}
\item \href{#method-setColumn}{\code{TableCells$setColumn()}}
\item \href{#method-extendCells}{\code{TableCells$extendCells()}}
\item \href{#method-moveCell}{\code{TableCells$moveCell()}}
\item \href{#method-insertRow}{\code{TableCells$insertRow()}}
\item \href{#method-deleteRow}{\code{TableCells$deleteRow()}}
\item \href{#method-insertColumn}{\code{TableCells$insertColumn()}}
\item \href{#method-deleteColumn}{\code{TableCells$deleteColumn()}}
\item \href{#method-getCells}{\code{TableCells$getCells()}}
\item \href{#method-findCells}{\code{TableCells$findCells()}}
\item \href{#method-getColumnWidths}{\code{TableCells$getColumnWidths()}}
\item \href{#method-asList}{\code{TableCells$asList()}}
\item \href{#method-asJSON}{\code{TableCells$asJSON()}}
\item \href{#method-clone}{\code{TableCells$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new `TableCells` object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$new(parentTable = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{parentTable}}{Owning table.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-reset"></a>}}
\if{latex}{\out{\hypertarget{method-reset}{}}}
\subsection{Method \code{reset()}}{
Clear all cells from the table.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$reset()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getCell"></a>}}
\if{latex}{\out{\hypertarget{method-getCell}{}}}
\subsection{Method \code{getCell()}}{
Retrieve a specific `TableCell` object at the specified location in the
table.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$getCell(r = NULL, c = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{r}}{The row number of the cell to retrieve.}
\item{\code{c}}{The column number of the cell to retrieve.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A `TableCell` object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getValue"></a>}}
\if{latex}{\out{\hypertarget{method-getValue}{}}}
\subsection{Method \code{getValue()}}{
Retrieve the value of a specific `TableCell` object at the specified
location in the table.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$getValue(r = NULL, c = NULL, formattedValue = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{r}}{The row number of the cell value to retrieve.}
\item{\code{c}}{The column number of the cell value to retrieve.}
\item{\code{formattedValue}}{`TRUE` to retrieve the formatted (character) cell
value, `FALSE` (default) to retrieve the raw cell value (typically
numeric).}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
The value of the cell.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getRowValues"></a>}}
\if{latex}{\out{\hypertarget{method-getRowValues}{}}}
\subsection{Method \code{getRowValues()}}{
Get a vector or list of the values in a row for the entire row or a subset
of columns.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$getRowValues(
rowNumber = NULL,
columnNumbers = NULL,
formattedValue = FALSE,
asList = FALSE,
rebase = TRUE
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rowNumber}}{The row number to retrieve the values for (a single row
number).}
\item{\code{columnNumbers}}{The column numbers of the cell value to retrieve (can
be a vector of column numbers).}
\item{\code{formattedValue}}{`TRUE` to retrieve the formatted (character) cell
value, `FALSE` (default) to retrieve the raw cell value (typically
numeric).}
\item{\code{asList}}{`TRUE` to retrieve the values as a list, `FALSE` (default)
to retrieve the values as a vector.}
\item{\code{rebase}}{`TRUE` to rebase the list/vector so that the first element
is at index 1, `FALSE` to retain the original column numbers.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A vector or list of the cell values.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getColumnValues"></a>}}
\if{latex}{\out{\hypertarget{method-getColumnValues}{}}}
\subsection{Method \code{getColumnValues()}}{
Get a vector or list of the values in a column for the entire column or a
subset of rows.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$getColumnValues(
columnNumber = NULL,
rowNumbers = NULL,
formattedValue = FALSE,
asList = FALSE,
rebase = TRUE
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{columnNumber}}{The column number to retrieve the values for (a single
column number).}
\item{\code{rowNumbers}}{The row numbers of the cell value to retrieve (can be a
vector of row numbers).}
\item{\code{formattedValue}}{`TRUE` to retrieve the formatted (character) cell
value, `FALSE` (default) to retrieve the raw cell value (typically
numeric).}
\item{\code{asList}}{`TRUE` to retrieve the values as a list, `FALSE` (default)
to retrieve the values as a vector.}
\item{\code{rebase}}{`TRUE` to rebase the list/vector so that the first element
is at index 1, `FALSE` to retain the original row numbers.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A vector or list of the cell values.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setCell"></a>}}
\if{latex}{\out{\hypertarget{method-setCell}{}}}
\subsection{Method \code{setCell()}}{
Create a cell in the table and set the details of the cell.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$setCell(
r = NULL,
c = NULL,
cellType = "cell",
rawValue = NULL,
formattedValue = NULL,
visible = TRUE,
baseStyleName = NULL,
styleDeclarations = NULL,
rowSpan = NULL,
colSpan = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{r}}{The row number of the cell.}
\item{\code{c}}{The column number of the cell.}
\item{\code{cellType}}{The type of the cell - must be one of the following
values: root, rowHeader, columnHeader, cell, total.}
\item{\code{rawValue}}{The raw value of the cell - typically a numeric value.}
\item{\code{formattedValue}}{The formatted value of the cell - typically a
character value.}
\item{\code{visible}}{`TRUE` (default) to specify that the cell is visible,
`FALSE` to specify that the cell will be invisible.}
\item{\code{baseStyleName}}{The name of a style from the table theme that will be
used to style this cell.}
\item{\code{styleDeclarations}}{A list of CSS style definitions.}
\item{\code{rowSpan}}{A number greater than 1 to indicate that this cell is
merged with cells below. `NULL` (default) or 1 means the cell is not
merged across rows.}
\item{\code{colSpan}}{A number greater than 1 to indicate that this cell is
merged with cells to the right. `NULL` (default) or 1 means the cell is
not merged across columns.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A vector or list of the cell values.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setBlankCell"></a>}}
\if{latex}{\out{\hypertarget{method-setBlankCell}{}}}
\subsection{Method \code{setBlankCell()}}{
Create an empty cell in the table and set the details of the cell.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$setBlankCell(
r = NULL,
c = NULL,
cellType = "cell",
visible = TRUE,
baseStyleName = NULL,
styleDeclarations = NULL,
rowSpan = NULL,
colSpan = NULL,
asNBSP = FALSE
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{r}}{The row number of the cell.}
\item{\code{c}}{The column number of the cell.}
\item{\code{cellType}}{The type of the cell - must be one of the following
values: root, rowHeader, columnHeader, cell, total.}
\item{\code{visible}}{`TRUE` (default) to specify that the cell is visible,
`FALSE` to specify that the cell will be invisible.}
\item{\code{baseStyleName}}{The name of a style from the table theme that will be
used to style this cell.}
\item{\code{styleDeclarations}}{A list of CSS style definitions.}
\item{\code{rowSpan}}{A number greater than 1 to indicate that this cell is
merged with cells below. `NULL` (default) or 1 means the cell is not
merged across rows.}
\item{\code{colSpan}}{A number greater than 1 to indicate that this cell is
merged with cells to the right. `NULL` (default) or 1 means the cell is
not merged across columns.}
\item{\code{asNBSP}}{`TRUE` if the cell should be rendered as `&nbsp;`
(a non-breaking space) in HTML, `FALSE` (default) otherwise.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A vector or list of the cell values.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-deleteCell"></a>}}
\if{latex}{\out{\hypertarget{method-deleteCell}{}}}
\subsection{Method \code{deleteCell()}}{
Replace the `TableCell` object at the specified
location in the table with a blank cell.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$deleteCell(r = NULL, c = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{r}}{The row number of the cell value to delete}
\item{\code{c}}{The column number of the cell value to delete}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
The `TableCell` object that is the new blank cell.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setValue"></a>}}
\if{latex}{\out{\hypertarget{method-setValue}{}}}
\subsection{Method \code{setValue()}}{
Update the value of a cell in the table.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$setValue(r = NULL, c = NULL, rawValue = NULL, formattedValue = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{r}}{The row number of the cell.}
\item{\code{c}}{The column number of the cell.}
\item{\code{rawValue}}{The raw value of the cell - typically a numeric value.}
\item{\code{formattedValue}}{The formatted value of the cell - typically a
character value.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setRow"></a>}}
\if{latex}{\out{\hypertarget{method-setRow}{}}}
\subsection{Method \code{setRow()}}{
Create multiple cells in one row of a table.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$setRow(
rowNumber = NULL,
startAtColumnNumber = 1,
cellTypes = "cell",
rawValues = NULL,
formattedValues = NULL,
formats = NULL,
visiblity = TRUE,
baseStyleNames = NULL,
fmtFuncArgs = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rowNumber}}{The row number where the cells will be created.}
\item{\code{startAtColumnNumber}}{The column number to start generating cells at.
Default value 1.}
\item{\code{cellTypes}}{The types of the cells - either a single value or a
vector of the same length as rawValues. Each cellType must be one of
the following values: root, rowHeader, columnHeader, cell, total.}
\item{\code{rawValues}}{A vector or list of values. A cell will be generated in
the table for each element in the vector/list.}
\item{\code{formattedValues}}{A vector or list of formatted values. Must be
either `NULL`, a single value or a vector/list of the same length as
rawValues.}
\item{\code{formats}}{A vector or list of formats. Must be either `NULL`, a
single value or a vector/list of the same length as rawValues.}
\item{\code{visiblity}}{A logical vector. Must be either a single logical value
or a vector/list of the same length as rawValues.}
\item{\code{baseStyleNames}}{A character vector. Must be either a single style
name (from the table theme) or a vector of style names of the same
length as rawValues.}
\item{\code{fmtFuncArgs}}{A list that is length 1 or the same length as the
number of columns in the row, where each list element specifies a list
of arguments to pass to custom R format functions.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setColumn"></a>}}
\if{latex}{\out{\hypertarget{method-setColumn}{}}}
\subsection{Method \code{setColumn()}}{
Create multiple cells in one column of a table.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$setColumn(
columnNumber = NULL,
startAtRowNumber = 2,
cellTypes = "cell",
rawValues = NULL,
formattedValues = NULL,
formats = NULL,
visiblity = TRUE,
baseStyleNames = NULL,
fmtFuncArgs = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{columnNumber}}{The column number where the cells will be created.}
\item{\code{startAtRowNumber}}{The row number to start generating cells at.
Default value 2.}
\item{\code{cellTypes}}{The types of the cells - either a single value or a
vector of the same length as rawValues. Each cellType must be one of
the following values: root, rowHeader, columnHeader, cell, total.}
\item{\code{rawValues}}{A vector or list of values. A cell will be generated in
the table for each element in the vector/list.}
\item{\code{formattedValues}}{A vector or list of formatted values. Must be
either `NULL`, a single value or a vector of the same length as
rawValues.}
\item{\code{formats}}{A vector or list of formats. Must be either `NULL`, a
single value or a vector of the same length as rawValues.}
\item{\code{visiblity}}{A logical vector. Must be either a single logical value
or a vector of the same length as rawValues.}
\item{\code{baseStyleNames}}{A character vector. Must be either a single style
name (from the table theme) or a vector of style names of the same
length as rawValues.}
\item{\code{fmtFuncArgs}}{A list that is length 1 or the same length as the
number of rows in the column, where each list element specifies a list
of arguments to pass to custom R format functions.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-extendCells"></a>}}
\if{latex}{\out{\hypertarget{method-extendCells}{}}}
\subsection{Method \code{extendCells()}}{
Enlarge a table to the specified size.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$extendCells(rowCount = NULL, columnCount = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rowCount}}{The number of rows in the enlarged table.}
\item{\code{columnCount}}{The number of columns in the enlarged table.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-moveCell"></a>}}
\if{latex}{\out{\hypertarget{method-moveCell}{}}}
\subsection{Method \code{moveCell()}}{
Move a table cell to a different location in the table.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$moveCell(r = NULL, c = NULL, cell = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{r}}{The new row number to move the cell to.}
\item{\code{c}}{The new column number to move the cell to.}
\item{\code{cell}}{The `TableCell` object to move.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-insertRow"></a>}}
\if{latex}{\out{\hypertarget{method-insertRow}{}}}
\subsection{Method \code{insertRow()}}{
Insert a new row in the table at the specified row number and shift existing cells on/below this row down by one row.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$insertRow(
rowNumber = NULL,
insertBlankCells = TRUE,
headerCells = 1,
totalCells = 0
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rowNumber}}{The row number where the new row is to be inserted.}
\item{\code{insertBlankCells}}{`TRUE` (default) to insert blank cells in the new row, `FALSE` to create no cells in the new row.}
\item{\code{headerCells}}{The number of header cells to create at the start of the row. Default value 1.}
\item{\code{totalCells}}{The number of total cells to create at the end of the row. Default value 0.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-deleteRow"></a>}}
\if{latex}{\out{\hypertarget{method-deleteRow}{}}}
\subsection{Method \code{deleteRow()}}{
Delete the row in the table at the specified row number and shift existing cells below this row up by one row.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$deleteRow(rowNumber = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rowNumber}}{The row number of the row to be deleted.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-insertColumn"></a>}}
\if{latex}{\out{\hypertarget{method-insertColumn}{}}}
\subsection{Method \code{insertColumn()}}{
Insert a new column in the table at the specified column number and shift existing cells in/to the right of this column across by one column.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$insertColumn(
columnNumber = NULL,
insertBlankCells = TRUE,
headerCells = 1,
totalCells = 0
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{columnNumber}}{The column number where the new column is to be inserted.}
\item{\code{insertBlankCells}}{`TRUE` (default) to insert blank cells in the new column, `FALSE` to create no cells in the new column}
\item{\code{headerCells}}{The number of header cells to create at the top of the column. Default value 1.}
\item{\code{totalCells}}{The number of total cells to create at the bottom of the column. Default value 0.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-deleteColumn"></a>}}
\if{latex}{\out{\hypertarget{method-deleteColumn}{}}}
\subsection{Method \code{deleteColumn()}}{
Delete the column in the table at the specified column number and shift existing cells to the right of this column to the left by one column.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$deleteColumn(columnNumber = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{columnNumber}}{The column number of the column to be deleted.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
No return value.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getCells"></a>}}
\if{latex}{\out{\hypertarget{method-getCells}{}}}
\subsection{Method \code{getCells()}}{
Retrieve cells by a combination of row and/or column numbers.
See the "Finding and Formatting" vignette for graphical examples.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$getCells(
specifyCellsAsList = TRUE,
rowNumbers = NULL,
columnNumbers = NULL,
cellCoordinates = NULL,
excludeEmptyCells = FALSE,
matchMode = "simple"
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{specifyCellsAsList}}{`TRUE`/`FALSE` to specify how cells are retrieved.
Default `TRUE`. More information is provided in the details section.}
\item{\code{rowNumbers}}{A vector of row numbers that specify the rows or
cells to retrieve.}
\item{\code{columnNumbers}}{A vector of row numbers that specify the columns
or cells to retrieve.}
\item{\code{cellCoordinates}}{A list of two-element vectors that specify the
coordinates of cells to retrieve. Ignored when `specifyCellsAsList=FALSE`.}
\item{\code{excludeEmptyCells}}{Default `FALSE`. Specify `TRUE` to exclude empty
cells.}
\item{\code{matchMode}}{Either "simple" (default) or "combinations"\cr
"simple" specifies that row and column arguments are considered separately
(logical OR), e.g. rowNumbers=1 and columnNumbers=2 will match all cells in
row 1 and all cells in column 2.\cr
"combinations" specifies that row and column arguments are considered together
(logical AND), e.g. rowNumbers=1 and columnNumbers=2 will match only the
single cell at location (1, 2).\cr
Arguments `rowNumbers`, `columnNumbers`, `rowGroups` and `columnGroups` are
affected by the match mode. All other arguments are not.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
When `specifyCellsAsList=TRUE` (the default):\cr
Get one or more rows by specifying the row numbers as a vector as
the rowNumbers argument and leaving the columnNumbers argument set
to the default value of `NULL`, or\cr
Get one or more columns by specifying the column numbers as a vector
as the columnNumbers argument and leaving the rowNumbers argument
set to the default value of `NULL`, or\cr
Get one or more individual cells by specifying the cellCoordinates
argument as a list of vectors of length 2, where each element in the
list is the row and column number of one cell,\cr
e.g. `list(c(1, 2), c(3, 4))` specifies two cells, the first located
at row 1, column 2 and the second located at row 3, column 4.\cr
When `specifyCellsAsList=FALSE`:\cr
Get one or more rows by specifying the row numbers as a vector as the
rowNumbers argument and leaving the columnNumbers argument set to the
default value of `NULL`, or\cr
Get one or more columns by specifying the column numbers as a vector
as the columnNumbers argument and leaving the rowNumbers argument set
to the default value of `NULL`, or\cr
Get one or more cells by specifying the row and column numbers as vectors
for the rowNumbers and columnNumbers arguments, or\cr
a mixture of the above, where for entire rows/columns the element in the
other vector is set to `NA`, e.g. to retrieve whole rows, specify the row
numbers as the rowNumbers but set the corresponding elements in the
columnNumbers vector to `NA`.
}
\subsection{Returns}{
A list of `TableCell` objects.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-findCells"></a>}}
\if{latex}{\out{\hypertarget{method-findCells}{}}}
\subsection{Method \code{findCells()}}{
Find cells matching specified criteria.
See the "Finding and Formatting" vignette for graphical examples.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$findCells(
minValue = NULL,
maxValue = NULL,
exactValues = NULL,
valueRanges = NULL,
includeNull = TRUE,
includeNA = TRUE,
emptyCells = "include",
rowNumbers = NULL,
columnNumbers = NULL,
cellCoordinates = NULL,
cells = NULL,
rowColumnMatchMode = "simple"
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{minValue}}{A numerical value specifying a minimum value threshold.}
\item{\code{maxValue}}{A numerical value specifying a maximum value threshold.}
\item{\code{exactValues}}{A vector or list specifying a set of allowed values.}
\item{\code{valueRanges}}{A vector specifying one or more value range expressions which
the cell values must match. If multiple value range expressions are specified,
then the cell value must match any of one the specified expressions.}
\item{\code{includeNull}}{Specify TRUE to include `NULL` in the matched cells,
FALSE to exclude `NULL` values.}
\item{\code{includeNA}}{Specify TRUE to include `NA` in the matched cells,
FALSE to exclude `NA` values.}
\item{\code{emptyCells}}{A word that specifies how empty cells are matched -
must be one of "include" (default), "exclude" or "only".}
\item{\code{rowNumbers}}{A vector of row numbers that specify the rows or
cells to constrain the search.}
\item{\code{columnNumbers}}{A vector of column numbers that specify the columns
or cells to constrain the search.}
\item{\code{cellCoordinates}}{A list of two-element vectors that specify the
coordinates of cells to constrain the search.}
\item{\code{cells}}{A `TableCell` object or a list of `TableCell`
objects to constrain the scope of the search.}
\item{\code{rowColumnMatchMode}}{Either "simple" (default) or "combinations":\cr
"simple" specifies that row and column arguments are considered separately
(logical OR), e.g. rowNumbers=1 and columnNumbers=2 will match all cells in
row 1 and all cells in column 2.\cr
"combinations" specifies that row and column arguments are considered together
(logical AND), e.g. rowNumbers=1 and columnNumbers=2 will match only the
single cell at location (1, 2).\cr
Arguments `rowNumbers`, `columnNumbers`, `rowGroups` and `columnGroups` are
affected by the match mode. All other arguments are not.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A list of `TableCell` objects.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getColumnWidths"></a>}}
\if{latex}{\out{\hypertarget{method-getColumnWidths}{}}}
\subsection{Method \code{getColumnWidths()}}{
Retrieve the width of the longest value
(in characters) in each column.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$getColumnWidths()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
The width of the column in characters.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-asList"></a>}}
\if{latex}{\out{\hypertarget{method-asList}{}}}
\subsection{Method \code{asList()}}{
Return the contents of this object as a list for debugging.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$asList()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A list of various object properties.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-asJSON"></a>}}
\if{latex}{\out{\hypertarget{method-asJSON}{}}}
\subsection{Method \code{asJSON()}}{
Return the contents of this object as JSON for debugging.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$asJSON()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A JSON representation of various object properties.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TableCells$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
75426aae05fa797a9af401ac1c3d6750c3e15b8d
|
dab20b21827a84261e457e87fd9082a1b6488a1c
|
/script_raw/BaseModel_prediction.R
|
a61a274b41293a272b7695874b92bc31bdbf9bb0
|
[] |
no_license
|
MikyPiky/Project2Script
|
583da1740541f755a8fbe0195366e7fa7882ab44
|
93424306f2baeda5c769a09eff6c99d873bda41c
|
refs/heads/master
| 2021-01-02T22:37:01.995232
| 2018-01-26T20:41:08
| 2018-01-26T20:41:08
| 99,354,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,494
|
r
|
BaseModel_prediction.R
|
#### Description of Script ####
'
- Use models estimated in BaseModel.R to predict siloMaize yield Anomalies
- Loop through those models to make prediction for each year (maps) and comId (time series)
'
#### Output ####
## Files
'
- Maize_meteo including the predicted values from the models in BaseModel.R "./data/data_processed/Maize_meteo_predicted.csv"
'
## Plots
' - maps of predicted silage maize anomalies for each year in the training period ->
"./figures/figures_exploratory/Train/", modelListNames[[i]], "/Yield_predict_anomaly_", listyear[m],".pdf"
- com specific sum of predicred values -> /figures/figures_exploratory/Train/", modelListNames[[i]], "/Yield_predict_sumsComId.pdf"
- time series for each com -> ./figures/figures_exploratory/Train/", modelListNames[[i]],"/TimeSeries/administrative_districts/timeSeries_yieldAnomaly_",
comId_list$comId[r]
'
#### Dependencies and Input ####
' - BaseModel.R
- vg2500_krs <- read_sf("./../Proj1/data/data_spatial/", "vg2500_krs")
'
###################
## Load Packages ##
###################
# Packages.R attaches every library this script relies on (sf, tidyverse, ...).
source("./script/script_raw/Packages.R")
##############################################################################################################################################################################
# rm(list=ls())  # intentionally disabled -- clearing the workspace would also drop the attached helpers
getwd()  # echoes the working directory so the relative paths used below can be verified in the log
#################################################
#### Read in data.frame and estimate models ####
###############################################
# BaseModel.R supplies the objects used in the rest of this script:
# Maize_meteo (observational training data) plus modelList / modelListNames
# (the fitted yield-anomaly models that are looped over below).
source("./script/script_raw/BaseModel.R")
#################################################################################################################################################################
###########################################################
#### Predictions on observational data - annual junks ####
#########################################################
#################################################################################################################################################################
#######################################################################
#### Loop through those models to make predictions for each year #####
#####################################################################
################################################
#### Load shape of administrative districts ####
################################################
# District polygons used later for spatial joins / map plots.
vg2500_krs <- read_sf("./../Proj1/data/data_spatial/", "vg2500_krs")

#### Truncate the district key (RS) to five digits and store it as a factor ####
vg2500_krs$RS <- as.factor(as.integer(str_sub(vg2500_krs$RS, 1, 5)))

# Rename the key column so it matches Maize_meteo$comId for joins.
# NOTE(review): positional rename -- assumes RS is the second column of the
# shapefile attribute table; confirm against the vg2500_krs layer definition.
names(vg2500_krs)[2] <- "comId"

######################
#### Prepare loop ####
######################
## Years covered by the training period ##
listyear <- seq(1999, 2015)

# Container for the predictions: one row per observation, one column per model.
# Dimensions are derived from the data instead of the former hard-coded
# 4625 x 10 -- this keeps the script correct when the number of observations
# or models changes, and prevents spurious all-NA filler columns from being
# bound into the exported data.frame.
result <- data.frame(matrix(nrow = nrow(Maize_meteo), ncol = length(modelList)))
#######################################################################
#### Loop through the models to make predictions for each year    ####
#####################################################################
# For every fitted model in modelList (estimated in BaseModel.R), predict the
# silage-maize yield anomaly on the observational data (Maize_meteo) and
# collect the predictions -- one column per model -- in `result`.
for (i in seq_along(modelList)){
  #### Create directories output saved in ####
  # dir.create(paste("./figures/figures_exploratory/Train/", modelListNames[[i]], sep=""), showWarnings = F)
  # dir.create(paste("./data/data_processed/Train/", modelListNames[[i]], sep=""), showWarnings = F)
  # Now all files are saved in one output, comparable to climate predictions.

  ##############################################################
  #### Predict YIELD ANOMALY model using model in modelList ####
  # as_tibble() replaces the deprecated as.tibble().
  predict_year_anomaly <- as_tibble(predict(modelList[[i]], newdata = Maize_meteo))
  names(predict_year_anomaly) <- paste("siloMaizeAnomaly_predicted")

  #### Combine with Maize_meteo to allow spatial plots (currently disabled below) ####
  Maize_meteo_predicted <- bind_cols( Maize_meteo[ c(1:5,8)], predict_year_anomaly)

  ########################################################
  #### Export the data.frame of the predicted values ####
  #########################################################################################################################
  #### Include model name in the name of the predicted siloMais column ####
  names(predict_year_anomaly) <- paste("sMA", modelListNames[i], sep="_")

  #### Store this model's predictions as column i of the result container ####
  result[,i] <- predict_year_anomaly
  names(result)[i] <- paste("sMA", modelListNames[i], sep="_")

  # ##########################################################################
  # #### Start loop trough 17 years in training data to make plotted maps ####
  #
  # ## Define colors ##
  # Maize_meteo$siloMaizeAnomaly
  # summary(Maize_meteo$siloMaizeAnomaly)
  # myPalette_anomaly <- colorRampPalette((brewer.pal(11, "BrBG")))
  # sc_anomaly <- scale_fill_gradientn("Yield Deviation", colours = myPalette_anomaly(400), limits=c(- 200, 200))
  #
  # for(m in 1:17){
  #
  #   ## Create data.frame for the year m ##
  #   Maize_meteo_year <- Maize_meteo_predicted %>% select(year:state, siloMaizeAnomaly_predicted ) %>% filter(year == listyear[[m]] )
  #   Maize_meteo_year
  #
  #   ##############################
  #   #### Plot predicted data ####
  #   ############################
  #
  #   ####################################
  #   #### Create spatial data.frame #####
  #   predictData_train_sf <- NULL
  #   predictData_train_sf <- inner_join(vg2500_krs, Maize_meteo_year , by="comId")
  #   predictData_train_sf
  #
  #
  #   ## Anomaly
  #   predictData_train_sf_anomaly_plot <-
  #     ggplot( predictData_train_sf) +
  #     geom_sf(data=vg2500_krs, fill="gray", color="white") +
  #     geom_sf(aes(fill = predictData_train_sf$siloMaizeAnomaly_predicted )) +
  #     guides(fill = guide_legend(title = "Predicted Yield Anomaly")) +
  #     sc_anomaly +
  #     ggtitle(paste(listyear[m], modelListNames[i], sep = " - ")) +
  #     theme_bw() +
  #     theme(plot.title = element_text(hjust = 0.5))
  #
  #   #### Save the plots ####
  #   ggsave(paste("./figures/figures_exploratory/Train/", modelListNames[[i]], "/Yield_predict_anomaly_", listyear[m],".pdf", sep=""),predictData_train_sf_anomaly_plot , device="pdf", width=8, height= 8)
  #   # ggsave(paste("./figures/figures_exploratory/Train/", modelListNames[[i]], "/Yield_predict_", listyear[m],".pdf", sep=""), predictData_train_sf_absolut_plot, device="pdf", width=8, height= 8)
  #
  # } ## End of loop through 17 years in training data

} # Close loop through the predictive models

#############################################################
#### Create Maize_meteo_predicted including all models ####
###########################################################
# Final data.frame: the id/meta columns of Maize_meteo plus one prediction
# column per model. Hoisted out of the loop -- the original rebuilt this on
# every iteration with an identical final result.
Maize_meteo_predicted <- bind_cols( Maize_meteo[ c(1:5,8)], result)
#### Write Maize_meteo including the predicted siloMaize anomalies derived from the models in BaseModel.R ###
# One column per model (sMA_<modelName>) alongside the id/meta columns;
# consumed by the (currently disabled) plotting section further below.
write_csv(Maize_meteo_predicted, "./data/data_processed/Maize_meteo_predicted.csv")
# ##############################################################################################################################################################################################
# ###############################################################################################
# #### Make Plots of sums the anomalies within each comID and the time series of each comID ####
# #############################################################################################
# ##############################################################################################################################################################################################
# rm(list=ls())
# #################################################
# #### Read in data.frame and estimate models ####
# ###############################################
# source("./script/script_raw/BaseModel.R")
#
#
# #################################################
# #### Load shape of administrative districts ####
# ###############################################
# vg2500_krs <- read_sf("./../Proj1/data/data_spatial/", "vg2500_krs")
# vg2500_krs
#
# #### Change RS to five digits #####
# vg2500_krs$RS <- as.factor(as.integer(str_sub(vg2500_krs$RS, 1,5)))
# vg2500_krs$RS
# names(vg2500_krs)[2] <- "comId"
#
# #### Read in avgYield_comId to extract 334 coms ####
# avgYield_comId <- read_csv( file="./data/data_processed/avgYield_comId.csv")
# avgYield_comId
# avgYield_comId$comId <-as.factor(avgYield_comId$comId)
#
# ###########################################################
# #### Make comId list including comIds and comIds Names ####
# comList <- inner_join(avgYield_comId, vg2500_krs, by="comId")
# comList
#
# comId_list <- comList %>% select (comId, GEN) %>% as.list()
# comId_list
#
# #############################################
# #### Read in data of predicted anomalies ####
# predictData_train_anomaly_allyears <- read_csv(paste("./data/data_processed/Maize_meteo_predicted.csv", sep="" ))
# predictData_train_anomaly_allyears <- predictData_train_anomaly_allyears %>% mutate_at("comId", as.factor)
#
# predictData_train_anomaly_allyears
#
# ############################################################################
# #### Add spatial information - make sf data.frame to allow map plotting ####
# predictData_train_anomaly_allyears_sf <- inner_join(vg2500_krs, predictData_train_anomaly_allyears, by = "comId")
# predictData_train_anomaly_allyears_sf
#
# ##########################################################
# #### Calculate sums of the predictions for each comId ####
# predictData_train_sums <-
# predictData_train_anomaly_allyears_sf %>%
# group_by(comId) %>%
# select( starts_with("sMA_")) %>%
# summarise(. = sum(.))
#
# predictData_train_sums
#
#
#
# ##############################################
# #### Start loop thorugh predictive models ####
# for (i in seq_along(modelList)){
#
# ############################################
# #### Create directories output saved in ####
# dir.create(paste("./figures/figures_exploratory/Train/", modelListNames[[i]], sep=""), showWarnings = F)
# dir.create(paste("./data/data_processed/Train/", modelListNames[[i]], sep=""), showWarnings = F)
# dir.create(paste("./figures/figures_exploratory/Train/", modelListNames[[i]], "/TimeSeries/", sep=""), showWarnings = F)
# dir.create(paste("./figures/figures_exploratory/Train/", modelListNames[[i]], "/TimeSeries/administrative_districts/", sep=""), showWarnings = F)
#
#
#
# ##########################################################################
# #### Plot sums of Predictions over years of each comId in train Data ####
# myPalette <- colorRampPalette((brewer.pal(11, "BrBG")))
# sc <- scale_fill_gradientn(colours = colorRampPalette((brewer.pal(11, "BrBG")))(100), limits=c(- 300, 300))
#
# predictData_train_sums_alone <- predictData_train_sums %>% select(names(predictData_train_sums) [1+i])
# names( predictData_train_sums_alone )[2] <- "sMA"
#
# predictData_train_sums_plot <-
# ggplot(predictData_train_sums_alone) +
# geom_sf(data=vg2500_krs, fill="gray", color="white") +
# # guides(fill = guide_legend(label = T)) +
# geom_sf(aes(fill = sMA)) +
# ggtitle(paste("Sums of annual predictions", modelListNames[i], sep=" - ") ) + theme_bw() +
# theme(plot.title = element_text(hjust = 0.5), legend.title=element_blank()) +
# sc
#
# ggsave(paste("./figures/figures_exploratory/Train/", modelListNames[[i]], "/Yield_predict_sumsComId.pdf", sep=""), predictData_train_sums_plot , device="pdf", width=8, height= 8)
#
#   ' Note: the difference here is that models with fixed effects sum to zero, whereas models without them do not. To me this means the fixed
#   effects carry information about the period 1999-2015 that is missing from the pure anomaly models. In those models the data are demeaned
#   over the period from 1951 onward, for the meteorology as well as the SMI. '
#
# ###########################################################################
# #### Loop to plot time series of yield and yield anomaly of each year ####
# #########################################################################
# for (r in seq_along(comId_list$comId)){
#
#
# ###############################
# #### Filter for each comID ####
# predictData_train_anomaly_allyears_year <-
# predictData_train_anomaly_allyears %>%
# filter(comId == comId_list$comId[r]) %>%
# select(year, starts_with("sMA_"))
#
# predictData_train_anomaly_allyears_year_alone <- predictData_train_anomaly_allyears_year %>%
# select(year, paste(names(predictData_train_anomaly_allyears_year)[1+i]) )
#
# names( predictData_train_anomaly_allyears_year_alone) <- c("year", "Yield_anomaly")
#
# ##########################################
# #### Plot yield anomalies time series ####
# timeseries_anomaly <- ggplot(predictData_train_anomaly_allyears_year_alone , aes(year,Yield_anomaly)) +
# ylim(-200, 200) +
# geom_point(size=0.5, color="grey") +
# # stat_density_2d(geom = "raster", aes(fill = ..density..), contour = FALSE) +
# geom_hline(aes(yintercept = mean(Yield_anomaly))) +
# geom_smooth(method = "lm", se = FALSE, color="orange", size=1.5) +
# geom_smooth(color="green", se = FALSE, fill="red", size=1.5) +
# # geom_quantile(quantiles = c(0.1, 0.9), method = "rqss", lambda = 80, size=1.5) +
# ggtitle(paste(comId_list$comId [[r]], comId_list$GEN[[r]], modelListNames[[i]], sep = " - ")) +
# theme_minimal() +
# theme(plot.title = element_text(hjust = 0.5)) +
# ylab("Silage Maize Yield Anomaly")
#
# ggsave(paste("./figures/figures_exploratory/Train/", modelListNames[[i]],"/TimeSeries/administrative_districts/timeSeries_yieldAnomaly_",
# comId_list$comId[r], ".pdf", sep=""),
# plot = timeseries_anomaly , width=14, height=8)
# } ## End of Loop to produce time series
#
#
# } ## END OF LOOP WHICH LOOPS THROUGH THE DIFFERENT PREDICTIVE MODELS
#
#
# rm(list=ls())
# # NOTE(review): rm(list = ls()) wipes the entire global environment; avoid it
# # if this section is ever reactivated inside a larger script or package context.
|
# (commented out — non-R scrape/viewer residue accidentally appended to the file:
#  "Subsets and Splits" / "No community queries yet" /
#  "The top public SQL queries from the community will appear here once available.")