blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ca8151152d6f9a4bfb89c513d3ae40425fdc841
|
147c883844e11f737c11602e9e605250b35d163c
|
/R/calculatePreferenceSystemConsistency.R
|
49674fb445fa0a2b74330a99259c8cb887bd0e47
|
[] |
no_license
|
florianfendt/dips
|
be31f80950fa12d3d12ce4e13045fa42ab2a4288
|
1856f07c7abe4667c63744186348d47929351943
|
refs/heads/master
| 2021-04-09T11:12:04.899527
| 2018-08-03T11:53:46
| 2018-08-03T11:53:46
| 124,410,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,535
|
r
|
calculatePreferenceSystemConsistency.R
|
#' @title Calculate Preference System Consistency
#' @description
#' Calculates the granularity up to which the
#' given Preference System is consistent.\cr
#' Throws an error if computation fails.
#' @template arg_ps
#' @template arg_showinfo
#' @return [\code{ConsistencyResult}] With entries:\cr
#' opt.val: Optimal value of the objective function.\cr
#' opt.vec: Optimal found solution vector.
#' @template ref_jansen
#' @export
calculatePreferenceSystemConsistency = function(ps, show.info = TRUE) {
  assertLogical(show.info, len = 1L)
  ps = checkPreferenceSystem(ps)
  # Split both relations into their indifference (I) and strict preference (P)
  # parts; these drive the constraints of the linear program below.
  I.R1 = getI(ps$R1)
  P.R1 = getP(ps$R1)
  I.R2 = getI(ps$R2)
  P.R2 = getP(ps$R2)
  # One utility variable per row of ps$df plus one extra variable (the
  # granularity); the objective maximizes only that last variable.
  obj.f = c(rep(0, times = nrow(ps$df)), 1)
  n.f = length(obj.f)
  # Box constraints: every variable <= 1 and >= 0.
  const = as.data.frame(diag(rep(1, times = length(obj.f))))
  const = rbind(const, const)
  names(const) = 1:n.f
  rhos = c(rep(1, n.f), rep(0, n.f))
  const.dir = c(rep("<=", n.f), rep(">=", n.f))
  # One additional constraint per relation pair; the 'type' argument encodes
  # which of the four relation parts the row belongs to (semantics live in
  # makeConstraint -- not visible here, see that helper).
  const.I.R1 = rbindForLists(apply(I.R1, 1L, makeConstraint, n = n.f, type = 1L))
  const.P.R1 = rbindForLists(apply(P.R1, 1L, makeConstraint, n = n.f, type = 2L))
  const.I.R2 = rbindForLists(apply(I.R2, 1L, makeConstraint, n = n.f, type = 3L))
  const.P.R2 = rbindForLists(apply(P.R2, 1L, makeConstraint, n = n.f, type = 4L))
  const.add = rbind(const.I.R1, const.P.R1, const.I.R2, const.P.R2, stringsAsFactors = FALSE)
  # First n.f columns of const.add hold the coefficients; the remaining
  # columns carry the right-hand sides and directions appended below.
  const = rbind(const, const.add[1:n.f])
  rhos = c(rhos, const.add$rhos)
  const.dir = c(const.dir, const.add$const.dir)
  # Solve with lpSolve::lp(); a positive optimum means the system is
  # consistent up to that granularity.
  linear.program = lp(direction = "max", obj.f, const,
    const.dir, rhos)
  opt.val = linear.program$objval
  if (show.info) {
    if (opt.val > 0) {
      message(sprintf("Success: The Preference System is consistent,
with granularity up to %f", opt.val))
    } else {
      message("Failure: The Preference System is not consistent.")
    }
  }
  res = makeS3Obj("ConsistencyResult", opt.val = opt.val,
    opt.vec = linear.program$solution)
  return(res)
}
#' @title Get Indifference of Set
#' @description
#' Calculates the indifference part of a relation given as a \code{matrix} or
#' a \code{data.frame}: the rows whose mirrored pair is also contained in the
#' relation.
#' @param x [\code{matrix}|\code{data.frame}]\cr
#' Matrix or data.frame with 2 or 4 columns to work on R1, R2 respectively.
#' @return [\code{data.frame}] Possibly empty.
#' @export
getI = function(x) {
  # Fixed: 'class(x) == "matrix"' errors in if() on R >= 4.0, where a matrix
  # has class c("matrix", "array"); is.matrix() is the robust test.
  if (is.matrix(x)) {
    x = as.data.frame(x)
  }
  n.col = ncol(x)
  if (n.col == 2L) {
    # A pair (a, b) belongs to the indifference part iff the swapped pair
    # (b, a) is also a row of x.
    mutated = x[, 2:1]
    I.ps = apply(mutated, 1L, function(y) {
      x[x[, 1L] == y[1L] & x[, 2L] == y[2L], ]
    })
  } else {
    if (n.col == 4L) {
      # Same idea for 4-column relations: swap the two column pairs.
      mutated = x[, c(3:4, 1:2)]
      I.ps = apply(mutated, 1L, function(y) {
        x[x[, 1L] == y[1L] & x[, 2L] == y[2L] & x[, 3L] == y[3L] & x[, 4L] == y[4L], ]
      })
    }
  }
  res = do.call("rbind", I.ps)
  if (is.null(res)) {
    res = data.frame()
  }
  res
}
#' @title Get Strict Part Of Set
#' @description
#' Calculates the strict part of a relation given as a \code{matrix} or a
#' \code{data.frame}: all rows that are not part of the indifference part.
#' @param x [\code{matrix}|\code{data.frame}]\cr
#' Matrix or data.frame with 2 or 4 columns to work on R1, R2 respectively.
#' @return [\code{data.frame}]
#' @export
getP = function(x) {
  I.x = getI(x)
  indifferents = rownames(I.x)
  # Fixed: the original tested 'length(indifferents > 0L)', i.e. the length
  # of a logical vector -- a misplaced parenthesis that only worked by
  # accident (length 0 is falsy). The intended test is on the length itself.
  if (length(indifferents) > 0L) {
    x = x[rownames(x) %nin% indifferents, ]
  }
  x
}
#' Print method for ConsistencyResult objects.
#'
#' Reports whether the preference system is consistent: a positive optimal
#' value of the underlying linear program means consistency, and opt.val is
#' the granularity bound reported to the user.
#' @export
print.ConsistencyResult = function(x, ...) {
  if (x$opt.val > 0) {
    catf("PreferenceSystem is consistent with granularity
up to: %f", x$opt.val)
  } else {
    catf("PreferenceSystem is not consistent, optimal value
is not positive: %f", x$opt.val)
  }
}
|
11de05068f1b611c672db8f106750de6644c51aa
|
12c4f31e81f1cb0ac3234b3cb25adaeb98cd42e4
|
/BuildBook.R
|
b9819ee5f69fd040a6dc943269ed495248a2971f
|
[
"MIT"
] |
permissive
|
juchiyu/MultivarBook
|
dd1f9abb672b63ba8c352cdf538dbce8ffc0594e
|
3fae162495c76e6967e7b36e21a3a3bb6cdcc09a
|
refs/heads/master
| 2020-09-09T02:40:15.980792
| 2020-02-04T07:40:12
| 2020-02-04T07:40:12
| 221,320,688
| 3
| 2
|
MIT
| 2020-01-14T17:12:35
| 2019-11-12T22:02:47
|
HTML
|
UTF-8
|
R
| false
| false
| 96
|
r
|
BuildBook.R
|
# Run this line to build the book: renders 00-00-index.Rmd (plus the chapters
# it pulls in) into the bookdown gitbook output format.
bookdown::render_book("00-00-index.Rmd", "bookdown::gitbook")
|
356b12ad194036bd0eeb29db7483de45773649ce
|
cecf7913ef5829f1fef0a38b2720b929a0a173e8
|
/global.R
|
b457355f18b416149eb5cfca10f9a6cfc8372013
|
[] |
no_license
|
ashten28/ranchi
|
5af242f6f10869cef6fa2b7ea6353d64a5e270b7
|
8bf03911ef96e37997c9bd2d624bd819813a3def
|
refs/heads/master
| 2023-02-09T18:43:13.782920
| 2020-12-27T16:09:58
| 2020-12-27T16:09:58
| 242,119,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 872
|
r
|
global.R
|
# Author: Ashten Anthony
# Email : ashten.anthony@nmg-group.com

# Shiny global.R: runs once at app start-up; everything defined here is
# shared by ui.R and server.R.

# Launch the app in the system browser rather than the RStudio viewer.
options(shiny.launch.browser = TRUE)

# r packages to load
library(dplyr)
library(stringr)
library(shiny)
library(scales)
library(shinydashboard)
library(shinythemes)
library(shinyWidgets)
library(shinyjs)
library(googledrive)
library(googlesheets4)

# theme pallete (light to dark blues); show_col() previews it at start-up
theme_pallete <-
  c("#dfe5ef", "#c3cfe1", "#9db1cf", "#6f8db9", "#4c6c9c", "#364d6e")
scales::show_col(theme_pallete)

# read data
places <- read.csv("www/data/places.csv")

# filter for restaurants (assumes a 0/1 'restaurant' flag column -- confirm
# against the CSV schema)
restaurants <-
  places %>%
  filter(restaurant == 1)

# get distinct list of cuisines and venue types to feed the UI inputs
cuisine_list <-
  restaurants %>%
  distinct(cuisine) %>%
  pull(cuisine)
type_list <-
  restaurants %>%
  distinct(type) %>%
  pull(type)

# title-cased display names for the dropdown choices
names(cuisine_list) <-
  str_to_title(cuisine_list)
names(type_list) <-
  str_to_title(type_list)
|
41b44f83dcbe7fdbd65fcb2bd825655c2d97bb9d
|
5390b30d1f233b024479c7e5199a39ccab75db24
|
/man/tie.Rd
|
c2873ca71b892b4dcc48db6bf23a4bf5faf54910
|
[] |
no_license
|
TilburgNetworkGroup/remstats
|
65d5c6046612bc6b954a61f8e78f8471887400c9
|
19799f91a9906312e89ba7fe58bef33e49a6b6f1
|
refs/heads/master
| 2023-07-19T18:21:13.674451
| 2023-07-13T14:11:23
| 2023-07-13T14:11:23
| 248,442,585
| 4
| 1
| null | 2023-09-05T08:36:29
| 2020-03-19T07:54:52
|
R
|
UTF-8
|
R
| false
| true
| 1,705
|
rd
|
tie.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/effects.R
\name{tie}
\alias{tie}
\title{tie}
\usage{
tie(x, variableName = NULL, scaling = c("none", "std"))
}
\arguments{
\item{x}{a matrix with attribute information, rows and columns should refer
to actors in the edgelist}
\item{variableName}{optionally, a string indicating the variable name, used
for the dimnames of the output statistics object}
\item{scaling}{the method for scaling the statistic. Default is to not scale
the statistic. Alternatively, standardization of the statistic per time
point can be requested with "std".}
}
\description{
Specifies the statistic for a "tie" (or, "dyad") effect.
}
\details{
The "tie" effect refers to an exogenous dyad attribute that affects dyad
\emph{(i,j)}'s rate of interacting (tie-oriented model) or actor \emph{j}'s
probability of being chosen as a receiver for the event sent by the active
sender \emph{i} (actor-oriented model). The statistic is equal to the value
of the exogenous attribute for dyad \emph{(i,j)} in matrix \code{x}.
}
\examples{
data(info, package = "remstats")
actors <- unique(info$name)
age <- info[match(actors, info$name), "age"]
both_old <- sapply(seq_along(actors), function(i) {
sapply(seq_along(actors), function(j) {
ifelse(age[i] == 1 & age[j] == 1 & i != j, 1, 0)
})
})
rownames(both_old) <- colnames(both_old) <- actors
reh_tie <- remify::remify(history, model = "tie")
effects <- ~ tie(both_old, variableName = "both.old")
remstats(reh = reh_tie, tie_effects = effects, attr_data = info)
reh_actor <- remify::remify(history, model = "actor")
remstats(reh = reh_actor, receiver_effects = effects, attr_data = info)
}
|
8c509929eeda423e90c510f3b9295f9abcf1d498
|
0aad896cc54cb251019af757bc4eecae99d05e53
|
/IBEX35.R
|
bd2055f243615f72b2663040eb5302503217e406
|
[] |
no_license
|
rytakahas/DMwR
|
ff975bba121bcdc46e3091f87fec53fbab16ea85
|
fc69a81bb57cb1029b8097a52b382b0a186ea0ac
|
refs/heads/master
| 2021-01-10T03:54:28.636814
| 2016-03-11T22:20:57
| 2016-03-11T22:20:57
| 53,037,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,475
|
r
|
IBEX35.R
|
# TODO: Add comment
#
# http://www.dcc.fc.up.pt/~ltorgo/DataMiningWithR/
#
###############################################################################
library(DMwR)
library(xts)
x1 <- xts(rnorm(100), seq(as.POSIXct("2000-01-01"), len = 100, by = "day"))
x1[1:5]
x2 <- xts(rnorm(100), seq(as.POSIXct("2000-01-01 13:00"), len = 100, by = "min"))
x2[1:4]
x3 <- xts(rnorm(3), as.Date(c("2005-01-01", "2005-01-10","2005-01-12")))
x3
x1[as.POSIXct("2000-01-04")]
x1["2000-01-05"]
x1["20000105"]
x1["200004"]
x1["2000-03-27/"]
x1["2000-02-26/2000-03-03"]
x1["/20000103"]
mts.vals <- matrix(round(rnorm(25),2),5,5)
colnames(mts.vals) <- paste('ts',1:5,sep = '')
mts <- xts(mts.vals,as.POSIXct(c('2003-01-01','2003-01-04',
'2003-01-05','2003-01-06','2003-02-16')))
mts
mts["2003-01",c("ts2","ts5")]
index(mts)
coredata(mts)
library(tseries)
NASDAQ <-as.xts(get.hist.quote("^IXIC",start = "1971-02-05",
quote = c("Open","High","Low","Close","Volume","AdjClose")))
library(quantmod)
setSymbolLookup(IXIC=list(name = '^IXIC', src = 'yahoo'),
USDEUR = list(name = 'USD/EUR', src = 'oanda'))
getSymbols(c('IXIC','USDEUR'))
head(IXIC)
head(USDEUR)
library("RODBC");
conn <- odbcConnect("Impala")
result <- sqlQuery(conn, "select * from stock limit 3")
library(DBI)
library(RMySQL)
drv <- dbDriver("MySQL")
ch <- dbConnect(drv, dbname = "Quotes", "root", "cloudera")
allQuotes <- dbGetQuery(ch, "select * from gspc")
GSPC <- xts(allQuotes[,-1], order.by = as.Date(allQuotes[,1]))
head(GSPC)
dbDisconnect(ch)
dbUnloadDriver(drv)
setSymbolLookup(NASDAQ=list(name='^IXIC',src='mysql',
db.fields = c('Index','Open','High','Low','Close','Volume','AdjClose'),
user = 'root', password = 'cloudera', dbname = 'Quotes'))
getSymbols('NASDAQ')
### Defining the Prediction Tasks
# T indicator: total k-days-ahead percentage variation of the average daily
# price, summing only the variations whose absolute value exceeds the target
# margin (i.e. moves large enough to be tradeable).
#
# quotes     - OHLC quotes object (xts or plain matrix/data.frame)
# tgt.margin - minimum absolute return that counts as a relevant move
# n.days     - horizon (in days) over which future returns are aggregated
T.ind <- function(quotes, tgt.margin = 0.025, n.days = 10) {
  # average of the High, Low and Close quotes of each day
  v <- apply(HLC(quotes), 1, mean)
  r <- matrix(NA, ncol = n.days, nrow = NROW(quotes))
  # column x = percentage change of v over x days, aligned to the current day
  for (x in seq_len(n.days)) r[, x] <- Next(Delt(v, k = x), x)
  # Fixed: the original filter was 'x > tgt.margin | x < tgt.margin', which
  # is TRUE for every x != tgt.margin and therefore summed (almost) all
  # returns; relevant negative moves must be compared against -tgt.margin.
  x <- apply(r, 1, function(x) sum(x[x > tgt.margin | x < -tgt.margin]))
  if (is.xts(quotes)) xts(x, time(quotes)) else x
}
setSymbolLookup(IBEX35=list(name = '^IBEX', src = 'yahoo'))
getSymbols(c('IBEX35'))
IBEX35 <-as.xts(get.hist.quote("^IBEX",start = "1993-02-15",
quote = c("Open","High","Low","Close","Volume","AdjClose")))
candleChart(last(IBEX35,'6 months'), theme = 'white', TA = NULL)
avgPrice <- function(p) apply(HLC(p), 1, mean)
addAvgPrice <- newTA(FUN = avgPrice, col = 1, legend = 'AvgPrice')
addT.ind <- newTA(FUN = T.ind, col = 'red', legend = 'tgtRet')
addAvgPrice(on = 1)
addT.ind()
IBEX35[,2] <- IBEX35[,2]+1e-6
IBEX35[,5] <- IBEX35[,5]+1e-6
emv <- EMV(HLC(IBEX35)[,-3], Vo(IBEX35), n=9, maType="EMA", vol.divisor=10000)
# Thin wrappers around TTR/quantmod indicators that extract the single column
# used as a model feature (specifyModel needs functions of the quotes alone).
myATR <- function(x) ATR(HLC(x))[,'atr']
mySMI <- function(x) SMI(HLC(x))[,'SMI']
myADX <- function(x) ADX(HLC(x))[,'ADX']
myAroon <- function(x) aroon(x[,c('High','Low')])$oscillator
myBB <- function(x) BBands(HLC(x))[,'pctB']
myChaikinVol <- function(x) Delt(chaikinVolatility(x[,c("High","Low")]))[,1]
myCLV <- function(x) EMA(CLV(HLC(x)))[,1]
myEMV <- function(x) EMV(x[,c('High','Low')],x[,'Volume'])[,2]
myMACD <- function(x) MACD(Cl(x))[,2]
myMFI <- function(x) MFI(x[,c("High","Low","Close")], x[,"Volume"])
mySAR <- function(x) SAR(x[,c('High','Close')]) [,1]
myVolat <- function(x) volatility(OHLC(x),calc="garman")[,1]
library(randomForest)
colnames(IBEX35) <- c("Open", "High", "Low", "Close","Volume","Adjusted")
data.model <- specifyModel(T.ind(IBEX35) ~ Delt(Cl(IBEX35),k=1:10) +
myATR(IBEX35) + mySMI(IBEX35) + myADX(IBEX35) + myAroon(IBEX35) +
myBB(IBEX35) + myChaikinVol(IBEX35) + myCLV(IBEX35) +
CMO(Cl(IBEX35)) + EMA(Delt(Cl(IBEX35))) + myEMV(IBEX35) +
myVolat(IBEX35) + myMACD(IBEX35) + myMFI(IBEX35) + RSI(Cl(IBEX35)) +
mySAR(IBEX35) + runMean(Cl(IBEX35)) + runSD(Cl(IBEX35)))
set.seed(1234)
rf <- buildModel(data.model, method = 'randomForest',
training.per = c(start(IBEX35),index(IBEX35["2016-02-01"])),
ntree = 500, importance = T)
ex.model <- specifyModel(T.ind(CABK.MC) ~ Delt(Cl(CABK.MC),k = 1:3))
data <- modelData(ex.model, data.window = c('2015-09-01','2016-02-01'))
varImpPlot(rf@fitted.model, type = 1)
imp <- importance(rf@fitted.model, type = 1)
rownames(imp)[which (imp > 10)]
data.model <- specifyModel(T.ind(IBEX35) ~ Delt(Cl(IBEX35),k=3) +
Delt(Cl(IBEX35),k=5) + Delt(Cl(IBEX35),k=7) + Delt(Cl(IBEX35),k=8) +
Delt(Cl(IBEX35),k=10) + myATR(IBEX35) + mySMI(IBEX35) + myADX(IBEX35) +
myAroon(IBEX35) + myCLV(IBEX35))
Tdata.train <- as.data.frame(modelData(data.model,
data.window = c('1973-03-01','1999-12-31')))
Tdata.eval <- na.omit(as.data.frame(modelData(data.model,
data.window = c('2000-01-01','2016-04-30'))))
Tform <- as.formula('T.ind.IBEX35 ~ .')
### The Prediction Models
set.seed(1234)
library(nnet)
norm.data <- scale(Tdata.train)
nn <- nnet(Tform, norm.data[1:1000,], size = 10, decay = 0.01,
maxit = 1000, linout = T, trace = T)
norm.preds <- predict(nn, norm.data[1001:2000, ])
preds <- unscale(norm.preds, norm.data)
sigs.nn <- trading.signals(preds, 0.1, -0.1)
true.sigs <- trading.signals(Tdata.train[1001:2000, "T.ind.IBEX35"], 0.1, -0.1)
sigs.PR(sigs.nn, true.sigs)
signals <- trading.signals(Tdata.train[, "T.ind.IBEX35"], 0.1, -0.1)
norm.data <- data.frame(signals = signals, scale(Tdata.train[, -1]))
nn <- nnet(signals ~ ., norm.data[1:1000, ], size = 10, decay = 0.01,
maxit = 1000, trace = T)
preds <- predict(nn, norm.data[1001:2000, ], type = "class")
sigs.PR(preds, norm.data[1001:2000, 1])
library(e1071)
sv <- svm(Tform, Tdata.train[1:1000, ], gamma = 0.001, cost = 100)
s.preds <- predict(sv, Tdata.train[1001:2000, ])
sigs.svm <- trading.signals(s.preds, 0.1, -0.1)
true.sigs <- trading.signals(Tdata.train[1001:2000, "T.ind.IBEX35"], 0.1, -0.1)
sigs.PR(sigs.svm, true.sigs)
library(kernlab)
data <- cbind(signals = signals, Tdata.train[, -1])
ksv <- ksvm(signals ~ ., data[1:1000, ], C = 10)
ks.preds <- predict(ksv, data[1001:2000, ])
sigs.PR(ks.preds, data[1001:2000, 1])
library(earth)
e <- earth(Tform, Tdata.train[1:1000, ])
e.preds <- predict(e, Tdata.train[1001:2000, ])
sigs.e <- trading.signals(e.preds, 0.1, -0.1)
true.sigs <- trading.signals(Tdata.train[1001:2000, "T.ind.IBEX35"], 0.1, -0.1)
sigs.PR(sigs.e, true.sigs)
### From Prediction to Action
# Trading policy 1: holds at most one open position at a time, attaches a
# profit-target and a stop-loss order to every newly opened position, and
# force-closes positions whose holding time exceeds hold.time days.
#
# signals    - vector of signals ('b' buy, 's' sell, 'h' hold), one per day
# market     - market quotes up to and including today
# opened.pos - currently opened positions, one row per position
# money      - money currently available
# bet        - fraction of money committed to a new position
# hold.time  - maximum number of days a position may stay open
# exp.prof   - target profit margin for the closing limit order
# max.loss   - maximum accepted loss for the closing stop order
#
# Returns a data.frame of orders, or NULL when nothing is to be done. Each
# row carries: order (1 buy / -1 sell), order.type (1/2/3 -- presumably
# market/limit/stop, confirm against the trading.simulator docs), val,
# action ('open'/'close') and posID.
policy.1 <- function(signals, market, opened.pos, money, bet = 0.2, hold.time = 10,
    exp.prof = 0.025, max.loss = 0.05) {
  d <- NROW(market) # this is the ID of today
  orders <- NULL
  nOs <- NROW(opened.pos)
  # nothing open and today's signal is 'hold': no orders at all
  if (!nOs && signals[d] == 'h') return(orders)
  # First lets check if we can open new positions
  # i) long positions
  if (signals[d] == 'b' && !nOs) {
    quant <- round(bet * money/market[d,'Close'], 0)
    if (quant > 0)
      # three coupled orders: open long now, close at the profit target,
      # close at the stop-loss price
      orders <- rbind(orders,
        data.frame(order = c(1, -1, -1), order.type = c(1,2,3),
          val = c(quant,
            market[d, 'Close']*(1 + exp.prof),
            market[d, 'Close']*(1 - max.loss)
          ),
          action = c('open','close','close'),
          posID = c(NA, NA, NA)
        )
      )
  # ii) short position
  } else if (signals[d] == 's' && !nOs) {
    # this is the nr of stocks we already need to buy
    # because of currently opened short positions
    need2buy <- sum(opened.pos[opened.pos[,'pos.type'] == -1,
      "N.stocks"])*market[d,'Close']
    quant <- round(bet*(money - need2buy)/market[d, 'Close'], 0)
    if (quant > 0)
      # mirror of the long case: open short, close at the lower target,
      # close at the upper stop price
      orders <- rbind(orders,
        data.frame(order = c(-1, 1, 1), order.type = c(1,2,3),
          val = c(quant,
            market[d, 'Close']*(1 - exp.prof),
            market[d, 'Close']*(1 + max.loss)
          ),
          action = c('open','close','close'),
          posID = c(NA, NA, NA)
        )
      )
  }
  # Now lets check if we need to close positions
  # because their holding time is over
  if (nOs)
    for (i in 1:nOs) {
      if (d - opened.pos[i, 'Odate'] >= hold.time)
        # close at market, reversing the sign of the opening order
        orders <- rbind(orders,
          data.frame(order = -opened.pos[i,'pos.type'],
            order.type = 1,
            val = NA,
            action = 'close',
            posID = rownames(opened.pos)[i]
          )
        )
    }
  orders
}
# Trading policy 2: like policy.1 but without a holding-time limit and without
# the one-open-position restriction -- new positions may be opened even while
# others are still open; positions are closed only by the attached profit
# target or stop-loss orders. See policy.1 for parameter semantics.
policy.2 <- function(signals, market, opened.pos, money, bet = 0.2,
    exp.prof = 0.025, max.loss = 0.05) {
  d <- NROW(market) # this is the ID of today
  orders <- NULL
  nOs <- NROW(opened.pos)
  if (!nOs && signals[d] == 'h') return(orders)
  # First lets check if we can open new positions
  # i) long positions
  if (signals[d] == 'b') {
    quant <- round(bet * money/market[d,'Close'], 0)
    if (quant > 0)
      # open long + profit-target close + stop-loss close
      orders <- rbind(orders,
        data.frame(order = c(1, -1, -1), order.type = c(1,2,3),
          val = c(quant,
            market[d, 'Close']*(1 + exp.prof),
            market[d, 'Close']*(1 - max.loss)
          ),
          action = c('open','close','close'),
          posID = c(NA, NA, NA)
        )
      )
  # ii) short position
  } else if (signals[d] == 's') {
    # this is the nr of stocks we already need to buy
    # because of currently opened short positions
    need2buy <- sum(opened.pos[opened.pos[,'pos.type'] == -1,
      "N.stocks"])*market[d,'Close']
    quant <- round(bet*(money - need2buy)/market[d, 'Close'], 0)
    if (quant > 0)
      orders <- rbind(orders,
        data.frame(order = c(-1, 1, 1), order.type = c(1,2,3),
          val = c(quant,
            market[d, 'Close']*(1 - exp.prof),
            market[d, 'Close']*(1 + max.loss)
          ),
          action = c('open','close','close'),
          posID = c(NA, NA, NA)
        )
      )
  }
  orders
}
# Train and test periods
start <- 100
len.tr <- 2000
len.ts <- 500
tr <- start:(start + len.tr -1)
ts <- (start + len.tr):(start + len.tr + len.ts -1)
# getting quotes for the testing period
date <- rownames(Tdata.train[start + len.tr,])
market <- IBEX35[paste(date, '/',sep = '')][1:len.ts]
# learning the model and obtaining its signal predictions
s <- svm(Tform, Tdata.train[tr,], cost = 10, gamma = 0.01)
p <- predict(s, Tdata.train[ts,])
sig <- trading.signals(p, 0.1, -0.1)
# Now using the simulator trader
t1 <- trading.simulator(market, sig, 'policy.1', list(exp.prof = 0.05,
bet = 0.2, hold.time = 30))
tradingEvaluation(t1)
plot(t1, market, theme = 'white', name = 'IBEX35')
t2 <- trading.simulator(market, sig, 'policy.2', list(exp.prof = 0.05,
bet = 0.3))
tradingEvaluation(t2)
plot(t2, market, theme = 'white', name = 'IBEX35')
start <- 100
len.tr <- 1000
len.ts <- 500
tr <- start:(start + len.tr -1)
ts <- (start + len.tr):(start + len.tr + len.ts -1)
s <- svm(Tform, Tdata.train[tr,], cost = 10, gamma = 0.01)
p <- predict(s, Tdata.train[ts,])
sig <- trading.signals(p, 0.1, -0.1)
t2 <- trading.simulator(market, sig, 'policy.2', list(exp.prof = 0.05,
bet = 0.2))
summary(t2)
tradingEvaluation(t2)
### Model Evaluation and Selection
# Monte Carlo learner wrappers used by the experimental comparison. Each takes
# a formula plus train/test sets and returns trading signals ('s', 'h', 'b').
# The *R variants regress the numeric T indicator and threshold the predicted
# values with b.t (buy) and s.t (sell); the *C variants first discretize the
# target into signals and train a classifier directly.
MC.svmR <- function(form, train, test, b.t = 0.1, s.t = -0.1, ...) {
  require(e1071)
  t <- svm(form, train, ...)
  p <- predict(t, test)
  trading.signals(p, b.t, s.t)
}
MC.svmC <- function(form, train, test, b.t = 0.1, s.t = -0.1, ...) {
  require(e1071)
  tgtName <- all.vars(form)[1]
  train[, tgtName] <- trading.signals(train[, tgtName], b.t, s.t)
  t <- svm(form, train, ...)
  p <- predict(t, test)
  # pin the level order so downstream factor comparisons line up
  factor(p, levels = c('s', 'h', 'b'))
}
MC.nnetR <- function(form, train, test, b.t = 0.1, s.t = -0.1, ...) {
  require(nnet)
  t <- nnet(form, train, ...)
  p <- predict(t, test)
  trading.signals(p, b.t, s.t)
}
MC.nnetC <- function(form, train, test, b.t = 0.1, s.t = -0.1, ...) {
  require(nnet)
  tgtName <- all.vars(form)[1]
  train[, tgtName] <- trading.signals(train[, tgtName], b.t, s.t)
  t <- nnet(form, train, ...)
  p <- predict(t, test, type = 'class')
  factor(p, levels = c('s', 'h', 'b'))
}
MC.earth <- function(form, train, test, b.t = 0.1, s.t = -0.1, ...) {
  require(earth)
  t <- earth(form, train, ...)
  p <- predict(t, test)
  trading.signals(p, b.t, s.t)
}
# Evaluation drivers: each trains via the matching MC.* wrapper and feeds the
# predicted signals into eval.stats().
# 'learner' is a string ('svmR', 'nnetC', ...); note that inside slide/grow
# the call learner(...) still resolves to the learner() constructor function,
# because R's function-call lookup skips non-function bindings.

# Train once on the full training window ("single" model).
singleModel <- function(form, train, test, learner, policy.func, ...) {
  p <- do.call(paste('MC', learner, sep = '.'), list(form, train, test, ...))
  eval.stats(form, train, test, p, policy.func = policy.func)
}
# Re-train on a sliding window every relearn.step observations.
slide <- function(form, train, test, learner, relearn.step, policy.func, ...) {
  real.learner <- learner(paste('MC', learner, sep = '.'), pars = list( ...))
  p <- slidingWindowTest(real.learner, form, train, test, relearn.step)
  # window-test predictions come back as integer codes 1/2/3
  p <- factor(p, levels = 1:3, labels = c('s', 'h', 'b'))
  eval.stats(form, train, test, p, policy.func = policy.func)
}
# Re-train on a growing window every relearn.step observations.
grow <- function(form, train, test, learner, relearn.step, policy.func, ...) {
  real.learner <- learner(paste('MC', learner, sep = '.'), pars = list( ...))
  p <- growingWindowTest(real.learner, form, train, test, relearn.step)
  p <- factor(p, levels = 1:3, labels = c('s', 'h', 'b'))
  eval.stats(form, train, test, p, policy.func = policy.func)
}
# Combined evaluation of a set of predicted signals: signal-level precision /
# recall plus the economic results of simulated trading over the test period.
# Extra arguments (...) are forwarded to trading.simulator() (e.g.
# policy.func). Relies on the global IBEX35 quotes object for market data.
eval.stats <- function(form, train, test, preds, b.t = 0.1, s.t = -0.1, ...) {
  # Signals evaluation
  tgtName <- all.vars(form)[1]
  test[,tgtName] <- trading.signals(test[,tgtName], b.t, s.t)
  st <- sigs.PR(preds, test[,tgtName])
  dim(st) <- NULL  # flatten the stats matrix into a plain vector
  names(st) <- paste(rep(c('prec','rec'), each = 3), c('s', 'b', 'sb'), sep = '.')
  # Trading evaluation
  date <- rownames(test)[1]
  # quotes from the first test date onward, one row per prediction
  market <- IBEX35[paste(date, "/", sep = '')][1:length(preds),]
  trade.res <- trading.simulator(market, preds, ...)
  c(st, tradingEvaluation(trade.res))
}
# Fixed-parameter policy wrappers: trading.simulator() receives the policy by
# name, so each parameter combination needs its own named function.
pol1 <- function(signals, market, op, money)
  policy.1(signals, market, op, money,
    bet = 0.2, exp.prof = 0.25, max.loss = 0.05, hold.time = 10)
pol2 <- function(signals, market, op, money)
  policy.1(signals, market, op, money,
    bet = 0.2, exp.prof = 0.25, max.loss = 0.05, hold.time = 20)
pol3 <- function(signals, market, op, money)
  policy.2(signals, market, op, money,
    bet = 0.2, exp.prof = 0.25, max.loss = 0.05)
# This list of learners we will use
TODO <- c('svmR', 'svmC', 'earth', 'nnetR', 'nnetC')
# The datasets used in the comparison
DSs <- list(dataset(Tform, Tdata.train, 'CABK.MC'))
# Monte Carlo setting used
MCsetts <- mcSettings(10,
50,
30,
2)
# Variants to try for all learners
VARS <- list()
VARS$svmR <- list(cost = c(10, 150), gamma =c(0.01, 0.001),
policy.func = c('pol1','pol2','pol3'))
VARS$svmC <- list(cost = c(10, 150), gamma =c(0.01, 0.001),
policy.func = c('pol1','pol2','pol3'))
VARS$earth <- list(nk = c(10, 17), degree =c(1, 2), thresh = c(0.01, 0.001),
policy.func = c('pol1','pol2','pol3'))
VARS$nnetR <- list(linout = T, maxit =750, size = c(5, 10),
decay = c(0.001, 0.01), policy.func = c('pol1','pol2','pol3'))
VARS$nnetC <- list(maxit =750, size = c(5, 10), decay = c(0.001, 0.01),
policy.func = c('pol1','pol2','pol3'))
#main loop
# main loop
for(td in TODO) {
assign(td,
experimentalComparison(
DSs,
c(
do.call('variants',
c(list('singleModel',learner = td), VARS[[td]],
varsRootName = paste('single',td, sep = '.'))),
do.call('variants',
c(list('slide',learner = td, relearn.step = c(60,120)),
VARS[[td]],
varsRootName = paste('slide', td, sep='.'))),
do.call('variants',
c(list('grow',learner=td,
relearn.step = c(60,120)),
VARS[[td]],
varsRootName = paste('grow', td, sep = '.')))
),
MCsetts)
)
# save the results
save(list = td,file = paste(td, 'Rdata', sep = '.'))
}
load('svmR.Rdata')
load('svmC.Rdata')
load('earth.Rdata')
load('nnetR.Rdata')
load('nnetC.Rdata')
tgtStats <- c('prec.sb','Ret','PercProf','MaxDD','SharpeRatio')
allSysRes <- join(subset(svmR, stats = tgtStats),
subset(svmC, stats = tgtStats),
subset(nnetR, stats = tgtStats),
subset(nnetC, stats = tgtStats),
subset(earth, stats = tgtStats),
by = 'variants')
rankSystems(allSysRes, 5, maxs = c(T, T, T, T, T))
summary(subset(svmC, stats = c('Ret', 'RetOverBH', 'PercProf', 'NTrades'),
vars = c('slide.svmC.v5', 'slide.svmC.v6')))
fullResults <- join(svmR, svmC, earth, nnetC, nnetR, by = "variants")
nt <- statScores(fullResults, "NTrades")[[1]]
rt <- statScores(fullResults, "Ret")[[1]]
pp <- statScores(fullResults, "PercProf")[[1]]
s1 <- names(nt)[which (nt > 32)]
s2 <- names(rt)[which (rt > 8)]
s3 <- names(pp)[which (pp > 5)]
namesBest <- intersect(intersect(s1, s2), s3)
summary(subset(fullResults, stats = tgtStats, vars = namesBest))
compAnalysis(subset(fullResults, stats = tgtStats, vars = namesBest ))
plot(subset(fullResults, stats = c('Ret', 'RetOverBH', 'MaxDD'), vars = namesBest))
getVariant("single.earth.v20", earth)
getVariant("grow.earth.v40", earth)
# The Trading System
data <- tail(Tdata.train, 100)
results <- list()
for(name in namesBest) {
sys <- getVariant(name, fullResults)
results[[name]] <- runLearner(sys, Tform, data, Tdata.eval)
}
results <- t(as.data.frame(results))
results[, c("Ret", "RetOverBH", "MaxDD", "SharpeRatio", "NTrades", "PercProf")]
getVariant('grow.earth.v40', fullResults)
data <- tail(Tdata.train, 50)
results <- list()
for (name in namesBest) {
sys <- getVariant(name, fullResults)
results[[name]] <- runLearner(sys, data, Tdata.eval)
}
results <- t(as.data.frame(results))
results[, c("Ret", "RetOverBH", "MaxDD", "SharpeRatio", "NTrades", "PercProf")]
getVariant("grow.earth.v40", fullResults)
model <- learner("MC.nnetR", list(maxit = 750, linout = T, trace = F, size = 10,
decay = 0.01))
preds <- growingWindowTest(model, Tform, data, Tdata.eval, relearn.step = 120)
signals <- factor(preds, levels = 1:3, labels = c("s", "h", "b"))
date <- rownames(Tdata.eval)[1]
market <- IBEX35[paste(date, "/", sep = '')][1:length(signals),]
trade.res <- trading.simulator(market, signals, policy.func = "pol3")
plot(trade.res, market, theme = "white", name = "CABK - final test")
# Performance analysis of the final trading run (trade.res from above):
# cumulative and yearly returns plus standard risk/return tables.
library(PerformanceAnalytics)
rets <- Return.calculate(as.xts(trade.res@trading$Equity))
# Fixed typos in the user-facing chart title ("Cumlative ... startegy").
chart.CumReturns(rets, main = "Cumulative returns of strategy", ylab = "returns")
yearlyReturn(as.xts(trade.res@trading$Equity))
plot(100 * yearlyReturn(as.xts(trade.res@trading$Equity)),
  main = 'Yearly percentage returns of the trading system')
abline(h = 0, lty = 2)
table.CalendarReturns(R = rets)
table.AnnualizedReturns(rets)
table.DownsideRisk(rets)
|
3c3395f5256bb22efb3f9a39cfebd075ce995632
|
bc73b51699fb07f7dc81ea189cfbc2e086ffa794
|
/man/getShapleyPredictionResponse.Rd
|
054e759a742a76d6e62d750ff76d228686cfbc93
|
[] |
no_license
|
redichh/ShapleyR
|
ec6c8bfac9925ac6bdc89110c8ba48072bf5278c
|
a977c041519e19f9b7f230e47c79235b3194d4f0
|
refs/heads/master
| 2021-07-06T04:58:19.363784
| 2019-03-05T22:55:16
| 2019-03-05T22:55:16
| 113,659,609
| 29
| 10
| null | 2019-03-05T22:55:17
| 2017-12-09T10:02:17
|
R
|
UTF-8
|
R
| false
| true
| 392
|
rd
|
getShapleyPredictionResponse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getters.R
\name{getShapleyPredictionResponse}
\alias{getShapleyPredictionResponse}
\title{Getter function for a shapley object.}
\usage{
getShapleyPredictionResponse(shapley.list)
}
\arguments{
\item{shapley.list}{A shapley object.}
}
\description{
Returns the prediction response from the given shapley object.
}
|
3eef7bbc986603c93910100bbeffb82e04286f92
|
5eca3fb0e511160771fab5cb8bc68c239fdc707f
|
/man/dfToMap.Rd
|
1dd857cf241fc144f3ffd27763e63bc7b6e6f0ee
|
[] |
no_license
|
kelly-jamrog/fredMaps
|
71c7b78b6c77864435e72ce9eb6e5e70acd6bf7f
|
7f5f680b89222a6cfd457447e88aa57e4bd7971a
|
refs/heads/master
| 2020-04-11T11:00:19.254693
| 2018-12-14T05:06:19
| 2018-12-14T05:06:19
| 161,733,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 491
|
rd
|
dfToMap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfToMap.R
\name{dfToMap}
\alias{dfToMap}
\title{Data Frame to Map}
\usage{
dfToMap(map.df, fill = NULL)
}
\arguments{
\item{map.df}{The directory containing the shapefile}
\item{fill}{The column to be used as the fill}
}
\value{
A ggplot2 object
}
\description{
\code{dfToMap} plots dataframes of the class \code{map.df} using \code{ggplot2}.
}
\details{
See vignette for examples on how to use this function.
}
|
e23caed6328fc773c26a8eb94d5f7d93ded271e6
|
4ae5c00b3152c24a2de6cd93ef512aa663eb6882
|
/methods.R
|
1b22941123194f3b37dc14f1b7da2640a1a50430
|
[] |
no_license
|
svkucheryavski/rpcagui
|
d69082202d53ddea0e21be23014aec1af3258936
|
72002ac5e07fa00707b7903942c3fcd86c328c1e
|
refs/heads/master
| 2021-01-25T06:17:54.504521
| 2017-06-07T12:06:14
| 2017-06-07T12:06:14
| 93,547,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,290
|
r
|
methods.R
|
# Build a PCA model list from a loadings matrix.
#
# project - list with element 'data': numeric matrix (observations x variables)
# input   - UI input object; unused here, kept for a uniform signature
# P       - loadings matrix (variables x components)
#
# Returns list(P = loadings, T = scores, expvar = explained variance share
# per component, relative to the total sum of squares of the data).
getModel = function(project, input, P) {
  T = project$data %*% P  # scores
  expvar = rep(0, ncol(P))
  totvar = sum(project$data^2)
  for (i in seq_len(ncol(P))) {
    # rank-1 reconstruction of the data from component i alone;
    # drop = FALSE (was drop = F) keeps the operands as one-column matrices
    xx = T[, i, drop = FALSE] %*% t(P[, i, drop = FALSE])
    expvar[i] = sum(xx^2) / totvar
  }
  model = list(P = P, T = T, expvar = expvar)
  model
}
# Dispatch to the selected PCA algorithm and return the loadings matrix P.
#
# For every method the 'rand' algorithm option first compresses the data to a
# small sketch matrix via getB() (randomized projection) and runs NIPALS on
# that sketch; otherwise the chosen method runs on the full data.
getP = function(project, input) {
  if (input$method == 'svd') {
    if (input$algorithm == 'rand')
      P = nipalspca(getB(project$data, k = input$ncomp, q = input$q, p = input$p), input$ncomp)
    else
      P = svdpca(project$data, input$ncomp)
  } else if (input$method == 'nipals') {
    if (input$algorithm == 'rand')
      P = nipalspca(getB(project$data, k = input$ncomp, q = input$q, p = input$p), input$ncomp)
    else
      P = nipalspca(project$data, input$ncomp)
  } else if (input$method == 'eigcov') {
    if (input$algorithm == 'rand')
      P = nipalspca(getB(project$data, k = input$ncomp, q = input$q, p = input$p), input$ncomp)
    else
      # Fixed: the 'eigcov' method previously fell through to svdpca(),
      # leaving eigcovpca() dead code; dispatch to it as evidently intended.
      P = eigcovpca(project$data, input$ncomp)
  }
  P
}
# Randomized range sketch of X: project onto l = k + p random directions,
# orthonormalize via QR, optionally refine with q power iterations, and
# return B = Q' X -- a small matrix whose leading subspace approximates X's.
#
# X    - numeric matrix
# k    - target rank (defaults to ncol(X))
# q    - number of power iterations (helps when the spectrum decays slowly)
# p    - oversampling amount
# dist - 'unif' for uniform(-1, 1) test vectors, otherwise standard normal
getB = function(X, k = NULL, q = 0, p = 0, dist = 'unif') {
  nrows = nrow(X)
  ncols = ncol(X)
  if (is.null(k))
    k = ncols
  l = k + p
  if (dist == 'unif')
    Y = X %*% matrix(runif(ncols * l, -1, 1), ncols, l)
  else
    Y = X %*% matrix(rnorm(ncols * l), ncols, l)
  Q = qr.Q(qr(Y))
  if (q > 0) {
    # Fixed: the loop bound was the undefined variable 'niter', so any q > 0
    # raised an error; the number of power iterations is the parameter q.
    for (i in seq_len(q)) {
      Y = crossprod(X, Q)
      Q = qr.Q(qr(Y))
      Y = X %*% Q
      Q = qr.Q(qr(Y))
    }
  }
  B = crossprod(Q, X)
  B
}
# PCA loadings via singular value decomposition: the first ncomp right
# singular vectors of X, returned as a (variables x ncomp) matrix.
svdpca = function(X, ncomp = 4) {
  P = svd(X, nu = ncomp, nv = ncomp)$v
  # drop = FALSE keeps P a one-column matrix when ncomp = 1; the original
  # silently collapsed it to a vector, which breaks ncol(P) in getModel().
  P = P[, 1:ncomp, drop = FALSE]
  P
}
# PCA loadings via the NIPALS algorithm: extract one component at a time by
# alternating regressions of loadings on scores and scores on loadings until
# the scores converge, then deflate the residual matrix and repeat.
# Returns a (variables x ncomp) matrix of unit-norm loadings.
nipalspca = function(X, ncomp = 4) {
  # (removed dead locals of the original: nobj, tau, th were never used)
  nvar = ncol(X)
  P = matrix(0, nrow = nvar, ncol = ncomp)
  E = X  # residual ("deflated") data matrix
  for (i in seq_len(ncomp)) {
    # initialize the scores with the column of largest spread
    ind = which.max(apply(E, 2, sd))
    t = E[, ind, drop = FALSE]
    t0 = t - t  # zero vector: forces at least one iteration below
    while (sqrt(as.vector(crossprod(t - t0))) > 0.00001) {
      p = crossprod(E, t) / as.vector(crossprod(t))
      p = p / as.vector(crossprod(p)) ^ 0.5  # normalize loadings to unit length
      t0 = t
      t = (E %*% p) / as.vector(crossprod(p))
    }
    E = E - tcrossprod(t, p)  # deflate: remove the extracted component
    P[, i] = p
  }
  P
}
# PCA loadings via eigendecomposition of the covariance matrix.
# Returns the first ncomp eigenvectors as a (variables x ncomp) matrix.
eigcovpca = function(X, ncomp = 4) {
  # Fixed: cov() already normalizes by (nrow(X) - 1); the original divided by
  # it a second time. Harmless for the returned eigenvectors (scaling a
  # matrix rescales only its eigenvalues), but wrong as a covariance.
  covX = cov(X)
  e = eigen(covX)
  # drop = FALSE keeps the result a matrix when ncomp = 1
  P = e$vectors[, 1:ncomp, drop = FALSE]
  P
}
|
3ca3eaed8949c40cad2c23a23a7eb9218ef24423
|
e25aa8717cccfbc951c398dae9e6eb12a2619c92
|
/functions.R
|
e5d76c6c4e94a1c613ddbb8f5b5ebc709b28a65d
|
[
"MIT"
] |
permissive
|
paulsharpeY/ab-meta
|
e1e8f31bbc4cf75fb5515552b3d2af6c0e9cf4f6
|
f762302ccf9db08600423cc0d1d9d3f2a904f025
|
refs/heads/main
| 2023-05-28T22:15:08.906478
| 2021-06-12T19:58:15
| 2021-06-12T19:58:15
| 373,577,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,231
|
r
|
functions.R
|
# meta-analysis functions
# If SMD, calculate pooled SD as per Cochrane Handbook, which in 6.3.1, says to use Hedge's g
# Standardized Mean Difference (SMD, Cohen's d)
# https://training.cochrane.org/handbook/current/chapter-06#section-6-5-1
# I don't think https://training.cochrane.org/handbook/current/chapter-23#section-23-2-7
# is relevant, as these are parallel group trials, not crossover trials
## BOOKMARK generic functions
# Hostname of the current machine, as reported by Sys.info(), with the
# element name stripped.
get_hostname <- function() {
  unname(Sys.info()["nodename"])
}
#' Mean difference
#'
#' Standardized difference (m1 - m2) / sd. With the pooled standard
#' deviation this is Hedge's g (https://www.statisticshowto.com/hedges-g/).
#'
#' @param m1 mean 1
#' @param m2 mean 2
#' @param sd standard deviation
#' @return numeric
d <- function(m1, m2, sd) (m1 - m2) / sd
#' Pooled standard deviation
#'
#' Pooled SD for two groups with equal or unequal n
#' (https://www.statisticshowto.com/pooled-standard-deviation/).
#'
#' @param n1 n for group 1
#' @param n2 n for group 2
#' @param sd1 standard deviation for group 1
#' @param sd2 standard deviation for group 2
#' @return numeric
sd_pooled <- function(n1, n2, sd1, sd2) {
  df1 <- n1 - 1
  df2 <- n2 - 1
  sqrt((df1 * sd1^2 + df2 * sd2^2) / (df1 + df2))
}
#' Convert 95% confidence interval to standard error
#'
#' SE = (upper - lower) / (2 * 1.96), per the Cochrane Handbook:
#' https://training.cochrane.org/handbook/current/chapter-06#section-6-3-1
#'
#' @param u upper interval
#' @param l lower interval
#' @return numeric
ci95_to_se <- function(u, l) {
  (u - l) / 3.92
}
#' 95% confidence interval for Cohen's d (Rosnow & Rosenthal, 2009)
#'
#' @param d Cohen's d
#' @param n1 n in group1
#' @param n2 n in group2
#' @return numeric
d_ci95 <- function(d, n1, n2) {
  df <- n1 + n2 - 2
  n_total <- n1 + n2
  variance <- (n_total / (n1 * n2) + d^2 / (2 * df)) * (n_total / df)
  sqrt(variance)
}
#' Set effect size for a study
#'
#' Calculates Cohen's d and its 95% confidence interval for each study.
#'
#' @param data data frame with a `study` column whose columns 2-7 are,
#'   in order: mean1, mean2, sd, n1, n2, group.
#' @return tibble with columns study, mean1, mean2, d, ci, l, u, group
set_effect <- function(data) {
  # Columns 2-7 are positional; rename them so downstream code can rely
  # on fixed names regardless of the input's own labels.
  v <- select(data, 2:7)
  oldnames <- names(v)
  newnames <- c('mean1', 'mean2', 'sd', 'n1', 'n2', 'group')
  v <- v %>%
    rename_with(~ newnames[which(oldnames == .x)], .cols = oldnames)
  # ci/l/u start at 0 and are filled in by mutate() once d exists.
  # Fix: the original's last expression was `df <- tibble(...)`, which
  # returned the result invisibly; return it directly instead.
  tibble(
    study = data$study,
    mean1 = v$mean1,
    mean2 = v$mean2,
    d = d(v$mean1, v$mean2, v$sd),
    ci = 0, l = 0, u = 0
  ) %>% mutate(
    ci = d_ci95(d, v$n1, v$n2),
    l = d - .data$ci,
    u = d + .data$ci,
    group = v$group
  )
}
# Fit a Bayesian random-effects meta-analysis with brms and summarise it
# in a metafor-style table of per-study and pooled effects.
#
# Args:
#   model:  NOTE(review): overwritten by the brm() fit below and never
#           read -- kept only for call compatibility; confirm.
#   data:   data frame with Study, d (effect size) and se (standard
#           error).  NOTE(review): the default references a global `dat`
#           that is not defined in this file view.
#   average_effect_label: row label used for the pooled effect.
#   iter:   MCMC iterations per chain, forwarded to brm().
#   sd_prior: prior string for the between-study SD.
#   adapt_delta: Stan NUTS adapt_delta control value.
#   cache_file: prefix for brm()'s fitted-model cache file.
# Returns: tibble with Study, est, ci_low, ci_high (posterior mean and
#   HDCI), with the pooled effect as the last row.
brms_function <- function(model, data = dat, average_effect_label = 'Pooled effect',
    iter = '50e4', sd_prior = "cauchy(0, .3)", adapt_delta = 0.8, cache_file = 'foo') {
  ## https://github.com/mvuorre/brmstools
  ## https://vuorre.netlify.com/post/2016/09/29/meta-analysis-is-a-special-case-of-bayesian-multilevel-modeling/
  # store study names to avoid clashes with commas in spread_draws() column specifications
  data <- data %>%
    mutate(study_number = as.numeric(rownames(data)))
  study_names <- data %>% select(study_number, Study)
  # Random-effects model: observed d with known SE, study-level intercepts.
  model <- brm(
    d | se(se) ~ 1 + (1 | study_number),
    data = data,
    chains=8, iter=iter,
    prior = c(prior_string("normal(0,1)", class = "Intercept"),
      prior_string(sd_prior, class = "sd")),
    control = list(adapt_delta = adapt_delta),
    file = paste(cache_file, 'brms', sep = '-')
  )
  # For an explanation of tidybayes::spread_draws(), refer to http://mjskay.github.io/tidybayes/articles/tidy-brms.html
  # Study-specific effects are deviations + average
  draws <- spread_draws(model, r_study_number[study_number, term], b_Intercept) %>%
    rename(b = b_Intercept) %>%
    mutate(b = r_study_number + b) %>%
    left_join(study_names, by = 'study_number')
  # Average effect
  draws_overall <- spread_draws(model, b_Intercept) %>%
    rename(b = b_Intercept) %>%
    mutate(Study = average_effect_label)
  # Combine average and study-specific effects' data frames
  combined_draws <- bind_rows(draws, draws_overall) %>%
    ungroup() %>%
    mutate(Study = fct_relevel(Study, average_effect_label, after = Inf)) # put overall effect after individual studies
  # summarise in metafor format
  metafor <- group_by(combined_draws, Study) %>%
    mean_hdci(b) %>% # FIXME: parameterise interval
    rename(est = b, ci_low = .lower, ci_high = .upper)
  # free memory?
  rm(model, draws, draws_overall, combined_draws)
  gc()
  return(metafor)
}
# One-row placeholder frame used as a subgroup heading in the results
# table: the Study column carries the title, the estimate columns are NA.
create_title_row <- function(title) {
  data.frame(Study = title, est = NA, ci_low = NA, ci_high = NA)
}
# Fit subgroup meta-analytic models via brms_function() and assemble one
# display table of per-study and per-subgroup effect estimates.
#
# Args:
#   model:            fitted brms object whose $data supplies Study, d, se.
#   table:            data frame keyed by Study with subgrouping columns.
#   overall_estimate: if TRUE, append an overall pooled-effect row.
#   subset_col:       name of the column to subgroup by.
#   subset_col_order: optional explicit ordering of the subgroup levels.
#   iter, sd_prior, adapt_delta: forwarded to brms_function().
#   cache_label:      prefix for the per-subgroup brms cache files.
# Returns: data frame with Study, est, ci_low, ci_high; subgroup title
#   rows are interleaved and study names indented.
#
# NOTE(review): the overall_estimate branch calls create_subtotal_row()
# and references `rma`, neither defined in this file view, so
# overall_estimate = TRUE likely errors; `single_group` is computed but
# never used below.  Confirm both against the rest of the project.
brms_object_to_table <- function(model, table, overall_estimate = FALSE, subset_col = "Overall",
    subset_col_order = NULL, iter = '1e4', sd_prior = "cauchy(0, .3)",
    adapt_delta = 0.8, cache_label = 'foo') {
  # Attach effect size and SE from the fitted model's data to the table.
  table <- left_join(data.frame(model$data %>% select(Study, d, se)), table, by = 'Study')
  # Reorder data
  table <- select(table, Study, everything())
  # Clean level names so that they look nice in the table
  table[[subset_col]] <- str_to_sentence(table[[subset_col]])
  levels <- unique(table[[subset_col]])
  if(!(is.null(subset_col_order))){
    levels <- intersect(subset_col_order, levels)
  }
  # Work out if only one level is present. Passed to create_subtotal_row(), so
  # that if only one group, no subtotal is created.
  single_group <- ifelse(length(levels)==1, TRUE, FALSE)
  # Subset data by levels, run user-defined metafor function on them, and
  # recombine along with Overall rma output
  subset <- lapply(levels, function(level){filter(table, !!as.symbol(subset_col) == level)})
  names(subset) <- levels
  # model each data subset
  subset_res <- lapply(levels, function(level){brms_function(model, data = subset[[level]],
    iter = iter, sd_prior = sd_prior, adapt_delta = adapt_delta,
    cache_file = paste(cache_label, level, sep = '-'))})
  names(subset_res) <- levels
  # This binds the table together
  subset_tables <-
    lapply(levels, function(level){
      rbind(
        create_title_row(level),
        dplyr::select(subset_res[[level]], Study, .data$est, .data$ci_low, .data$ci_high)
      )
    })
  subset_table <- do.call("rbind", lapply(subset_tables, function(x) x))
  ordered_table <- rbind(subset_table,
    if (overall_estimate) {
      create_subtotal_row(rma, "Overall", add_blank = FALSE)
    })
  # Indent the studies for formatting purposes
  ordered_table$Study <- as.character(ordered_table$Study)
  ordered_table$Study <- ifelse(!(ordered_table$Study %in% levels) & ordered_table$Study != "Overall",
    paste0(" ", ordered_table$Study),
    ordered_table$Study)
  return(ordered_table)
}
## BOOKMARK: Attentional Blink (AB) functions
# set pre-post attentional blink differences for treatment and control groups
#
# For each row, computes the pre - post difference of the group means;
# the SD of the difference is the SD pooled across the pre and post
# measurements (same n for both time points).
ab_set_diff <- function(df) {
  # SDs are pooled from pre and post AB
  # May need "Imputing a change-from-baseline standard deviation using a correlation coefficient" (Higgins et al., 2019)
  df %>% mutate(
    treatment.diff.m = treatment.pre.m - treatment.post.m,
    treatment.diff.sd = sd_pooled(treatment.n, treatment.n, treatment.pre.sd, treatment.post.sd),
    control.diff.m = control.pre.m - control.post.m,
    control.diff.sd = sd_pooled(control.n, control.n, control.pre.sd, control.post.sd)
  )
}
## BOOKMARK: Attention Network Test (ANT) functions
#' Compute ANT scores for alerting, orienting and conflict.
#'
#' @param df Data frame
#' @importFrom dplyr group_by left_join mutate select
#' @importFrom forcats fct_relevel
#' @importFrom magrittr %>%
#' @importFrom rlang .data
#' @importFrom tidyr pivot_longer pivot_wider unite
#' @export
#' @return Data frame
#'
ant_scores <- function(df) {
  # Alerting = RT(no cue) - RT(double cue); orienting = RT(center cue) -
  # RT(spatial cue).  RTs are averaged per participant (p), group and
  # time (t) by pivot_wider's values_fn.
  alerting_orienting <- df %>%
    pivot_wider(id_cols = c(.data$p,.data$group,.data$t), names_from = .data$cue,
      values_from = .data$rt, values_fn = list(rt = mean)) %>%
    mutate(alerting = .data$nocue - .data$double, orienting = .data$center - .data$spatial) %>%
    select(.data$p, .data$group, .data$t, .data$alerting, .data$orienting)
  # Conflict = RT(incongruent flankers) - RT(congruent flankers).
  conflict <- df %>%
    pivot_wider(id_cols = c(.data$p,.data$group,.data$t),
      names_from = .data$flanker_type, values_from = .data$rt,
      values_fn = list(rt = mean)) %>%
    mutate(conflict = .data$incongruent - .data$congruent) %>%
    select(.data$p, .data$group, .data$t, .data$conflict)
  # Long format: one row per participant/group/time/score-type.
  result <- left_join(alerting_orienting, conflict, by=c('p', 'group', 't')) %>%
    pivot_longer(cols = c(.data$alerting, .data$orienting, .data$conflict),
      names_to = 'var', values_to = 'rt') %>%
    mutate(var = factor(var))
  # arrange for plot facets to be LtR: Alerting, Orienting, Conflict
  result$var <- fct_relevel(result$var, 'conflict', after = Inf)
  return(result)
}
|
f6aa54f64a5bf3e3e23c9415a116cfc7cb77185f
|
d4ae1c925add14251620f7fa4b92d52573185829
|
/SNU/R/Visualization/Exercise/(170818) Skillcraft.R
|
3742ae98eedeb840f0614021de7c586e2c7ae279
|
[] |
no_license
|
kookoowaa/Repository
|
eef9dce70f51696e35cec6dc6a5d4ce5ba28c6d7
|
26f9016e65dbbc6a9669a8a85d377d70ca8a9057
|
refs/heads/master
| 2023-03-08T01:36:29.524788
| 2023-02-21T16:11:43
| 2023-02-21T16:11:43
| 102,667,024
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,332
|
r
|
(170818) Skillcraft.R
|
# Exploratory analysis of the SkillCraft1 StarCraft telemetry data:
# clean the raw CSV, collapse the 7 league tiers into 4 broader skill
# groups, and compare gameplay metrics across groups with boxplots.
library('reshape2')
library('dplyr')
library('gridExtra')
game_skill = read.csv('./data/wk2/SkillCraft1_Dataset.csv')
head(game_skill)
str(game_skill)
# Drop rows where TotalHours is the literal '?' placeholder, then coerce
# the affected columns back to their intended types.
narm_gs = game_skill[game_skill$TotalHours != '?',]
narm_gs$Age = as.integer(as.character(narm_gs$Age))
narm_gs$HoursPerWeek = as.integer(as.character(narm_gs$HoursPerWeek))
narm_gs$TotalHours = as.integer(as.character(narm_gs$TotalHours))
narm_gs$LeagueIndex = as.factor(narm_gs$LeagueIndex)
narm_gs$GameID = as.character(narm_gs$GameID)
plot(narm_gs$Age, narm_gs$LeagueIndex)
variables = names(narm_gs)[-(1:2)]
# Collapse league tiers 1-6 pairwise into groups 1-3; tier 7 becomes
# its own group (relabelled 'Godlike' below).
new_rate = vector('numeric', length = length(narm_gs$LeagueIndex))
n = 1
for (i in 1:6){
  new_rate[narm_gs$LeagueIndex == i] = n
  if(i%%2 == 0) {
    n = n+1
  }
}
new_rate[narm_gs$LeagueIndex == 7] = 7
new_rate = as.factor(new_rate)
levels(new_rate) = c('noob','casual','pro','Godlike')
### The ratio to each Tier is as follows: (4% : 23% : 23% : 23% : 23% : 4%) excl. Grand Master Tier
# One plot per gameplay metric against the collapsed tier.
for (i in 3:20){
  plot(narm_gs[,i], new_rate, main = names(narm_gs[i])[1], xlab = names(narm_gs[i]), ylab = 'tier', col = narm_gs$LeagueIndex)
}
# Remove the record with GameID 5140 before re-plotting.
adj_gs = narm_gs[-which(narm_gs$GameID=='5140'),]
adj_rate = new_rate[-which(narm_gs$GameID == '5140')]
for (i in 3:20){
  plot(adj_gs[,i], adj_gs[,2], main = names(narm_gs[i]), xlab = names(adj_gs[i]), ylab = 'tier', pch = 20, col = adj_gs$LeagueIndex)
}
# Per-tier information (translated from Korean).  assign() with
# inherits = T creates one data frame per league name in the calling env.
tier = c('bronze','silver','gold','platinum','diamond','master','grand master')
n=1
for (i in tier){
  adj_gs %>%
    filter(LeagueIndex==n) %>%
    select(Age:ComplexAbilitiesUsed) %>%
    assign(i,., inherits = T)
  n = n+1
}
grid.table(summary(bronze[,1:5]))
# sub-group: same trick for the four collapsed skill groups.
tier2 = c('noob','casual','pro','Godlike')
n=1
for (i in tier2){
  adj_gs[as.numeric(adj_rate)==n,] %>%
    select(Age:ComplexAbilitiesUsed) %>%
    assign(i,., inherits = T)
  n = n+1
}
dev.off()
par(mfrow=c(2,2))
# Boxplots of each metric across the four skill groups (some y-limits
# clipped to suppress extreme outliers).
boxplot(noob$Age, casual$Age, pro$Age, Godlike$Age, names = tier2, main = 'Age')
boxplot(noob$HoursPerWeek, casual$HoursPerWeek, pro$HoursPerWeek, Godlike$HoursPerWeek, names = tier2, main = 'HoursPerWeek')
boxplot(noob$TotalHours, casual$TotalHours, pro$TotalHours, Godlike$TotalHours, names = tier2, main = 'TotalHours', ylim = c(0,4000))
boxplot(noob$APM, casual$APM, pro$APM, Godlike$APM, names = tier2, main = 'APM')
boxplot(noob$SelectByHotkeys, casual$SelectByHotkeys, pro$SelectByHotkeys, Godlike$SelectByHotkeys, names = tier2, main = 'SelectByHotkeys')
boxplot(noob$AssignToHotkeys, casual$AssignToHotkeys, pro$AssignToHotkeys, Godlike$AssignToHotkeys, names = tier2, main = 'AssignToHotkeys')
boxplot(noob$UniqueHotkeys, casual$UniqueHotkeys, pro$UniqueHotkeys, Godlike$UniqueHotkeys, names = tier2, main = 'UniqueHotkeys')
boxplot(noob$MinimapAttacks, casual$MinimapAttacks, pro$MinimapAttacks, Godlike$MinimapAttacks, names = tier2, main = 'MinimapAttacks', ylim=c(0,0.002))
boxplot(noob$MinimapRightClicks, casual$MinimapRightClicks, pro$MinimapRightClicks, Godlike$MinimapRightClicks, names = tier2, main = 'MinimapRightClicks', ylim=c(0,0.002))
boxplot(noob$NumberOfPACs, casual$NumberOfPACs, pro$NumberOfPACs, Godlike$NumberOfPACs, names = tier2, main = 'NumberOfPACs')
boxplot(noob$GapBetweenPACs, casual$GapBetweenPACs, pro$GapBetweenPACs, Godlike$GapBetweenPACs, names = tier2, main = 'GapBetweenPACs')
boxplot(noob$ActionLatency, casual$ActionLatency, pro$ActionLatency, Godlike$ActionLatency, names = tier2, main = 'ActionLatency')
boxplot(noob$ActionsInPAC, casual$ActionsInPAC, pro$ActionsInPAC, Godlike$ActionsInPAC, names = tier2, main = 'ActionsInPAC', ylim=c(0,15))
boxplot(noob$TotalMapExplored, casual$TotalMapExplored, pro$TotalMapExplored, Godlike$TotalMapExplored, names = tier2, main = 'TotalMapExplored')
boxplot(noob$WorkersMade, casual$WorkersMade, pro$WorkersMade, Godlike$WorkersMade, names = tier2, main = 'WorkersMade')
boxplot(noob$UniqueUnitsMade, casual$UniqueUnitsMade, pro$UniqueUnitsMade, Godlike$UniqueUnitsMade, names = tier2, main = 'UniqueUnitsMade')
boxplot(noob$ComplexUnitsMade, casual$ComplexUnitsMade, pro$ComplexUnitsMade, Godlike$ComplexUnitsMade, names = tier2, main = 'ComplexUnitsMade', ylim=c(0,5e-04))
boxplot(noob$ComplexAbilitiesUsed, casual$ComplexAbilitiesUsed, pro$ComplexAbilitiesUsed, Godlike$ComplexAbilitiesUsed, names = tier2, main = 'ComplexAbilitiesUsed', ylim=c(0,0.002))
|
c304ac1c97af06dade4eaa4a17cdd2d2b0f95992
|
02f053ce70b065724d4a02619fb402adcc0ec997
|
/analysis/boot/boot1083.R
|
78f6892a6e2f3630698beca34abbf53d058e6d57
|
[] |
no_license
|
patperry/interaction-proc
|
27950482929240bba55c7d0f2f8c5235d770feea
|
cf8dfd6b5e1d0684bc1e67e012bf8b8a3e2225a4
|
refs/heads/master
| 2021-01-01T06:11:47.125853
| 2012-12-04T20:01:42
| 2012-12-04T20:01:42
| 673,564
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,758
|
r
|
boot1083.R
|
# Bootstrap replicate 1083: stored model-fit constants.
seed <- 1083
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
# Send-side intervals unused; receive-side interval boundaries double
# from 56 seconds up to ~683 days (values in seconds).
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Null vs residual deviance and their degrees of freedom for this fit.
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225263.90727857643
df.resid <- 35402
df <- 165
coefs <- c(6.694504254974477, 5.792913422487578, 5.694633299496106, 5.402249852972263, 5.132565146815064, 4.751652177803302, 4.775254181351299, 4.622441268504947, 4.3738538925333295, 4.257225286255723, 4.378846808827093, 4.1997708039066675, 4.023048120201225, 3.955673459752256, 3.7796352959573905, 3.5501302451986634, 3.2693918670969278, 2.964384971699839, 2.509946980885257, 2.062326434680701, 1.578560095459602, 0.8939927409559483, 0.9066170659631491, 0.6264736145433176, 5.6154355178735356e-2, -0.8503257400197584, -0.3398142339314018, 0.9436841537802809, 1.0568923677771949, -1.1047487552642201, -2.0564765244447196, -1.7557370319739105, -0.26419795620129727, 0.6949771610929774, 1.202329642793386, -1.5014299906185877, -0.35612878874372794, -1.1676318058134907, -8.692602648194226e-3, -1.1021873522358898, 0.9763710475992685, 0.7106753474582679, -0.560242370976337, -1.8415822087092406, -1.1101710566363774, -0.6231684499122313, -0.47310057109753695, 0.18590837156812254, 0.12596388168606157, -0.8654269411444259, -0.1556160137870601, 0.9501106487457505, -2.609468076743121, 1.803253953919812, 0.6715839897677094, 1.013363112492643, -2.4883406501095697, -6.0916743134394935e-2, -0.40974130489257626, 1.818506178104154, 0.935348805291704, 0.7085290253647861, -2.3648566003272316, -0.9322912101065771, -0.5394352415745083, 0.3122024234487166, 0.6096803989439815, -0.5177312374081088, -1.629501298241637, -0.6276003772085206, -1.7039029352457855, -0.3389254229516474, 0.4674411065786682, 1.0252719631144085, 0.7709319983668161, -0.5896287488884812, -1.349102391212154, -1.2564260013934485, -4.899327473388816e-2, 0.7131549140519035, 1.3188503614649425, 0.13045828546979812, 0.1676584811840382, -2.2580038941882554, 9.270506832488012e-2, 0.3913229783580942, 1.2433035019444767, 0.2891289240171239, 0.8869852168170851, -2.735533909214502, 0.46436465672277927, 0.7234674097000441, 0.8351326973936962, 0.2661098313320619, 0.10710362230416481, 1.522599456468136, 0.43013070437735373, 
0.6361680816884613, 0.17100622720105105, 3.253303004710909e-2, 0.5406996110064635, -0.43292008671836363, 0.6440935302646991, -3.694034236740344e-2, 0.8314854249773246, 0.8485302734032814, 1.0840546644580231, -0.44620376364408554, -0.24491104932485955, -1.1147184079875887, 0.5509782314704328, 0.6373939060890594, 1.6517001833691365, -0.43361138894692985, -0.2791657338788185, -1.146436579252661, 0.8087548726162324, -0.4161028521088728, 0.3919929202795906, 0.7565945333593448, -1.1292704006455807, -0.32032500605997416, -0.8818673029256723, -0.5819609067853757, 0.39996390159863515, 0.853804333162455, -6.46128260379732e-2, 0.8524126764423312, -0.3930549915614016, -0.40911091717222214, 6.981372921845423e-2, 0.9582062463848131, 0.8430657262052088, 0.2763651816011444, -6.673577021102985e-2, 1.1614775752501796, -0.37726534026059416, 1.1578864815798184, 0.6736665850890861, 0.8435480528044196, 0.8622958548032363, -0.6340789991237156, -1.0059300776990912, 0.6971830847679233, 0.3294627678796095, 0.45664389619498574, -0.122448613819254, -0.6485421626718164, -2.1716994023960488, 1.3665274503966214, 0.13099277252672947, 1.13669002666887, -2.416868580838191e-2, -6.308995359431176e-2, -0.33689523726573967, -2.0209084137932183, -1.5499621768080512, 0.8241267747032807, 1.2038111786671342, -0.15971785724135565, 1.5343581240186897, -0.24110984842830296, -0.18489120324315284, 1.57912075511106e-2, 1.2529154273031222)
|
312221710d13d94092bd0a87f8f1bdff36aef6ed
|
b1ea57ec8310183190bf951baa61a03ac63da994
|
/plot1.R
|
d5bd2e287ba529180c82a2ff3d182396dccb8243
|
[] |
no_license
|
Karlovik/ExData_Plotting1
|
b9ce53e669892b3f063690d7b5439f83ca90de1d
|
a3d838371decc769f87126717a31b6433e44d87c
|
refs/heads/master
| 2020-12-03T09:23:55.714440
| 2015-07-11T14:49:35
| 2015-07-11T14:49:35
| 38,878,079
| 0
| 0
| null | 2015-07-10T11:55:57
| 2015-07-10T11:55:57
| null |
UTF-8
|
R
| false
| false
| 552
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01/02, saved to
# plot1.png (480x480).
graphics.off()
electric=read.table("household_power_consumption.txt",sep=";",head=TRUE)
electric$Date=as.Date(strptime(electric$Date,format="%d/%m/%Y"))
# Keep only the two target days.
electricity=subset(electric,electric$Date>="2007-02-01"&electric$Date<="2007-02-02")
# Column arrives non-numeric (presumably because of '?' missing-value
# markers in the raw file -- confirm), hence the round-trip conversion.
electricity$Global_active_power=as.numeric(as.character(electricity$Global_active_power))
par(bg="white")
hist(electricity$Global_active_power, col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
# Copy the on-screen device to a PNG file, then close both devices.
dev.copy(png, filename = "plot1.png",width=480,height=480,unit="px")
dev.off()
graphics.off()
|
6ecd6b385113b3ab6c4d86ce24d25ef9c943249e
|
5d159cd8f4de6b6678a34701f47bc32bfe7d2473
|
/Logistic regression for text analysis.R
|
2239968a42bfaa28e56704942908a6c62a5cbb6c
|
[] |
no_license
|
v4leriya/RateMyProfessor
|
4c2ae42d8dfd1903f04ed9d554fcf69eee706919
|
2b6a4ee3de7b17546e462e173e2afeb4a08e2b07
|
refs/heads/master
| 2022-12-26T12:48:23.331693
| 2020-10-11T18:07:26
| 2020-10-11T18:07:26
| 299,160,634
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 3,164
|
r
|
Logistic regression for text analysis.R
|
#Load the required libraries
library(caTools)
library(ROCR)
library(tm)
library(SnowballC)
library(rpart)
library(rpart.plot)
library(tidyverse)
# Part 1---------------------------------------------------------------------------------------------
# Read in the RateMyProfessor sample data.
# Fix: the original line used a typographic opening quote and had no
# closing quote, which is a syntax error.
rmp = read.csv(file = "RateMyProfessor_Sample data.csv")
# Keep only rows with a star rating.  Rows with a missing
# would_take_agains stay in `rmp` (Part 2 imputes them); the labelled
# subset for model training goes into rmp_log.
# Fix: the original dropped would_take_agains NAs from `rmp` itself and
# then referenced an undefined `rmp_log`, which broke Part 1 and would
# have left Part 2's rmp_na subset empty.
rmp = drop_na(rmp, "student_star")
rmp_log = drop_na(rmp, "would_take_agains")
#create a sentiment variable
rmp_log$sentiment = ifelse(rmp_log$would_take_agains == "Yes", 1, 0)
#split the data into training and testing 70/30
split1 = sample.split(rmp_log$sentiment, SplitRatio = 0.7)
train1 = subset(rmp_log, split1 == TRUE)
test1 = subset(rmp_log, split1 == FALSE)
#train a logistic regression model
log_model1 = glm(sentiment ~ student_star, data = train1, family = "binomial")
#make predictions on a test data
predictions_log1 = predict(log_model1, newdata = test1, type = "response")
#assess the accuracy of the model (confusion matrix at threshold 0.5)
table(test1$sentiment, as.numeric(predictions_log1 >= 0.5))
#assess the accuracy of the baseline model
table(test1$sentiment)
#calculate AUC value
pred1 = prediction(predictions_log1, test1$sentiment)
as.numeric(performance(pred1, "auc")@y.values)
# Part 2--------------------------------------------------------------------------------------------
#subset the data with only missing observations for the binary would_take_agains
rmp_na = rmp[is.na(rmp$would_take_agains), ]
#use the trained logistic model to make predictions on the dataset with missing would_take_agains
predictions_na = predict(log_model1, newdata = rmp_na, type = "response")
#create a sentiment variable
rmp_na$sentiment = ifelse(predictions_na >= 0.5, 1, 0)
#create a final dataset that has all of would_take_agains observations filled in
rmp_final = rbind(rmp_log, rmp_na)
#create a corpus based on the student's comments and pre-process it to remove stopwords, etc
corpus = Corpus(VectorSource(rmp_final$comments))
corpus = tm_map(corpus, tolower)
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, stopwords("english"))
corpus = tm_map(corpus, stemDocument)
#create term matrix and remove the sparse terms
dtm = DocumentTermMatrix(corpus)
dtm = removeSparseTerms(dtm, 0.97)
labeledComments = as.data.frame(as.matrix(dtm))
labeledComments$sentiment = rmp_final$sentiment
#split the data into training and testing 70/30
split2 = sample.split(labeledComments$sentiment, SplitRatio = 0.7)
train2 = subset(labeledComments, split2 == TRUE)
test2 = subset(labeledComments, split2 == FALSE)
#train a logistic regression model on the vectorized dataset of student's comments
log_model2 = glm(sentiment ~ ., data = train2, family = "binomial")
#make predictions on the test data and check the accuracy of the model
predictions_log2 = predict(log_model2, newdata = test2, type = "response")
table(test2$sentiment, as.numeric(predictions_log2 >= 0.5))
table(test2$sentiment)
#calculate AUC value
pred2 = prediction(predictions_log2, test2$sentiment)
as.numeric(performance(pred2, "auc")@y.values)
#create new variable "approval rate" for each instructor
# NOTE(review): this assigns the same global mean to every row; a true
# per-instructor rate would aggregate by the instructor identifier
# (e.g. group_by + mean) -- confirm intended behavior before changing.
rmp_final$approval_rate = mean(rmp_final$sentiment)
|
20cab104be6d4ba02ca32ae65a3615dbbaf1b24f
|
f50242a598a5b993fc91701390f54133aed0ef76
|
/code/lec5b.R
|
62de9a2f14ee17c727df33504a812f3da54d0da6
|
[] |
no_license
|
ecotyper/advanced-spatial-statistics-2021
|
bd1febee1767c5f93636f52154cf60facf2c0b79
|
78a010f21ee0e8dc8eb6aed1506d20ca45c14f5e
|
refs/heads/master
| 2023-05-05T07:26:17.451980
| 2021-03-12T18:45:43
| 2021-03-12T18:45:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,304
|
r
|
lec5b.R
|
##Predictive process example
rm(list=ls())
library(MBA)
library(fields)
library(spBayes)
##Simulated some data
# Draw n samples from a multivariate normal N(mu, V) via the Cholesky
# factor of V.  Returns a length(mu) x n matrix (one sample per column).
rmvn <- function(n, mu = 0, V = matrix(1)) {
  p <- length(mu)
  # V must be p x p to match the mean vector.
  if (any(is.na(match(dim(V), p)))) {
    stop("Dimension problem!")
  }
  U <- chol(V)
  z <- matrix(rnorm(n * p), ncol = p)
  t(z %*% U + rep(mu, rep(n, p)))
}
# Simulate a spatial regression dataset, then compare a full Gaussian
# process model against predictive-process approximations with 25 and
# 64 knots (spBayes::spLM).
set.seed(1)
n <- 500
# n random sites on the unit square; design = intercept + one covariate.
coords <- cbind(runif(n,0,1), runif(n,0,1))
X <- as.matrix(cbind(1, rnorm(n)))
B <- as.matrix(c(1,5))
p <- length(B)
# Spatial variance, nugget, and exponential decay (3/phi = range 0.5).
sigma.sq <- 2
tau.sq <- 0.1
phi <- 3/0.5
D <- as.matrix(dist(coords))
R <- exp(-phi*D)
# Latent spatial random effects and noisy response.
w <- rmvn(1, rep(0,n), sigma.sq*R)
y <- rnorm(n, X%*%B + w, sqrt(tau.sq))
##Set up spLM call
n.samples <- 2000
starting <- list("phi"=3/0.5, "sigma.sq"=50, "tau.sq"=1)
tuning <- list("phi"=0.05, "sigma.sq"=0.05, "tau.sq"=0.05)
priors <- list("beta.Norm"=list(rep(0,p), diag(1000,p)),
  "phi.Unif"=c(3/1, 3/0.1), "sigma.sq.IG"=c(2, 2),
  "tau.sq.IG"=c(2, 0.1))
cov.model <- "exponential"
n.report <- 100
verbose <- TRUE
##Call full GP and predictive process GP models
burn.in <- floor(0.75*n.samples)
##Full GP
m.gp <- spLM(y~X-1, coords=coords, starting=starting,
  tuning=tuning, priors=priors, cov.model=cov.model,
  n.samples=n.samples, verbose=verbose, n.report=n.report)
m.gp <- spRecover(m.gp, start=burn.in, thin=2)
## PP GP with 25 knots (5 x 5 grid)
m.pp.gp.25 <- spLM(y~X-1, coords=coords, knots=c(5,5,0.1), starting=starting,
  tuning=tuning, priors=priors, cov.model=cov.model,
  n.samples=n.samples, verbose=verbose, n.report=n.report)
m.pp.gp.25 <- spRecover(m.pp.gp.25, start=burn.in, thin=2)
## PP GP with 64 knots (8 x 8 grid)
m.pp.gp.64 <- spLM(y~X-1, coords=coords, knots=c(8,8,0.1), starting=starting,
  tuning=tuning, priors=priors, cov.model=cov.model,
  n.samples=n.samples, verbose=verbose, n.report=n.report)
m.pp.gp.64 <- spRecover(m.pp.gp.64, start=burn.in, thin=2)
## save(file="../data/pred_proc.Rdata",list=c("m.gp","m.pp.gp.25","m.pp.gp.64"))
## load("../data/pred_proc.Rdata")
## Timing
m.gp$run.time
m.pp.gp.25$run.time
m.pp.gp.64$run.time
## Summary cov parameters (posterior median and 95% interval)
round(summary(m.gp$p.theta.recover.samples)$quantiles[,c(3,1,5)],2)
round(summary(m.pp.gp.25$p.theta.recover.samples)$quantiles[,c(3,1,5)],2)
round(summary(m.pp.gp.64$p.theta.recover.samples)$quantiles[,c(3,1,5)],2)
## DIC
spDiag(m.gp)$DIC
spDiag(m.pp.gp.25)$DIC
spDiag(m.pp.gp.64)$DIC
## Summary random effects (posterior medians of w)
m.gp.w.hat <- apply(m.gp$p.w.recover.samples, 1, median)
m.pp.25.w.hat <- apply(m.pp.gp.25$p.w.recover.samples, 1, median)
m.pp.64.w.hat <- apply(m.pp.gp.64$p.w.recover.samples, 1, median)
## Interpolate each surface onto a 100 x 100 grid for plotting
surf.w <- mba.surf(cbind(coords, w), no.X=100, no.Y=100, extend=TRUE)$xyz.est
surf.gp <- mba.surf(cbind(coords, m.gp.w.hat), no.X=100, no.Y=100, extend=TRUE)$xyz.est
surf.pp.25 <- mba.surf(cbind(coords, m.pp.25.w.hat), no.X=100, no.Y=100, extend=TRUE)$xyz.est
surf.pp.64 <- mba.surf(cbind(coords, m.pp.64.w.hat), no.X=100, no.Y=100, extend=TRUE)$xyz.est
dev.new()
par(mfrow=c(2,2))
image.plot(surf.w, main="True w")
image.plot(surf.gp, main="GP estimated w")
image.plot(surf.pp.25, main="PPGP knots 25 w"); points(m.pp.gp.25$knot.coords, pch=19)
image.plot(surf.pp.64, main="PPGP knots 64 w"); points(m.pp.gp.64$knot.coords, pch=19)
|
f8a6accfeb8c506bc268547465f9979581907812
|
c9e05757720a37c8e7141a5a9a108efd38e71df8
|
/R/layout.R
|
18b302c859880059da8745d3ad1206308d959e2b
|
[] |
no_license
|
xiangpin/ggtree
|
50a075e211bc40df9563f49f4b845f6586ff5d90
|
b51cece3bcd1093f41c36570a47c5c1441f8c3c7
|
refs/heads/master
| 2023-09-04T04:35:28.008649
| 2023-08-11T03:41:24
| 2023-08-11T03:41:24
| 196,019,544
| 0
| 0
| null | 2019-09-06T13:35:08
| 2019-07-09T13:58:01
|
R
|
UTF-8
|
R
| false
| false
| 3,691
|
r
|
layout.R
|
##' rotate circular tree in a certain angle
##'
##'
##' @title rotate_tree
##' @param treeview tree view in circular layout
##' @param angle the angle of rotation
##' @return updated tree view
##' @export
##' @examples
##' tree <- rtree(15)
##' p <- ggtree(tree) + geom_tiplab()
##' p2 <- open_tree(p, 180)
##' rotate_tree(p2, 180)
##' @author Guangchuang Yu
rotate_tree <- function(treeview, angle) {
    ## Re-apply polar coordinates with the start offset shifted by `angle`
    ## degrees (converted to radians); the trailing -1 is coord_polar's
    ## positional `direction` argument (clockwise).
    treeview <- treeview + coord_polar(theta='y', start=(angle-90)/180*pi, -1)
    ## Keep per-node label angles consistent with the rotated coordinates.
    treeview$data$angle <- treeview$data$angle + angle
    ## Record the (still circular) layout in a fresh plot environment so
    ## downstream code can query it.
    treeview$plot_env <- build_new_plot_env(treeview$plot_env)
    assign("layout", "circular", envir = treeview$plot_env)
    return(treeview)
}
##' transform a tree in either rectangular or circular layout into the fan layout
##' that opens with a specific angle
##'
##'
##' @title open_tree
##' @param treeview tree view in rectangular/circular layout
##' @param angle open the tree at a specific angle
##' @return updated tree view
##' @importFrom ggplot2 scale_y_continuous
##' @export
##' @examples
##' tree <- rtree(15)
##' p <- ggtree(tree) + geom_tiplab()
##' open_tree(p, 180)
##' @author Guangchuang Yu
open_tree <- function(treeview, angle) {
    p <- treeview + layout_circular()
    ymax <- max(range(p$data$y))
    ## Enlarge the y range so the drawn arc occupies only part of the
    ## circle, leaving a gap of `angle` degrees (the "opening").
    p <- p + scale_y_continuous(limits = c(0,
        max(c(ymax * (1+angle/(360-angle)), ymax+1))
        ))
    N <- nrow(p$data)
    ## Recompute per-node label angles over the enlarged virtual circle,
    ## then map them back to the nodes' original y ordering.
    idx <- match(1:N, order(p$data$y))
    NN <- N *(1+angle/(360-angle))
    angle <- 360/(2+NN) * (1:N+1)
    angle <- angle[idx]
    p$data$angle <- angle
    ## Record the fan layout in a fresh plot environment.
    p$plot_env <- build_new_plot_env(p$plot_env)
    assign("layout", "fan", envir = p$plot_env)
    return(p)
}
##' Switch a circular/fan tree view to the rectangular layout
##'
##'
##' @title layout_rectangular
##' @rdname tree-layout
##' @export
##' @examples
##' tree <- rtree(20)
##' p <- ggtree(tree, layout = "circular") + layout_rectangular()
layout_rectangular <- function() {
    layout_ggtree(layout = 'rectangular')
}
##' Switch a rectangular tree view to the circular layout
##'
##'
##' @title layout_circular
##' @rdname tree-layout
##' @export
##' @examples
##' tree <- rtree(20)
##' p <- ggtree(tree)
##' p + layout_circular()
layout_circular <- function() {
    layout_ggtree(layout = 'circular')
}
##' Switch a rectangular/circular tree view to the inward circular layout
##'
##'
##' @title layout_inward_circular
##' @param xlim setting x limits, which will affect the center space of the tree
##' @rdname tree-layout
##' @export
##' @examples
##' tree <- rtree(20)
##' p <- ggtree(tree)
##' p + layout_inward_circular(xlim=4) + geom_tiplab(hjust=1)
layout_inward_circular <- function(xlim = NULL) {
    ## A scalar xlim is treated as the outer limit, paired with 0 as the
    ## inner one.
    if (!is.null(xlim) && length(xlim) == 1) {
        xlim <- c(xlim, 0)
    }
    layout_ggtree(layout = "inward_circular", xlim = xlim)
}
##' Switch a rectangular/circular tree view to the fan layout
##'
##'
##' @title layout_fan
##' @rdname tree-layout
##' @param angle open tree at specific angle
##' @export
##' @examples
##' tree <- rtree(20)
##' p <- ggtree(tree)
##' p + layout_fan(angle=90)
layout_fan <- function(angle = 180) {
    layout_ggtree(layout = 'fan', angle = angle)
}
##' transform rectangular layout to dendrogram layout
##'
##'
##' @title layout_dendrogram
##' @rdname tree-layout
##' @export
##' @examples
##' tree <- rtree(20)
##' p <- ggtree(tree)
##' p + layout_dendrogram()
##' @author Guangchuang Yu
layout_dendrogram <- function() {
    ## Fix: the roxygen example previously read `p + p + layout_dendrogram()`,
    ## which adds the plot to itself and fails; corrected to a single `p +`.
    layout_ggtree('dendrogram')
}
# Internal constructor: bundle the layout parameters into a
# 'layout_ggtree' object for the layout_*() helpers above.
layout_ggtree <- function(layout = 'rectangular', angle = 180, xlim = NULL) {
    out <- list(layout = layout, angle = angle, xlim = xlim)
    class(out) <- 'layout_ggtree'
    out
}
|
a0d17d0101b574068391e3605a62f4c5ec6e1f00
|
d6003b28f81c59cc8731583fb930bb82f4365eac
|
/R/util.character.R
|
d34fefd93403b927544f5c728f08f26612e4ad14
|
[] |
no_license
|
cran/CHNOSZ
|
79616d683d9af7e16674cf08c7912c5f438c45d9
|
6c61dcecdbce300a990341f5370552b63ec42647
|
refs/heads/master
| 2023-03-24T08:33:38.797654
| 2023-03-13T16:10:32
| 2023-03-13T16:10:32
| 17,678,274
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,245
|
r
|
util.character.R
|
# CHNOSZ/util.character.R
# Functions to work with character objects
### Unexported functions ###
# Join the elements of a character vector into a single string.
# Length-0 and length-1 inputs are returned unchanged.
c2s <- function(x, sep=' ') {
  if (length(x) > 1) paste(x, collapse = sep) else x
}
# Split a string into elements of a character object of length n+1, where n is the number of separators in the string
# Default sep=NULL indicates a separator at every position of x
# keep.sep is used to keep the separators in the output
#
# NOTE(review): with sep=NULL, 1:length(sep) is c(1, 0), so the final
# loop runs twice with NULL-like separators; seq_along(sep) would make
# the intent explicit -- confirm before changing.
s2c <- function(x,sep=NULL,keep.sep=TRUE) {
  # Recursively split 'x' according to separation strings in 'sep'
  do.split <- function(x,sep,keep.sep=TRUE) {
    # Split the elements of x according to sep
    # Output is a list the length of x
    if(is.list(x)) stop("x is a list; it must be a character object (can have length > 1)")
    x <- as.list(x)
    for(i in 1:length(x)) {
      # Do the splitting; fixed=TRUE treats sep literally, not as regex
      xi <- strsplit(x[[i]],sep,fixed=TRUE)[[1]]
      # Paste the separator term back in
      if(keep.sep & !is.null(sep)) {
        xhead <- character()
        xtail <- xi
        if(length(xi) > 1) {
          # First fragment keeps no separator; every later fragment is
          # prefixed with the separator it followed.
          xhead <- head(xi,1)
          xtail <- tail(xi,-1)
          # In-between matches
          xtail <- paste("",xtail,sep=sep)
        }
        # A match at the end ... grep here causes problems
        # when sep contains control characters
        #if(length(grep(paste(sep,"$",sep=""),x[[i]]) > 0)) xtail <- c(xtail,sep)
        # Use substr instead
        nx <- nchar(x[[i]])
        ns <- nchar(sep)
        if(substr(x[[i]],nx-ns+1,nx) == sep) xtail <- c(xtail,sep)
        xi <- c(xhead,xtail)
      }
      x[[i]] <- xi
    }
    return(x)
  }
  # Now do it!  Apply each separator in turn, flattening between passes.
  for(i in 1:length(sep)) x <- unlist(do.split(x,sep[i],keep.sep=keep.sep))
  return(x)
}
# Return a value of TRUE or FALSE for each element of x
# Return TRUE/FALSE for each element of x indicating whether it can be
# interpreted as (part of) a number.  Besides values that coerce cleanly
# via as.numeric(), the single characters '.', '+' and '-' also count
# (they can begin a numeric token).  Zero-length input yields FALSE.
can.be.numeric <- function(x) {
  # Return FALSE if length of argument is zero
  if(length(x) == 0) return(FALSE)
  # Recurse elementwise; vapply (rather than sapply) guarantees an
  # unnamed logical vector, matching the original as.logical(sapply(...)).
  if(length(x) > 1) {
    return(vapply(x, can.be.numeric, logical(1), USE.NAMES = FALSE))
  }
  if(is.numeric(x)) return(TRUE)
  # Coercible strings (suppress the "NAs introduced" warning)
  if(!is.na(suppressWarnings(as.numeric(x)))) return(TRUE)
  # Lone numeric punctuation counts as potentially numeric
  x %in% c('.','+','-')
}
|
3ee1f3e6336ccdea7f9a46fc13d145e98298ff14
|
f38e7d1550ffe3628d958675a58263648b50caed
|
/server.R
|
dc041da603e8bbb63fff3f8c14ab3248becc15a2
|
[] |
no_license
|
jhooge/BayesianSampleSize
|
466662e98c1b045934cb4007554df6ae7e0c797a
|
6706db075b8de89310ca294b710b49bfaf5b9f99
|
refs/heads/master
| 2021-01-23T10:35:09.297577
| 2017-07-31T14:19:59
| 2017-07-31T14:19:59
| 93,073,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,977
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(reshape2)
library(ggplot2)
library(plotly)
# Mode of a Beta(alpha, beta) distribution: (alpha-1)/(alpha+beta-2).
# Only well-defined for alpha, beta > 1 (otherwise the density has no
# interior mode) -- callers are expected to pass such shapes.
betaMode <- function(alpha, beta) {
  (alpha - 1) / (alpha + beta - 2)
}
# Mean of a Beta(alpha, beta) distribution: alpha / (alpha + beta).
betaMean <- function(alpha, beta) {
  alpha / (alpha + beta)
}
# Standard deviation of a Beta(alpha, beta) distribution:
# sqrt(alpha*beta / ((alpha+beta)^2 * (alpha+beta+1))).
betaStd <- function(alpha, beta) {
  ab <- alpha + beta
  sqrt(alpha * beta / (ab^2 * (ab + 1)))
}
# Posterior density of theta after observing k successes in n Bernoulli
# trials under a Beta(alpha, beta) prior (conjugate update):
# theta | data ~ Beta(alpha + k, beta + n - k).
posterior <- function(theta, n, k, alpha=1, beta=1) {
  dbeta(theta, alpha + k, beta + n - k)
}
## P(theta <= theta_0|k, alpha, beta)
# Cumulative posterior probability over the supplied theta grid.
# theta is expected to be a grid of values in [0, 1]; the result is the
# running sum of the (unnormalized) posterior density over that grid.
posterior_cdf <- function(theta, n, k, alpha=1, beta=1) {
  stopifnot(theta >= 0)
  stopifnot(theta <= 1)
  # x <- pbeta(theta, alpha+k, beta+n-k)
  # BUG FIX: arguments were previously passed as posterior(theta, alpha,
  # beta, n, k), but posterior()'s signature is (theta, n, k, alpha, beta),
  # so the shape parameters and data counts were scrambled.
  x <- cumsum(posterior(theta, n, k, alpha, beta))
  return(x)
}
# Posterior Beta density of theta given k successes in n trials under a
# Beta(alpha, beta) prior: dbeta(theta, alpha + k, beta + n - k).
densbeta <- function(theta, n, k, alpha=1, beta=1) {
  # BUG FIX: the original guard `stopifnot((theta < 1 || theta > 0))` was
  # a tautology (true for every real theta).  The sibling functions in
  # this file validate theta in [0, 1]; do the same here.
  stopifnot(theta >= 0)
  stopifnot(theta <= 1)
  x <- dbeta(theta, alpha+k, beta+n-k)
  return(x)
}
## Beta Binomial over k's
# Beta-binomial probability mass of k successes in n trials when the
# success probability is integrated out over a Beta(alpha, beta) prior:
#   choose(n, k) * B(alpha + k, beta + n - k) / B(alpha, beta)
# base::beta() is called explicitly because `beta` is also a parameter
# name here and would otherwise shadow the function.
my_dbeta <- function(n, k, alpha, beta) {
  numer <- base::beta(alpha + k, beta + n - k) * choose(n, k)
  denom <- base::beta(alpha, beta)
  numer / denom
}
#' Reimplementation of PROBNML function in SAS
#' @description computes the probability that an observation from a
#' binomial distribution Bin(n,theta) will be less than or equal to k.
#'
#' @param theta is the probability of success for the
#'              binomial distribution, where 0<=theta<=1.
#'              In terms of acceptance sampling, it is the
#'              probability of selecting a nonconforming item.
#' @param n is the number of independent Bernoulli trials in the
#'          binomial distribution, where n>=1. In terms of acceptance
#'          sampling, n is the number of items in the sample.
#' @param k is the number of successes, where 0<=k<=n. In terms of acceptance
#'          sampling, it is the number of nonconforming items.
#'
#' @return the probability that an observation from a
#'         binomial distribution Bin(n,theta) will be less than or equal to k.
#' @export
probbnml <- function(theta, n, k) {
  stopifnot(theta >= 0)
  stopifnot(theta <= 1)
  stopifnot(n >= 1)
  # Use the standard-library binomial CDF instead of the hand-rolled
  # sum over choose(n, j) * theta^j * (1-theta)^(n-j); pbinom computes
  # exactly P(X <= k) for X ~ Bin(n, theta).
  pbinom(k, size = n, prob = theta)
}
# Find the critical number of successes k (0-based) for a go / no-go
# decision at threshold theta, given n trials and a Beta(alpha, beta)
# prior.  The posterior densities over k = 0..n are normalized, summed
# cumulatively, and the decision boundary is located on that cumulative
# curve:
#   type="go"  : smallest k where the cumulative mass exceeds `prob`
#   type="nogo": largest region where the cumulative mass stays >= 1-prob
# NOTE(review): `type` should be a single string; if left at the default
# c("go","nogo") the ifelse() below returns a length-2 result -- confirm
# callers always pass one value.
crit_k <- function(theta, prob, n, alpha=1, beta=1, type=c("go", "nogo")) {
  stopifnot(theta >= 0)
  stopifnot(theta <= 1)
  stopifnot(prob >= 0)
  stopifnot(prob <= 1)
  stopifnot(type %in% c("go", "nogo"))
  # Posterior density of theta for every possible success count k
  x <- sapply(0:n, function(k) densbeta(theta, n, k, alpha, beta))
  x <- x/sum(x)
  # x <- sapply(0:n, function(k) my_dbeta(n, k, alpha, beta))
  x <- cumsum(x)
  # which.min on a logical vector returns the first FALSE position;
  # which.max returns the first TRUE position.
  k <- ifelse(type=="go",
              which.min(x <= prob),
              which.max(x >= 1-prob))
  k <- k-1 ## k starts at 0
  # print(paste0("Type=", type))
  # print(sprintf("k=%i",k))
  # print(sprintf("p_type=%.2f", prob))
  # print(x)
  return(k)
}
# Plot the go / no-go cumulative densities against the number of
# successes k, annotating the two decision thresholds.
#
# k        : vector of success counts (x axis)
# go_cdf   : cumulative "go" probabilities, indexed like k
# nogo_cdf : cumulative "no-go" probabilities, indexed like k
# k_go, k_nogo : critical success counts to annotate
#
# Returns a plotly figure.
plotPosteriorCDF <- function(k, go_cdf, nogo_cdf,
                             k_go, k_nogo) {
  data <- data.frame(k,
                     go=go_cdf,
                     nogo=nogo_cdf)
  data.molten <- melt(data, id.vars = "k")
  colnames(data.molten) <- c("k", "Function", "Probability")
  font <- list(
    # family = "sans serif",
    size = 14,
    color = '#EBEBE9')
  k_nogo_text <- paste0("NoGo Crit. (", k_nogo, "|",
                        round(nogo_cdf[k_nogo], 4), ")")
  # BUG FIX: this annotation previously read from an undefined variable
  # `lower_cdf` (a runtime error) and rebuilt its label inline instead
  # of using k_nogo_text; both now mirror the go-side annotation below.
  k_nogo_val <- list(
    x = k_nogo,
    y = nogo_cdf[k_nogo],
    text = k_nogo_text,
    xref = "x",
    yref = "y",
    showarrow = TRUE,
    arrowhead = 7,
    arrowcolor = "#C5C9CB",
    ax = 50,
    ay = -40
  )
  k_go_text <- paste0("Upper Crit. (", k_go, "|",
                      round(go_cdf[k_go], 4), ")")
  k_go_val <- list(
    x = k_go,
    y = go_cdf[k_go],
    text = k_go_text,
    xref = "x",
    yref = "y",
    showarrow = TRUE,
    arrowhead = 7,
    arrowcolor = "#C5C9CB",
    ax = -50,
    ay = -40
  )
  annotations <- list(k_nogo_val,
                      k_go_val)
  fig <- plot_ly(data.molten, x = ~k, y= ~Probability,
                 type = 'scatter', mode = 'lines',
                 line = list(width = 5),
                 color = ~Function
  ) %>%
    layout(title = 'Cummulative Densities',
           hovermode="all",
           xaxis = list(title = 'Number of Successes',
                        range = c(0, max(data$k)),
                        showgrid = F),
           yaxis = list(title = 'Probability',
                        range = c(0, 1),
                        showgrid = F),
           # legend = list(orientation = 'h'),
           legend = list(x = 0.8, y = 0.5),
           annotations = annotations,
           font=font,
           plot_bgcolor="transparent",
           paper_bgcolor="transparent")
  return(fig)
}
# Plot go / no-go / indecisive power curves as a function of the true
# success probability theta.  All four inputs must have equal length.
# Returns a plotly figure.
plotPowerCurves <- function(thetas,
                            go_power, nogo_power, indecisive_power) {
  data <- data.frame(theta=thetas,
                     Go=go_power,
                     NoGo=nogo_power,
                     Indecisive=indecisive_power)
  # Long format so plotly can color one trace per curve
  data.molten <- melt(data, id.vars = "theta")
  colnames(data.molten) <- c("theta", "Function", "Power")
  font <- list(
    # family = "sans serif",
    size = 14,
    color = '#EBEBE9')
  fig <- plot_ly(data.molten, x = ~theta, y= ~Power,
                 type = 'scatter', mode = 'lines',
                 line = list(width = 5),
                 color = ~Function) %>%
    layout(title = 'Power Curves',
           hovermode="all",
           xaxis = list(title = 'θ',
                        tick0 = 0,
                        dtick = 0.1,
                        showgrid = F),
           yaxis = list(title = 'Power',
                        dtick = 0.1,
                        # range = c(0, 1),
                        showgrid = F),
           legend = list(orientation = 'h'),
           font = font,
           plot_bgcolor="transparent",
           paper_bgcolor="transparent")
  return(fig)
}
# Plot posterior, likelihood and prior densities over theta in one
# figure.  All four inputs must have equal length.  Returns a plotly
# figure.
plotDensities <- function(theta, posterior, likelihood, prior) {
  data <- data.frame(Theta=theta,
                     Posterior=posterior, Likelihood=likelihood, Prior=prior)
  # Long format so plotly can color one trace per density
  data.molten <- melt(data, id.vars = "Theta")
  colnames(data.molten) <- c("Theta", "Function", "Density")
  font <- list(
    # family = "sans serif",
    size = 14,
    color = '#EBEBE9')
  fig <- plot_ly(data.molten, x = ~Theta, y= ~Density,
                 type = 'scatter', mode = 'lines',
                 line = list(width = 5),
                 color = ~Function) %>%
    layout(title = 'Density Functions',
           hovermode="all",
           xaxis = list(title = "θ",
                        tick0 = 0,
                        dtick = 0.1,
                        showgrid = F),
           # y range ignores the prior so a sharp prior does not squash
           # the other two curves
           yaxis = list(title = 'Density',
                        range = c(0, max(data$Posterior, data$Likelihood)),
                        showgrid = F),
           legend = list(orientation = 'h'),
           font=font,
           plot_bgcolor="transparent",
           paper_bgcolor="transparent")
  return(fig)
}
# Shiny server: interactive Bayesian sample-size / go-no-go explorer for
# a binomial endpoint with a Beta(alpha, beta) prior.  Inputs referenced
# throughout: n (sample size), pi (true success prob), k (successes),
# alpha/beta (prior shapes), theta_0 (decision threshold), p_go/p_nogo
# (decision probabilities), radioSample (choose pi vs. k input mode).
shinyServer(function(input, output) {
  ## reactive vars
  # Simulated Bernoulli sample; fixed seed so reruns are reproducible.
  x <- reactive({
    set.seed(42)
    x <- rbinom(input$n, size = 1, input$pi)
  })
  # Posterior density matrix over a theta grid (columns, m = 101 points)
  # for every success count k = 1..n (rows).
  dens <- reactive({
    n <- input$n
    m <- 101
    alpha <- input$alpha
    beta <- input$beta
    thetas <- seq(0, 1, length.out=m)
    d <- c()
    for (theta in thetas) {
      for (k in 1:n) {
        d <- c(d, posterior(theta, n, k, alpha, beta))
      }
    }
    d <- matrix(d, nrow = n, ncol=m)
    # d <- apply(d, 1, function(x) x/sum(x))
    return(d)
  })
  # Dynamic input: either a success-probability field or a
  # number-of-successes field, depending on the radio selection.
  output$samplingInput <- renderUI({
    n <- input$n
    probInput <- numericInput("pi", withMathJax("$$\\textbf{Success}\\ \\textbf{Probability}\\ \\theta$$"),
                              min = 0, max = 1, step = .1,
                              # value = value,
                              value = .5,
                              width = "40%")
    successInput <- numericInput("k", withMathJax("$$\\textbf{Number of Successes}\\ k$$"),
                                 min = 0, max = n, step = 1,
                                 # value = floor(value),
                                 value = floor(n/2),
                                 width = "40%")
    uiElement <- switch(input$radioSample,
                        "prob" = probInput,
                        "successes" = successInput,
                        probInput)
    return(uiElement)
  })
  # Prior / likelihood / posterior densities over theta.
  output$triPlot <- renderPlotly({
    # NOTE(review): input$prob is read but never used in this block --
    # confirm whether it can be removed.
    prob <- input$prob ## success probability
    x <- x()
    n <- length(x)
    alpha <- input$alpha
    beta <- input$beta
    pi <- seq(0, 1, length.out=1000)
    ## Data
    # NOTE(review): the switch default `probInput` is not defined in this
    # scope (it is local to samplingInput's renderUI); it would error if
    # radioSample matched neither branch -- confirm.
    k <- switch(input$radioSample,
                "prob" = sum(x),
                "successes" = input$k,
                probInput)
    # k <- sum(x) ## number of successes
    # Likelihood p(x|pi_test) with x ~ Bin(pi_test, alpha_test, beta)
    likelihood <- dbinom(k, n, pi)
    likelihood <- likelihood/(sum(likelihood)/length(pi)) ## Normalize Density
    ## Prior p(pi) based on Beta(pi, alpha, beta)
    prior <- dbeta(pi, alpha, beta)
    # prior <- prior/(sum(prior[!is.infinite(prior)])/length(pi)) ## Normalize Density
    ## Posterior Distribution p(pi|x)
    posterior <- dbeta(pi, alpha+k, beta+n-k)
    # posterior <- posterior/(sum(posterior)/length(pi)) ## Normalize Density
    fig <- plotDensities(pi, posterior, likelihood, prior)
    return(fig)
  })
  # Cumulative go / no-go posterior probabilities over theta.
  output$posteriorProbPlot <- renderPlotly({
    prob <- input$prob ## success probability
    x <- x()
    n <- length(x)
    alpha <- input$alpha
    beta <- input$beta
    theta <- seq(0, 1, length.out=1000)
    ## Data
    k <- switch(input$radioSample,
                "prob" = sum(x),
                "successes" = input$k,
                probInput)
    ## Posterior Distribution p(theta|x)
    posterior <- dbeta(theta, alpha+k, beta+n-k)
    posterior <- posterior/sum(posterior)
    go_prob <- cumsum(posterior)
    nogo_prob <- 1-cumsum(posterior)
    data <- data.frame(Theta=theta,
                       Go=go_prob,
                       NoGo=nogo_prob)
    data.molten <- melt(data, id.vars = "Theta")
    colnames(data.molten) <- c("Theta", "Function", "Probability")
    fig <- plot_ly(data.molten, x = ~Theta, y= ~Probability,
                   type = 'scatter', mode = 'lines',
                   line = list(width = 5),
                   color = ~Function) %>%
      # add_trace(x = c(.3, .6), y=c(.3, .6), mode = "lines") %>%
      layout(title = 'Probability Functions',
             shapes=list(type='line',
                         x0=0.2, x1=0.2, #
                         y0=0.3, y1=0.3,
                         line=list(dash='dot', width=1)),
             hovermode="all",
             xaxis = list(title = 'theta',
                          tick0 = 0,
                          dtick = 0.1),
             yaxis = list(title = 'Probability'),
             legend = list(orientation = 'h'))
    return(fig)
  })
  # Cumulative densities over success counts with annotated go / no-go
  # critical values.
  output$critValPlot <- renderPlotly({
    n <- input$n
    theta <- input$theta_0
    alpha <- input$alpha
    beta <- input$beta
    p_go <- input$p_go
    p_nogo <- input$p_nogo
    validate(need(sum(p_go, p_nogo) >= 1, "The sum of Go and NoGo probabilities should be larger or equal to 1."))
    k_go <- crit_k(theta=theta, prob=p_go, n=n, alpha=alpha, beta=beta, type="go")
    k_nogo <- crit_k(theta=theta, prob=p_nogo, n=n, alpha=alpha, beta=beta, type="nogo")
    prob_go <- sapply(0:n, function(k) densbeta(theta, n, k, alpha, beta))
    # prob_go <- prob_go/length(prob_go)
    prob_go <- prob_go/length(prob_go)
    # print(prob_go)
    # print(sum(prob_go))
    prob_go <- cumsum(prob_go)
    # prob_go <- prob_go/length(prob_go)
    prob_nogo <- sapply(0:n, function(k) densbeta(theta, n, k, alpha, beta))
    # debug output
    print(sum(prob_nogo/length(prob_nogo)))
    prob_nogo <- prob_nogo/length(prob_nogo)
    prob_nogo <- 1-cumsum(prob_nogo)
    # prob_nogo <- 1-prob_nogo/length(prob_nogo)
    # prob_go <- sapply(0:n, function(k) my_dbeta(n, k, alpha, beta))
    # prob_go <- cumsum(prob_go)
    #
    # prob_nogo <- sapply(0:n, function(k) my_dbeta(n, k, alpha, beta))
    # prob_nogo <- 1-cumsum(prob_nogo)
    data <- data.frame(k=0:n,
                       Go=prob_go,
                       NoGo=prob_nogo)
    data.molten <- melt(data, id.vars = "k")
    colnames(data.molten) <- c("k", "Function", "Probability")
    font <- list(
      # family = "sans serif",
      size = 14,
      color = '#EBEBE9')
    # NOTE(review): k_nogo_text is built but the annotation below rebuilds
    # the same label inline -- could be consolidated.
    k_nogo_text <- paste0("NoGo Crit. (", k_nogo, "|",
                          round(data$NoGo[k_nogo+1], 4), ")")
    k_nogo_val <- list(
      x = k_nogo,
      y = data$NoGo[k_nogo+1],
      text = paste0("NoGo Crit. (", k_nogo, "|",
                    round(data$NoGo[k_nogo+1], 4), ")"),
      xref = "x",
      yref = "y",
      showarrow = TRUE,
      arrowhead = 7,
      arrowcolor = "#C5C9CB",
      ax = 50,
      ay = -40
    )
    k_go_text <- paste0("Go Crit. (", k_go, "|",
                        round(data$Go[k_go+1], 4), ")")
    k_go_val <- list(
      x = k_go,
      y = data$Go[k_go+1],
      text = k_go_text,
      xref = "x",
      yref = "y",
      showarrow = TRUE,
      arrowhead = 7,
      arrowcolor = "#C5C9CB",
      ax = -50,
      ay = -40
    )
    annotations <- list(k_nogo_val,
                        k_go_val)
    fig <- plot_ly(data.molten, x = ~k, y= ~Probability,
                   type = 'scatter', mode = 'lines',
                   line = list(width = 5),
                   color = ~Function) %>%
      layout(title = 'Cummulative Densities',
             hovermode="all",
             xaxis = list(title = 'Number of Successes (k)',
                          range = c(0, max(data$k)),
                          showgrid = F),
             yaxis = list(title = 'Probability',
                          # range = c(0, 1),
                          showgrid = F),
             legend = list(orientation = 'h'),
             # legend = list(x = 0.8, y = 0.5),
             annotations = annotations,
             font=font,
             plot_bgcolor="transparent",
             paper_bgcolor="transparent")
    return(fig)
  })
  # 3D posterior density surface over (theta, k); clicks on this plot
  # drive the two 2D projections below (source id "posterior3D").
  output$posterior3DPlot <- renderPlotly({
    dens <- dens()
    n <- nrow(dens)
    m <- ncol(dens)
    # debug output
    print(dim(dens))
    thetas <- seq(0, 1, length.out=m)
    font <- list(
      # family = "sans serif",
      size = 14,
      color = '#EBEBE9')
    fig <- plot_ly(x=thetas, y=1:nrow(dens), z=dens, showscale=FALSE, source="posterior3D") %>%
      add_surface() %>%
      layout(title = 'Density',
             hovermode="all",
             scene = list(
               xaxis = list(
                 title = "θ",
                 tick0 = 0,
                 dtick = 0.2,
                 # range = c(0, 1),
                 showgrid = T),
               yaxis = list(title = 'k',
                            # range = c(0, n),
                            showgrid = T),
               zaxis = list(title = 'Density',
                            range = c(0, 20),
                            showgrid= F)),
             font=font,
             plot_bgcolor="transparent",
             paper_bgcolor="transparent",
             showlegend=FALSE)
    return(fig)
  })
  # 2D projection: density over k at the theta clicked in the 3D plot.
  output$posterior2D_k <- renderPlotly({
    dens <- dens()
    n <- nrow(dens)
    m <- ncol(dens)
    hover_event <- event_data("plotly_hover", source="posterior3D")
    click_event <- event_data("plotly_click", source="posterior3D")
    validate(
      need(!is.null(click_event), "Please click in the density plot above to select a projection.")
    )
    theta <- click_event$x
    thetas <- seq(0, 1, length.out=m)
    i <- match(theta, thetas)
    dens <- dens[, i]
    dens <- dens/sum(dens)
    data <- data.frame(k=1:n,
                       Density=dens)
    font <- list(
      # family = "sans serif",
      size = 14,
      color = '#EBEBE9')
    fig <- plot_ly(data, x = ~k, y= ~Density,
                   type = 'scatter', mode = 'lines',
                   line = list(width = 5,
                               color="#510E61")) %>%
      layout(title = sprintf("For θ=%.2f", theta),
             hovermode="all",
             xaxis = list(title = 'k',
                          tick0 = 1,
                          showgrid = F),
             yaxis = list(title = 'Density',
                          showgrid = F),
             legend = list(orientation = 'h'),
             font=font,
             plot_bgcolor="transparent",
             paper_bgcolor="transparent",
             showlegend=FALSE)
    return(fig)
  })
  # 2D projection: density over theta at the k clicked in the 3D plot.
  output$posterior2D_theta <- renderPlotly({
    dens <- dens()
    n <- nrow(dens)
    m <- ncol(dens)
    hover_event <- event_data("plotly_hover", source="posterior3D")
    click_event <- event_data("plotly_click", source="posterior3D")
    validate(
      need(!is.null(click_event), "Please click in the density plot above to select a projection.")
    )
    k <- click_event$y
    thetas <- seq(0, 1, length.out=m)
    dens <- dens[k, ]
    dens <- dens/sum(dens)
    # dens <- dens/m ## scale based on theta resolution
    data <- data.frame(theta=thetas,
                       Density=dens)
    font <- list(
      # family = "sans serif",
      size = 14,
      color = '#EBEBE9')
    fig <- plot_ly(data, x = ~theta, y= ~Density,
                   type = 'scatter', mode = 'lines',
                   line = list(width = 5,
                               color="#510E61")) %>%
      layout(title = sprintf("For k=%i", k),
             hovermode="all",
             xaxis = list(title = 'θ',
                          tick0 = 0,
                          dtick = 0.1,
                          showgrid = F),
             yaxis = list(title = 'Density',
                          showgrid = F),
             legend = list(orientation = 'h'),
             font=font,
             plot_bgcolor="transparent",
             paper_bgcolor="transparent",
             showlegend=FALSE)
    return(fig)
  })
  # Power curves for the go / no-go / indecisive decision regions.
  output$powerCurvePlot <- renderPlotly({
    n <- input$n
    alpha <- input$alpha
    beta <- input$beta
    theta <- input$theta_0
    p_go <- input$p_go
    p_nogo <- input$p_nogo
    validate(need(sum(p_go, p_nogo) >= 1, "The sum of Go and NoGo probabilities should be larger or equal to 1."))
    ## Compute probability P(k|theta) and estimate critical k for which
    ## the following conditions are fullfilled
    ## 1) "Go" decision iff P(theta <= theta_0|k_go) < 1 - p_go
    ## 2) "No Go" decision iff P(theta <= theta_0|k_nogo) < p_nogo
    ##
    ## Condition 1 can be solved by searching the greatest k with
    ## cumsum(dbeta(theta_0, alpha+k, beta+n-k)) <= 1 - p_go
    ##
    ## Condition 2 can be solved by searching the greatest k with
    ## cumsum(dbeta(theta_0, alpha+k, beta+n-k)) >= p_nogo
    k_go <- crit_k(theta=theta, prob=p_go,
                   n=n, alpha=alpha, beta=beta,
                   type="go")
    k_nogo <- crit_k(theta=theta, prob=p_nogo, n=n,
                     alpha=alpha, beta=beta,
                     type="nogo")
    ## Go Power
    theta <- seq(0, 1, by=.001)
    go_pwr <- sapply(theta, function(theta) probbnml(theta, n, k_go))
    # go_pwr <- 1 - go_pwr
    go_pwr <- 1 - go_pwr
    ## No Go Power
    # no_go_pwr <- sapply(theta, function(theta) probbnml(theta, n, k_nogo))
    no_go_pwr <- sapply(theta, function(theta) probbnml(theta, n, k_nogo))
    ## Indecisive Power
    # indecisive_pwr <- 1 - no_go_pwr - go_pwr
    indecisive_pwr <- 1 - no_go_pwr - go_pwr
    # ## Sanity check: the three powers must sum to 1 at every theta
    a <- no_go_pwr[which.max(indecisive_pwr)]
    b <- go_pwr[which.max(indecisive_pwr)]
    c <- max(indecisive_pwr)
    print("Sane?")
    print(sum(a, b, c) == 1)
    fig <- plotPowerCurves(theta, go_pwr, no_go_pwr, indecisive_pwr)
    return(fig)
  })
  ## latex elements
  # Rendered LaTeX for the prior density with current shape parameters.
  output$priorDistFormula <- renderUI({
    alpha <- input$alpha
    beta <- input$beta
    uiElement <- withMathJax(helpText(sprintf('$$\\begin{align}
                                              p(\\theta)&=\\frac{\\theta^{\\alpha-1}(1-\\theta)^{\\beta-1}}{B(\\alpha, \\beta)}
                                              \\\\&=Beta(\\theta|\\alpha, \\beta)
                                              \\\\&=Beta(\\theta|\\textbf{%.2f}, \\textbf{%.2f})
                                              \\end{align}$$', alpha, beta, alpha, beta, alpha, beta)))
    return(uiElement)
  })
  # Rendered LaTeX for the binomial likelihood at the current n and k.
  output$likelihoodFormula <- renderUI({
    n <- input$n
    k <- sum(x())
    alpha <- input$alpha
    beta <- input$beta
    uiElement <- list(withMathJax(helpText(sprintf('$$X\\sim Bin(n, \\theta) = Bin(\\textbf{%i}, \\theta)$$', n))),
                      withMathJax(helpText(sprintf('$$\\begin{align}
                                                   p(x|\\theta)&={n\\choose{x}}\\theta^{x}(1-\\theta)^{n-x}
                                                   \\\\&={\\textbf{%i}\\choose{\\textbf{%i}}}\\theta^{\\textbf{%i}}(1-\\theta)^{\\textbf{%i}}
                                                   \\end{align}$$', n, k, k, n-k))))
    return(uiElement)
  })
  # Rendered LaTeX for the conjugate posterior Beta(alpha+k, beta+n-k).
  output$posteriorFormula <- renderUI({
    n <- input$n
    k <- sum(x())
    alpha <- input$alpha
    beta <- input$beta
    uiElement <- withMathJax(helpText(sprintf('$$\\begin{align}
                                              p(\\theta|x)&=p(x|\\theta)p(\\theta)
                                              \\\\&=\\theta^{x}(1-\\theta)^{n-x}\\theta^{\\alpha-1}(1-\\theta)^{\\beta-1}
                                              \\\\&=\\theta^{(\\alpha+x)-1}(1-\\theta)^{(\\beta+n-x)-1}
                                              \\\\&=Beta(\\theta|\\alpha+x, \\beta+n-x)
                                              \\\\&=Beta(\\theta|\\textbf{%.2f}, \\textbf{%.2f})
                                              \\end{align}$$', sum(alpha, k), sum(beta, n, -k))))
    return(uiElement)
  })
  ## point estimate calculations
  # Mode / mean / sd of the prior Beta distribution.
  output$pointEst_Prior <- renderTable({
    validate(
      need(!(input$n==0), NULL)
    )
    alpha <- input$alpha
    beta <- input$beta
    beta_mode <- betaMode(alpha, beta)
    beta_mean <- betaMean(alpha, beta)
    beta_std <- betaStd(alpha, beta)
    pE <- data.frame(Type=c("Mode", "Mean", "Std"),
                     PointEstimate=c(beta_mode, beta_mean, beta_std))
    return(pE)
  })
  # Mode / mean / sd of the (Beta-shaped) normalized likelihood.
  output$pointEst_Likelihood <- renderTable({
    x <- x()
    n <- input$n
    validate(
      need(!(input$n==0), NULL)
    )
    k <- switch(input$radioSample,
                "prob" = sum(x),
                "successes" = input$k,
                probInput)
    alpha <- k + 1
    beta <- n - k + 1
    beta_mode <- betaMode(alpha, beta)
    beta_mean <- betaMean(alpha, beta)
    beta_std <- betaStd(alpha, beta)
    pE <- data.frame(Type=c("Mode", "Mean", "Std"),
                     PointEstimate=c(beta_mode, beta_mean, beta_std))
    return(pE)
  })
  # Mode / mean / sd of the posterior Beta distribution.
  output$pointEst_Posterior <- renderTable({
    x <- x()
    n <- input$n
    validate(
      need(n!=0, NULL)
    )
    k <- switch(input$radioSample,
                "prob" = sum(x),
                "successes" = input$k,
                probInput)
    alpha <- input$alpha + k
    beta <- input$beta + n - k
    beta_mode <- betaMode(alpha, beta)
    beta_mean <- betaMean(alpha, beta)
    beta_std <- betaStd(alpha, beta)
    pE <- data.frame(Type=c("Mode", "Mean", "Std"),
                     PointEstimate=c(beta_mode, beta_mean, beta_std))
    return(pE)
  })
})
|
d36206537e3adf09cbf64ece73922f64bf16e7bc
|
fffafd2ff14a6c1b1df23a48790566a71bbf7b36
|
/Summary_statistics_complete_case.R
|
bbe0bc5b8521a93c9c7ca2491ae0b91b3f0cd8df
|
[] |
no_license
|
Ciarrah/tchr_rprtng_ccrcy
|
6eaf93710483dfd761b6c4cce668b3ed63abf786
|
ee3a17cf5661838cd0f773af92824ae295301041
|
refs/heads/main
| 2021-06-22T19:59:39.997422
| 2021-01-28T14:39:46
| 2021-01-28T14:39:46
| 185,997,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,848
|
r
|
Summary_statistics_complete_case.R
|
# SUMMARY STATISTICS
# Descriptive statistics for the complete-case teacher-reporting dataset.
# Assumes data_raw, data_raw.ks2 and data_raw.ks3 are already loaded in
# the workspace (created elsewhere in the project).
# Child sex: counts and percentages
table(data_raw$sex)
round((table(data_raw$sex)/nrow(data_raw))*100,2)
# Average teacher assessment at Key Stage 2 and Key Stage 3
summary(data_raw.ks2$AvTA)
round(sd(data_raw.ks2$AvTA),2)
summary(data_raw.ks3$AvTA)
round(sd(data_raw.ks3$AvTA),2)
# mnth_dlvry: presumably month of delivery -- TODO confirm
mean(data_raw$mnth_dlvry)
sd(data_raw$mnth_dlvry)
# KS2 average score: restricted to positive values (non-positive values
# presumably encode missingness -- TODO confirm)
length(which(data_raw$ks2_Av>0))
data_raw$ks2_Av=as.double(data_raw$ks2_Av)
round(mean(data_raw$ks2_Av[which(data_raw$ks2_Av>0)]),2)
round(sd(data_raw$ks2_Av[which(data_raw$ks2_Av>0)]),2)
# KS3 average score, same restriction
length(which(data_raw$ks3_Av>0))
data_raw$ks3_Av=as.double(data_raw$ks3_Av)
round(mean(data_raw$ks3_Av[which(data_raw$ks3_Av>0)]),2)
round(sd(data_raw$ks3_Av[which(data_raw$ks3_Av>0)]),2)
# Special educational needs flag
table(data_raw$SEN.F)
round((table(data_raw$SEN.F)/nrow(data_raw))*100,2)
# Mother's higher education
data_raw$mthr_HE=as.factor(data_raw$mthr_HE)
table(data_raw$mthr_HE)
round((table(data_raw$mthr_HE)/nrow(data_raw))*100,2)
# Social class
data_raw$scl_clss=factor(data_raw$scl_clss)
table(data_raw$scl_clss)
round((table(data_raw$scl_clss)/nrow(data_raw))*100,2)
# Weekly income band
data_raw$incm_wk=as.factor(data_raw$incm_wk)
table(data_raw$incm_wk)
round((table(data_raw$incm_wk)/nrow(data_raw))*100,2)
# Number of pupils, positive values only
length(data_raw$nmbr_ppls[which(data_raw$nmbr_ppls>0)])
round(mean(data_raw$nmbr_ppls[which(data_raw$nmbr_ppls>0)]),2)
round(sd(data_raw$nmbr_ppls[which(data_raw$nmbr_ppls>0)]),2)
# Teacher sex: percentages computed over the first two factor levels only
data_raw$tchr_sx=as.factor(data_raw$tchr_sx)
length(which(data_raw$tchr_sx%in%levels(data_raw$tchr_sx)[c(1,2)]))
table(data_raw$tchr_sx)
round(((summary(data_raw$tchr_sx)[c(1,2)]*100)/length(which(data_raw$tchr_sx%in%levels(data_raw$tchr_sx)[c(1,2)]))),2)
# Teacher length of service: percentages over the first four levels
data_raw$lngth_srv=as.factor(data_raw$lngth_srv)
length(which(data_raw$lngth_srv%in%levels(data_raw$lngth_srv)[c(1:4)]))
table(data_raw$lngth_srv)
round((summary(data_raw$lngth_srv)[c(1:4)]*100)/length(which(data_raw$lngth_srv%in%levels(data_raw$lngth_srv)[c(1:4)])),2)
|
1fdc338005639df1e7b9246f655860089062e846
|
400b384715f5f02ef43118f792d9eb73de314b2b
|
/tests/testthat/test-calc_pve.R
|
57f9ad6634774285d23c5a4eab2e2e5f99c32fc7
|
[] |
no_license
|
yassato/rNeighborQTL
|
8d250721fee767f96dd115324321a1c18ba67e1c
|
f3c54151f794fa0c9c213c9f0834b8e51aa4d6a4
|
refs/heads/master
| 2023-04-28T02:43:01.742030
| 2021-05-11T08:06:03
| 2021-05-11T08:06:03
| 253,141,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,346
|
r
|
test-calc_pve.R
|
# testthat tests for rNeighborQTL::calc_pve().  Uses three small crosses:
# a Col x Kas RIL population read from local CSVs plus the fake.f2 and
# fake.bc example crosses shipped with R/qtl.  Each cross is truncated
# to 2 individuals x 50 markers to keep the tests fast.
context("calc_pve")
#load data
colkas <- qtl::read.cross(format="csvs",dir="./",genfile="ColKas_geno.csv",phefile = "ColKas_pheno.csv",
                          na.strings = c("_"), estimate.map=TRUE, crosstype = "riself")
colkas <- colkas[1:2,1:50]
colkas_genoprob <- qtl::calc.genoprob(colkas, step=4)
# Spatial map: x/y coordinates taken from phenotype columns 2 and 3
x <- colkas$pheno[,2]
y <- colkas$pheno[,3]
smap_colkas <- data.frame(x,y)
# Candidate neighbor-distance scales: deciles of the pairwise distances
s_colkas <- quantile(dist(smap_colkas),c(0.1*(0:10)))
#F2: random spatial coordinates with a fixed seed for reproducibility
set.seed(1234)
data("fake.f2",package="qtl")
fake_f2 <- fake.f2[1:2,1:50]
smap_f2 <- cbind(runif(qtl::nind(fake_f2),1,100),runif(qtl::nind(fake_f2),1,100))
genoprobs_f2 <- qtl::calc.genoprob(fake_f2,step=4)
s_f2 <- quantile(dist(smap_f2),c(0.1*(1:10)))
#backcross: same setup as the F2 case
set.seed(1234)
data("fake.bc",package="qtl")
fake_bc <- fake.bc[1:2,1:50]
smap_bc <- cbind(runif(qtl::nind(fake_bc),1,100),runif(qtl::nind(fake_bc),1,100))
genoprobs_bc <- qtl::calc.genoprob(fake_bc,step=4)
s_bc <- quantile(dist(smap_bc),c(0.1*(1:10)))
# Binarized phenotypes (above/below the mean) for the binary-response tests
f2_bin <- as.numeric(fake_f2$pheno[,1]>mean(fake_f2$pheno[,1]))
bc_bin <- as.numeric(fake_bc$pheno[,1]>mean(fake_bc$pheno[,1]))
# Gaussian response: the reported PVE (column 3) must lie in [0, 1]
# for all three cross types.
test_that(
  desc = "pve_range",
  code = {
    colkas_pve <- calc_pve(genoprobs=colkas_genoprob,
                           pheno=log(colkas$pheno[,5]+1),
                           smap=smap_colkas, s_seq=s_colkas[1:3],
                           addcovar=as.matrix(colkas$pheno[,7:9]),
                           fig=FALSE)
    f2_pve <- calc_pve(genoprobs=genoprobs_f2,
                       pheno=fake_f2$pheno[,1],
                       smap=smap_f2, s_seq=s_f2[1:3],
                       addcovar=as.matrix(fake_f2$pheno$sex),
                       fig=FALSE)
    bc_pve <- calc_pve(genoprobs=genoprobs_bc,
                       pheno=fake_bc$pheno[,1],
                       smap=smap_bc, s_seq=s_bc[1:3],
                       addcovar=as.matrix(cbind(fake_bc$pheno$sex,fake_bc$pheno$age)),
                       fig=FALSE)
    expect_true(all(round(colkas_pve[,3],1)>=0))
    expect_true(all(round(f2_pve[,3],1)>=0))
    expect_true(all(round(bc_pve[,3],1)>=0))
    expect_true(all(round(colkas_pve[,3],1)<=1))
    expect_true(all(round(f2_pve[,3],1)<=1))
    expect_true(all(round(bc_pve[,3],1)<=1))
  })
# Binary response: column 4 is expected to be NA for all rows
# (apparently not defined for the binary model -- TODO confirm against
# calc_pve's documentation).
test_that(
  desc = "binary_action",
  code = {
    colkas_pveBin <- calc_pve(genoprobs=colkas_genoprob,
                              pheno=colkas$pheno[,6],
                              smap=smap_colkas,s_seq=s_colkas[1:3],
                              addcovar=NULL,
                              response="binary", fig=FALSE)
    f2_pveBin <- calc_pve(genoprobs=genoprobs_f2,
                          pheno=f2_bin,
                          smap=smap_f2, s_seq=s_f2[1:3],
                          addcovar=as.matrix(fake_f2$pheno$sex),
                          response="binary", fig=FALSE)
    bc_pveBin <- calc_pve(genoprobs=genoprobs_bc,
                          pheno=bc_bin,
                          smap=smap_bc, s_seq=s_bc[1:3],
                          addcovar=as.matrix(cbind(fake_bc$pheno$sex,fake_bc$pheno$age)),
                          response="binary", fig=FALSE)
    expect_true(all(is.na(colkas_pveBin[,4])))
    expect_true(all(is.na(f2_pveBin[,4])))
    expect_true(all(is.na(bc_pveBin[,4])))
  }
)
|
40a9dd30b37331f8f7523c9063396f661ba3db1d
|
95a27e35d9246630e835386eb04d18786209d278
|
/SVM - Kernels - letters dataset.R
|
e1819e62db64e9c3652f6c1e330bdd0a6bc852ee
|
[] |
no_license
|
ApurwaLoya/Data-Science
|
b05109ada4b400162bab1b33681e563aa94e3ee9
|
e14a4ae375f099d4abd60711e2b0e64f5b4326fe
|
refs/heads/master
| 2022-12-28T18:41:26.474913
| 2020-10-17T12:31:42
| 2020-10-17T12:31:42
| 254,518,691
| 0
| 1
| null | 2020-10-01T05:35:05
| 2020-04-10T01:46:09
|
R
|
UTF-8
|
R
| false
| false
| 1,126
|
r
|
SVM - Kernels - letters dataset.R
|
#####Support Vector Machines -------------------
## Optical Character Recognition ----
# Letter-recognition dataset; the first 16000 rows are used for training
# and the remaining 4000 for testing (assumes the file is pre-shuffled
# -- TODO confirm).
letterdata<-read.csv("E:\\Data Science\\Class\\letterdata.csv")
# divide into training and test data
letters_train <- letterdata[1:16000, ]
letters_test <- letterdata[16001:20000, ]
##Training a model on the data ----
# begin by training a simple linear SVM (vanilladot = linear kernel)
library(kernlab)
letter_classifier <- ksvm(letter ~ ., data = letters_train,
                          kernel = "vanilladot")
## Evaluating model performance ----
# predictions on testing dataset
letter_predictions <- predict(letter_classifier, letters_test)
head(letter_predictions)
#table(letter_predictions, letters_test$letter)
# Proportion of test letters predicted correctly
agreement <- letter_predictions == letters_test$letter
prop.table(table(agreement))
## Improving model performance ----
# Retrain with a Gaussian (RBF) kernel and compare accuracy
letter_classifier_rbf <- ksvm(letter ~ ., data = letters_train, kernel = "rbfdot")
letter_predictions_rbf <- predict(letter_classifier_rbf, letters_test)
head(letter_predictions_rbf)
agreement_rbf <- letter_predictions_rbf == letters_test$letter
table(agreement_rbf)
prop.table(table(agreement_rbf))
|
dd813696a03f2c11d51b6bdc21fc94830fa766c0
|
cf26b183b4a36144637938283813abd4a23cb303
|
/man/isIrreducible.Rd
|
9171645df077d686334b4157a714dca8bb29a3f5
|
[] |
no_license
|
cran/popdemo
|
5312fbff68852f6cbb602ac1b1abae05e8879719
|
2f6b882b1b0851d1942c5892071c956a5457a6a0
|
refs/heads/master
| 2023-03-16T02:09:54.158218
| 2021-11-16T13:20:02
| 2021-11-16T13:20:02
| 17,719,255
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,374
|
rd
|
isIrreducible.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isIrreducible.R
\name{isIrreducible}
\alias{isIrreducible}
\title{Determine reducibility of a matrix}
\usage{
isIrreducible(A)
}
\arguments{
\item{A}{a square, non-negative numeric matrix of any dimension.}
}
\value{
\code{TRUE} (for an irreducible matrix) or \code{FALSE} (for a reducible
matrix).
}
\description{
Determine whether a matrix is irreducible or reducible
}
\details{
\code{isIrreducible} works on the premise that a matrix \strong{A}
is irreducible if and only if (\strong{I}+\strong{A})^(s-1) is positive,
where \strong{I} is the identity matrix of the same dimension as \strong{A}
and s is the dimension of \strong{A} (Caswell 2001).
}
\examples{
# Create a 3x3 irreducible PPM
( A <- matrix(c(0,1,2,0.5,0.1,0,0,0.6,0.6), byrow=TRUE, ncol=3) )
# Diagnose reducibility
isIrreducible(A)
# Create a 3x3 reducible PPM
B<-A; B[3,2] <- 0; B
# Diagnose reducibility
isIrreducible(B)
}
\references{
\itemize{
  \item Caswell (2001) Matrix Population Models, 2nd ed. Sinauer.
}
}
\seealso{
Other PerronFrobeniusDiagnostics:
\code{\link{isErgodic}()},
\code{\link{isPrimitive}()}
}
\concept{Perron Frobenius}
\concept{PerronFrobeniusDiagnostics}
\concept{irreducible}
\concept{reducibility}
\concept{reducible}
|
670372b3f4b752790fb3a174dec7af11efdeea36
|
84e7b589d3d8b05e52e927dc7ce77b79515e71fa
|
/ch01 - 기초/03. 산술연산.r
|
98a76a9580463eef7fc52aea6c880532a54a7826
|
[
"MIT"
] |
permissive
|
Lee-changyul/Rstudy_Lee
|
d1e0f28190de74643d5c0a14f178b41250db7860
|
837a88d6cb4c0e223b42ca18dc5a469051b48533
|
refs/heads/main
| 2023-06-29T20:21:10.968106
| 2021-08-02T01:48:00
| 2021-08-02T01:48:00
| 325,493,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
03. 산술연산.r
|
# Arithmetic operations
tot1 <- -5
tot2 <- 10
tot1+tot2
tot1-tot2
tot1*tot2
tot1/tot2
tot1^tot2
# The last one (exponentiation) can also be written with ** instead of ^
|
5222ab3858a4cb70c091f75ff01998ccd821686e
|
0f96b45966da3fd162b7d1810f413d23da242139
|
/PackageCH/man/get_frequency.Rd
|
e1af99e949c654f6cbd024997cb8dd5b499b9f93
|
[] |
no_license
|
duvaneljulien/PackageCH
|
0e281ba39edb0fd87678e96341f391661a7b57f8
|
9c127f8c5a0a33918cee76e3413a6b9fdade4328
|
refs/heads/master
| 2020-05-07T22:14:09.540087
| 2015-01-15T03:30:26
| 2015-01-15T03:30:26
| 29,278,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
rd
|
get_frequency.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{get_frequency}
\alias{get_frequency}
\title{Get frequency of minor allele}
\usage{
get_frequency(G, snp)
}
\arguments{
\item{G}{genomic matrix}
\item{snp}{a snp (index)}
}
\value{
frequency of the minor allele
}
\description{
Get frequency of the minor allele for the SNP snp
}
\author{
Julien Duvanel
}
|
bac9a57cdafaa3fef146d89a9e3ca3e33e4707d8
|
375ad9d11429898879d8d041c6beba145b70482a
|
/tests/testthat.R
|
0acd497b86b4bc1ec34d295aaf2be80b0c8ea12a
|
[
"MIT"
] |
permissive
|
LiYingWang/signatselect
|
d502485ba4d2c33dca0a541c7442a54348f25d8e
|
3ae3c71b2ad2a538acc7405f7a966d9ec0f4849c
|
refs/heads/master
| 2020-11-26T20:02:16.774709
| 2019-12-03T20:47:36
| 2019-12-03T20:47:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68
|
r
|
testthat.R
|
# Test runner: discovers and executes all testthat tests for the
# signatselect package (invoked by R CMD check).
library(testthat)
library(signatselect)
test_check("signatselect")
|
c0282097bd56288b6c5068132573ffa84a6ed884
|
78d291e32990ae26a021f752d34a8caabf03cdd8
|
/man/nz_internal_migration_summary.Rd
|
607166e0e2dbf05372fb2236ad2941bafb58301f
|
[] |
no_license
|
nacnudus/nzmigration
|
3e5bc96fa8e2e73e389a9a95a00e9956febbfc66
|
d920b6cb025c42eccbf70cc24d12acb1a026bd4a
|
refs/heads/master
| 2021-08-08T09:02:57.483169
| 2017-11-10T01:57:58
| 2017-11-10T01:57:58
| 110,188,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,421
|
rd
|
nz_internal_migration_summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nz_internal_migration_summary.R
\docType{data}
\name{nz_internal_migration_summary}
\alias{nz_internal_migration_summary}
\title{New Zealand Internal Migration Summary Statistics 2013}
\format{A data frame with 351972 rows and 11 variables. This single table
includes data from all the separate tables in the original spreadsheet, so
some of the column names are generic (\code{var1} ... \code{var6}).
\itemize{
\item \code{table} Name of the table from the original spreadsheet
\item \code{title} Title of the table from the original spreadsheet
\item \code{subtitle} Subtitle of the table from the original spreadsheet
\item \code{var1} First variable of the table from the original spreadsheet
\item \code{...}
\item \code{var6} Sixth variable of the table from the original spreadsheet
\item \code{count} Number of people
\item \code{flag} Only one flag is used, \code{..C}, to mean 'confidential'.
}}
\source{
http://m.stats.govt.nz/~/media/Statistics/browse-categories/population/migration/internal-migration-tables/int-mig-2013-summary-tables.xlsx
http://m.stats.govt.nz/browse_for_stats/population/Migration/internal-migration/tables.aspx
}
\usage{
nz_internal_migration_summary
}
\description{
A dataset containing tables of summary statistics describing internal
migration in New Zealand, from the 2013 census.
}
\keyword{datasets}
|
a05e4080ecccbe827c741f1465a254002d123247
|
118f768abfa3d6a66cf2c93c9388460bb73cc848
|
/MinimumRemoval-50.R
|
92c1cce085273a34bff9ae6e03f47ecfd1d445fd
|
[] |
no_license
|
joshmeek-old/RWeka-Naive-Bayes-10-Fold-Cross-Validation
|
5fd69e6f7ffde0e20244a331af29cec5bf0c217c
|
926b1118a7a93e6b00426fb318c6ca1268183e18
|
refs/heads/master
| 2021-05-30T10:25:58.246959
| 2015-10-20T04:15:27
| 2015-10-20T04:15:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
MinimumRemoval-50.R
|
# For each row, replace every occurrence of the row's minimum value in
# columns 54-103 with Weka's missing-value marker '?', then overwrite
# the input file in place.
csv_data <- read.csv("~/anomalies/Processed_Data_50.csv")
colnames(csv_data)
data <- csv_data
# NOTE(review): assigning '?' coerces the affected columns to character,
# and ties cause several columns in a row to be blanked -- confirm both
# are intended for the downstream Weka export.
for(i in 1:length(data[, 1])) {
  minimum <- min(as.numeric(data[i, 54:103]))
  for(k in 54:103) {
    if(as.numeric(data[i, k]) == minimum)
      data[i, k] <- '?'
  }
}
write.csv(data, "~/anomalies/Processed_Data_50.csv")
|
1ce3fe79177b738f8ceec213d26a29cc3b774a4f
|
7cf661ab76d10b8f36cdf0a8e5bd087030594cf0
|
/scripts/gwsca_biallelic_vcf.R
|
952b356f40e5516117a5101ab06fc41a89608075
|
[] |
no_license
|
spflanagan/SCA
|
22326ed442260c5839e2406ef6463c6b6090681e
|
3ccbb84eebc3f908bcd96b3476bc7a99eb51853f
|
refs/heads/master
| 2020-04-12T01:20:55.415391
| 2018-05-09T23:24:07
| 2018-05-09T23:24:07
| 51,270,799
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,821
|
r
|
gwsca_biallelic_vcf.R
|
#Try doing gwsca_biallelic_vcf here.
# NOTE(review): hard-coded source() path and setwd() tie this script to one
# machine; prefer relative paths. plot.fsts() used below presumably comes
# from plotting_functions.R -- confirm.
source("E:/ubuntushare/SCA/scripts/plotting_functions.R")
setwd("E:/ubuntushare/SCA/results/biallelic")
#' Per-SNP Fst between two groups at a biallelic site.
#'
#' @param x One row of a genotype table for group 1: columns 1-2 are the
#'   chromosome and position, remaining columns hold genotype strings
#'   such as "0/1"; "./." marks a missing call.
#' @param y The matching row (same site, same row order) for group 2.
#' @return One-row data frame: Chr, Pos, major allele frequency
#'   (MajAF1/MajAF2) and number of called individuals (N1/N2) per group,
#'   plus Hs, Ht and Fst. Frequency/heterozygosity fields are NA when
#'   either group is monomorphic (Fst undefined there).
calc.fst <- function(x, y) { #x is one row of a vcf file
  # The two tables must be row-aligned. Failing loudly beats the original
  # behavior, which print()ed a message and returned a character string
  # that rbind() downstream would silently mangle.
  if (!(x[1] == y[1] & x[2] == y[2])) {
    stop("Sort your vcfs! The two do not match up.")
  }
  chr = x[1]
  pos = x[2]
  # Drop the two metadata columns and the missing calls from each group.
  x <- unlist(x[3:length(x)])
  x <- factor(x[x != "./."])
  y <- unlist(y[3:length(y)])
  y <- factor(y[y != "./."])
  xy <- c(as.character(x), as.character(y))
  # Allele frequencies per group: split "a/b" genotypes into alleles.
  # (Unused genotype-frequency tables gf.x/gf.y from the original removed.)
  af.x <- table(unlist(lapply(as.character(x), strsplit, "/"))) /
    sum(table(unlist(lapply(as.character(x), strsplit, "/"))))
  hs.x <- 2 * af.x[1] * af.x[2]  # expected heterozygosity, group 1
  af.y <- table(unlist(lapply(as.character(y), strsplit, "/"))) /
    sum(table(unlist(lapply(as.character(y), strsplit, "/"))))
  hs.y <- 2 * af.y[1] * af.y[2]  # expected heterozygosity, group 2
  if (length(af.x) > 1 & length(af.y) > 1) {
    # Pooled allele frequencies; Hs is the sample-size-weighted mean
    # within-group heterozygosity; Fst = (Ht - Hs) / Ht.
    af <- table(unlist(lapply(as.character(xy), strsplit, "/"))) /
      sum(table(unlist(lapply(as.character(xy), strsplit, "/"))))
    hs <- (hs.x * length(x) + hs.y * length(y)) / (length(x) + length(y))
    ht <- 2 * af[1] * af[2]
    fst <- (ht - hs) / ht
    return(data.frame(Chr = chr, Pos = pos, MajAF1 = max(af.x),
      MajAF2 = max(af.y), N1 = length(x), N2 = length(y), Hs = hs, Ht = ht, Fst = fst))
  } else {
    # Monomorphic in at least one group: report sample sizes, stats NA.
    return(data.frame(Chr = chr, Pos = pos, MajAF1 = NA,
      MajAF2 = NA, N1 = length(x), N2 = length(y), Hs = NA, Ht = NA, Fst = NA))
  }
}
#' Drop SNP rows whose genotype coverage is at or below a threshold.
#'
#' @param vcf.df Data frame: columns 1-2 are CHROM/POS metadata, remaining
#'   columns are genotype calls with "./." marking a missing call.
#' @param cov.per Required fraction of non-missing calls, e.g. 0.75.
#' @return The subset of rows with strictly more than cov.per of their
#'   genotype columns called.
prune.vcf <- function(vcf.df, cov.per) {
  n.geno <- ncol(vcf.df) - 2
  # Count called genotypes per row, EXCLUDING the CHROM/POS columns: the
  # original counted them as "covered" (they are never "./."), silently
  # relaxing the threshold by two calls per row.
  covered <- rowSums(vcf.df[, -(1:2), drop = FALSE] != "./.")
  vcf.df[covered > cov.per * n.geno, , drop = FALSE]
}
# Load the biallelic genotype calls and per-individual metadata, then
# split the genotype columns into demographic groups for pairwise Fst.
vcf.orig<-read.delim("biallelic.gt.vcf")
info<-read.delim("ind_info_vcf.txt",col.names=c("name","ID","sex","age","status"))
#prune for overall coverage
# Keep only SNPs called in more than 75% of all individuals.
vcf<-prune.vcf(vcf.orig,0.75)
# Each group keeps CHROM/POS plus the genotype columns of individuals in
# that class (adults, juveniles, females, males, mothers).
adt<-cbind(vcf[,1:2],vcf[,colnames(vcf) %in% info[info$age=="ADULT",]$name])
off<-cbind(vcf[,1:2],vcf[,colnames(vcf) %in% info[info$age=="JUVIE",]$name])
fem<-cbind(vcf[,1:2],vcf[,colnames(vcf) %in% info[info$sex=="FEM",]$name])
mal<-cbind(vcf[,1:2],vcf[,colnames(vcf) %in% info[info$sex=="MAL",]$name])
mom<-cbind(vcf[,1:2],vcf[,colnames(vcf) %in% info[info$status=="MOM",]$name])
#calculate fsts, then prune
# Sort each group by chromosome then position so rows stay aligned across
# groups when calc.fst() pairs them by index.
# BUG FIX: the original used order(c(df$CHROM, df$POS)), which orders the
# 2n-long concatenation of the two columns and then indexes n rows with a
# permutation of 1:(2n) -- duplicating rows and injecting all-NA rows.
# order() takes multiple sort keys as separate arguments.
adt <- adt[order(adt$CHROM, adt$POS), ]
off <- off[order(off$CHROM, off$POS), ]
mal <- mal[order(mal$CHROM, mal$POS), ]
fem <- fem[order(fem$CHROM, fem$POS), ]
mom <- mom[order(mom$CHROM, mom$POS), ]
# Per-SNP Fst for each demographic contrast (adult-offspring, female-male,
# mother-female). Building each result with lapply() + do.call(rbind, ...)
# replaces the original rbind-in-a-loop, which copies the accumulator on
# every iteration (O(n^2)); seq_len() is also safe when vcf has 0 rows
# (the original 1:nrow(vcf) would yield c(1, 0)).
ao.fsts <- do.call(rbind, lapply(seq_len(nrow(vcf)),
                                 function(i) calc.fst(adt[i, ], off[i, ])))
fm.fsts <- do.call(rbind, lapply(seq_len(nrow(vcf)),
                                 function(i) calc.fst(mal[i, ], fem[i, ])))
md.fsts <- do.call(rbind, lapply(seq_len(nrow(vcf)),
                                 function(i) calc.fst(mom[i, ], fem[i, ])))
#Remove any NAs
# Drop SNPs where Fst is undefined (monomorphic within a group).
ao.prune<-ao.fsts[!is.na(ao.fsts$Fst),]
fm.prune<-fm.fsts[!is.na(fm.fsts$Fst),]
md.prune<-md.fsts[!is.na(md.fsts$Fst),]
#Prune for per-group Coverage
# Require genotype calls for at least half of each group's individuals;
# N1/N2 are the per-group call counts returned by calc.fst().
ao.prune<-ao.prune[ao.prune$N1 >= 0.5*(ncol(adt)-2) &
                     ao.prune$N2 >= 0.5*(ncol(off)-2),]
fm.prune<-fm.prune[fm.prune$N1 >= 0.5*(ncol(mal)-2) &
                     fm.prune$N2 >= 0.5*(ncol(fem)-2),]
md.prune<-md.prune[md.prune$N1 >= 0.5*(ncol(mom)-2) &
                     md.prune$N2 >= 0.5*(ncol(fem)-2),]
#Prune for Allele Frequency
# Keep sites whose minor allele frequency exceeds 5% in both groups
# (major allele frequency strictly between 0.05 and 0.95).
ao.prune<-ao.prune[ao.prune$MajAF1 > 0.05 & ao.prune$MajAF1 < 0.95 &
                     ao.prune$MajAF2 > 0.05 & ao.prune$MajAF2 < 0.95,]
fm.prune<-fm.prune[fm.prune$MajAF1 > 0.05 & fm.prune$MajAF1 < 0.95 &
                     fm.prune$MajAF2 > 0.05 & fm.prune$MajAF2 < 0.95,]
md.prune<-md.prune[md.prune$MajAF1 > 0.05 & md.prune$MajAF1 < 0.95 &
                     md.prune$MajAF2 > 0.05 & md.prune$MajAF2 < 0.95,]
#get model data
# Null distributions from simulated data. 2.57583 is the standard normal
# quantile for a two-sided 99% interval, so each null band is
# mean +/- z * sd of the simulated Fst for that contrast, restricted to
# polymorphic (MAF 5-95%), positive-Fst simulated loci.
model<-read.delim("../sca_simulation_output/ddraddist.ss0.2alleles.fst_out.txt")
# Adult-offspring (AOFst) contrast.
model.aj<-model[model$AOFst>0 & model$MaleAF < 0.95 & model$MaleAF > 0.05,]
aj.null<-c(mean(model.aj$AOFst)+2.57583*sd(model.aj$AOFst),
           mean(model.aj$AOFst)-2.57583*sd(model.aj$AOFst))
# MDFst contrast (mothers vs. females, per the plotting legend below).
model.mo<-model[model$MDFst>0 & model$FemAF < 0.95 & model$FemAF > 0.05,]
mo.null<-c(mean(model.mo$MDFst)+2.57583*sd(model.mo$MDFst),
           mean(model.mo$MDFst)-(2.57583*sd(model.mo$MDFst)))
# Male-female (MFFst) contrast.
model.mf<-model[model$MFFst>0 & model$MaleAF < 0.95 & model$MaleAF > 0.05,]
mf.null<-c(mean(model.mf$MFFst)+(2.57583*sd(model.mf$MFFst)),
           mean(model.mf$MFFst)-(2.57583*sd(model.mf$MFFst)))
#plot with the model CIs
# Three-panel figure: observed per-SNP Fst for each contrast with its
# simulation-derived 99% null band overlaid. plot.fsts() is presumably
# defined in the sourced plotting_functions.R -- verify before running.
png("fst.biallelic.pruned.Rcalc.model.png",height=300,width=300,units="mm",res=300)
par(mfrow=c(3,1),oma=c(1,1,0,0),mar=c(0,1,1,0),mgp=c(3,0.5,0), cex=1.5)
plot.fsts(ao.prune, ci.dat=aj.null,fst.name="Fst", chrom.name="CHROM"
          , axis.size=0.75, bp.name="POS")
legend("top","Adult-Juvenile", cex=0.75,bty="n")
plot.fsts(fm.prune, ci.dat=mf.null,fst.name="Fst", chrom.name="CHROM"
          , axis.size=0.75,bp.name="POS")
legend("top","Male-Female", cex=0.75,bty="n")
plot.fsts(md.prune, ci.dat=mo.null,fst.name="Fst", chrom.name="CHROM"
          , axis.size=0.75,bp.name="POS")
legend("top","Mothers-Females", cex=0.75,bty="n")
mtext("Genomic Location", 1, outer=T, cex=1)
mtext("Fst", 2, outer=T, cex=1)
dev.off()
|
bfd16cd0f6a308d471a8a148c4b746171fd183c2
|
361f26a1727992a13c23dad72fb89efd5a1265d3
|
/code/sem_2018_density_exp.R
|
69342447cb517cc937d85a8452781dd5d08355a2
|
[] |
no_license
|
aekendig/microstegium-bipolaris
|
111c91dcc0740d13fd08e46a65df4ad63244c2c5
|
fce94d4d481bf6320adec296c8fbd50690805a65
|
refs/heads/master
| 2023-03-02T15:53:23.999028
| 2023-02-25T17:20:25
| 2023-02-25T17:20:25
| 175,024,595
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43,905
|
r
|
sem_2018_density_exp.R
|
##### info ####
# file: sem_2018_density_exp
# author: Amy Kendig
# date last edited: 1/21/21
# goal: fit SEM to focal plot-level data
#### set up ####
# clear all existing data
rm(list=ls())
# load packages
library(tidyverse)
library(lavaan)
library(GGally)
library(lavaan.survey)
library(semPower)
library(lavaanPlot)
# import plot information
plotsD <- read_csv("data/plot_treatments_2018_2019_density_exp.csv")
# import focal fitness data
mvSeedD1Dat <- read_csv("intermediate-data/mv_processed_seeds_2018_density_exp.csv")
# mv_seeds_data_processing_2018_density_exp.R and mv_biomass_data_processing_2018_density_exp.R
evSeedD1Dat <- read_csv("intermediate-data/ev_processed_seeds_both_year_conversion_2018_density_exp.csv")
# ev_seeds_data_processing_2018.R and ev_seeds_data_processing_2019.R
survD1Dat <- read_csv("intermediate-data/all_processed_survival_2018_density_exp.csv")
# all_survival_data_processing_2018
mvGermD1Dat1 <- read_csv("data/mv_germination_disease_set_1_2018_density_exp.csv")
mvGermD1Dat2 <- read_csv("data/mv_germination_disease_set_2_2018_density_exp.csv")
evGermDat <- read_csv("data/ev_germination_2018_2019_density_exp.csv")
# import growth data
growthD1Dat <- read_csv("intermediate-data/focal_processed_growth_2018_density_exp.csv")
# focal_growth_data_processing_2018_density_exp
# import severity data
sevD1Dat <- read_csv("intermediate-data/focal_leaf_scans_2018_density_exp.csv")
# leaf_scans_data_processing_2018_density_exp.R
# import environmental variables
envD1Dat <- read_csv("intermediate-data/covariates_2018_density_exp.csv") # covariate_data_processing_2018_density_exp
#### edit data ####
# plant group densities
# Focal plants per plot: 3 Mv seedlings, 3 Ev seedlings, 1 Ev adult; the
# background treatment adds background_density individuals of one group.
plotDens <- plotsD %>%
  mutate(Mv_seedling_density = case_when(background == "Mv seedling" ~ background_density + 3,
                                         TRUE ~ 3),
         Ev_seedling_density = case_when(background == "Ev seedling" ~ background_density + 3,
                                         TRUE ~ 3),
         Ev_adult_density = case_when(background == "Ev adult" ~ background_density + 1,
                                      TRUE ~ 1)) %>%
  select(plot, treatment, Mv_seedling_density, Ev_seedling_density, Ev_adult_density)

# survival
# make survival 1 if the plant produced seeds in summer
# remove NA's
survD1Datb <- survD1Dat %>%
  filter(month == "September" & focal == 1) %>%
  select(-month) %>%
  mutate(survival = case_when(seeds_produced == 1 ~ 1,
                              TRUE ~ survival)) %>%
  filter(!is.na(survival)) %>%
  group_by(site, plot, treatment, sp, age) %>%
  summarise(survival = mean(survival))
# 233 entries, the one missing is the Ev adult that was not virginicus

# severity data
# Severity = (lesion fraction of scanned leaf area) scaled by the fraction
# of leaves infected, capped at 1; then widened to one column per month.
sevD1Datb <- sevD1Dat %>%
  mutate(severity = (lesion_area.pix * leaves_infec) / (leaf_area.pix * leaves_tot),
         severity = ifelse(severity > 1, 1, severity)) %>%
  select(month, site, plot, treatment, sp, ID, severity) %>%
  pivot_wider(names_from = month,
              names_glue = "{month}_severity",
              values_from = severity)
# Quick non-missing counts per survey month (printed for inspection only).
sum(!is.na(sevD1Datb$jul_severity))
sum(!is.na(sevD1Datb$late_aug_severity))
sum(!is.na(sevD1Datb$sep_severity))

# germination
# average across trials
# Parse site/plot/treatment out of the "site_plot" label; NOTE(review):
# "P1" is recoded to "D1" -- presumably a datasheet labeling fix, confirm
# against the raw files.
mvGermD1Dat <- mvGermD1Dat1 %>%
  mutate(germination_final = ifelse(is.na(germination_final), germination_check_1, germination_final)) %>%
  select(site_plot, trial, seeds, germination_final) %>%
  full_join(mvGermD1Dat2 %>%
              select(site_plot, trial, seeds, germination_final)) %>%
  mutate(site = gsub(" .*$", "", site_plot),
         plot = gsub(".* ","", site_plot) %>%
           gsub("[^[:digit:]]", "", .) %>%
           as.numeric(),
         treatment = gsub(".* ","", site_plot) %>%
           gsub("[^[:alpha:]]", "", .) %>%
           as.factor() %>%
           recode("F" = "fungicide", "W" = "water"),
         site = ifelse(site == "P1", "D1", site)) %>%
  group_by(site, plot, treatment) %>%
  summarise(germination = mean(germination_final/seeds)) %>%
  ungroup()
# select data from year 1
# correct reduction in emergents between week 3 and 4
# correct the increase in cut-tops
# correct repair of cut tops between weeks 3 and 4
# use average of three sites to add in missing value
# NOTE(review): the hard-coded germination = 0.161 fills the missing
# D3/fungicide/seedling cell; presumably it is the three-site mean
# (the germination2 helper column computed and then dropped) -- confirm.
evGermD1Dat <- evGermDat %>%
  filter(seeds_planted > 0 & year == 2018) %>%
  mutate(week_4_emerg = case_when(week_4_emerg < week_3_emerg ~ week_3_emerg,
                                  TRUE ~ week_4_emerg),
         week_3_cut_tops = case_when(week_4_cut_tops > week_3_cut_tops & week_4_cut_tops <= week_2_emerg ~ week_4_cut_tops,
                                     TRUE ~ week_3_cut_tops),
         week_4_cut_tops = case_when(week_4_cut_tops > week_2_emerg ~ week_3_cut_tops,
                                     week_4_cut_tops < week_3_cut_tops ~ week_3_cut_tops,
                                     TRUE ~ week_4_cut_tops),
         week_3_new_emerg = week_3_emerg - week_3_cut_tops,
         week_4_new_emerg = week_4_emerg - week_4_cut_tops,
         emerg = week_2_emerg + week_4_new_emerg + week_4_soil_germ,
         germination = emerg/seeds_planted) %>%
  group_by(site, treatment, age) %>%
  summarise(germination = mean(germination)) %>%
  ungroup() %>%
  group_by(treatment, age) %>%
  mutate(germination2 = mean(germination)) %>%
  ungroup %>%
  full_join(tibble(site = "D3",
                   treatment = "fungicide",
                   age = "seedling",
                   germination = 0.161)) %>%
  select(-germination2)

# seeds
# Total seeds per focal Ev plant, dropping plants with unclear identity.
evSeedD1Datb <- evSeedD1Dat %>%
  filter(focal == 1 & ID_unclear == 0) %>%
  group_by(site, plot, treatment, sp, age, ID) %>%
  summarise(seeds = sum(seeds)) %>%
  ungroup()

# combine data
# Mv per-plant table: seeds x germination x survival on the log(x + 1)
# scale, growth measures, and arcsine-square-root transformed late-August
# severity (missing values imputed from the July/September means).
mvDat <- mvSeedD1Dat %>%
  full_join(growthD1Dat %>%
              filter(sp == "Mv") %>%
              select(site:ID, height_growth, tiller_growth)) %>%
  full_join(survD1Datb %>%
              filter(sp == "Mv")) %>%
  full_join(sevD1Datb %>%
              filter(sp == "Mv")) %>%
  full_join(mvGermD1Dat) %>%
  rowwise() %>%
  mutate(late_aug_severity = ifelse(is.na(late_aug_severity),
                                    mean(c(jul_severity, sep_severity), na.rm = T),
                                    late_aug_severity)) %>%
  ungroup() %>%
  mutate(mv_seeds = log(total_seeds * germination * survival + 1),
         mv_height = height_growth,
         mv_tiller = tiller_growth,
         mv_severity = asin(sqrt(late_aug_severity))) %>%
  select(site, plot, treatment, sp, ID, mv_seeds, mv_height, mv_tiller, mv_severity)
# one plant is not Ev (remove from all: D2 7W Ev A)
# could not fit SEM with individual-level Ev severity (too many missing)
# Ev per-plant table, analogous to mvDat; severity is averaged to the plot
# level, and missing seed counts are treated as zero seeds.
evDat <- evSeedD1Datb %>%
  full_join(growthD1Dat %>%
              filter(sp == "Ev") %>%
              select(site:ID, height_growth:basal_growth) %>%
              mutate(age = ifelse(ID == "A", "adult", "seedling"))) %>%
  full_join(survD1Datb %>%
              filter(sp == "Ev")) %>%
  full_join(sevD1Datb %>%
              filter(sp == "Ev" &
                       !(site == "D2" & plot == 7 & treatment == "water" & ID == "A")) %>%
              rowwise() %>%
              mutate(late_aug_severity = ifelse(is.na(late_aug_severity),
                                                mean(c(jul_severity, sep_severity), na.rm = T),
                                                late_aug_severity)) %>%
              ungroup() %>%
              group_by(site, plot, treatment, sp) %>%
              summarise(late_aug_severity = mean(late_aug_severity, na.rm = T)) %>%
              ungroup()) %>%
  full_join(evGermD1Dat) %>%
  filter(!(site == "D2" & plot == 7 & treatment == "water" & age == "adult")) %>%
  mutate(seeds = replace_na(seeds, 0),
         ev_seeds = log(seeds * germination * survival + 1),
         ev_height = height_growth,
         ev_tiller = tiller_growth,
         ev_basal = basal_growth,
         ev_severity = asin(sqrt(late_aug_severity))) %>%
  select(site, plot, treatment, sp, ID, ev_seeds, ev_height, ev_tiller, ev_basal, ev_severity)

# make wide
# One row per plot: focal-plant measures spread into columns by plant ID
# (e.g. mv_seeds_1 ... ev_basal_A), joined with covariates and densities;
# two D4 fungicide plots are excluded.
dat <- mvDat %>%
  select(-sp) %>%
  pivot_wider(names_from = ID,
              values_from = c(mv_seeds, mv_height, mv_tiller, mv_severity),
              names_glue = "{.value}_{ID}") %>%
  full_join(evDat %>%
              select(-sp) %>%
              pivot_wider(names_from = ID,
                          values_from = c(ev_seeds, ev_height, ev_tiller, ev_basal, ev_severity),
                          names_glue = "{.value}_{ID}")) %>%
  full_join(envD1Dat) %>%
  full_join(plotDens) %>%
  mutate(fungicide = ifelse(treatment == "water", 0, 1),
         log_mv_density = log(Mv_seedling_density),
         log_evS_density = log(Ev_seedling_density),
         log_evA_density = log(Ev_adult_density)) %>%
  filter(!(site == "D4" & plot %in% c(8, 10) & treatment == "fungicide"))
#### visualizations ####
# Exploratory checks of the SEM inputs: marginal distributions of each
# indicator and pairwise correlations among candidate indicators. The
# author's inline notes record the notable correlations observed.
# histograms
ggplot(dat, aes(x = mv_seeds_1)) +
  geom_histogram()
ggplot(mvDat, aes(x = mv_height)) +
  geom_histogram()
ggplot(mvDat, aes(x = mv_tiller)) +
  geom_histogram()
ggplot(mvDat, aes(x = mv_severity)) +
  geom_histogram()
ggplot(evDat, aes(x = ev_seeds, fill = ID)) +
  geom_histogram()
ggplot(evDat, aes(x = ev_height)) +
  geom_histogram()
ggplot(evDat, aes(x = ev_tiller)) +
  geom_histogram()
ggplot(evDat, aes(x = ev_basal)) +
  geom_histogram()
ggplot(dat, aes(x = ev_severity_1)) +
  geom_histogram()
# correlations
ggpairs(sevD1Datb %>%
          select(jul_severity, late_aug_severity, sep_severity))
ggpairs(dat %>%
          select(mv_seeds_1, mv_seeds_2, mv_seeds_3))
ggpairs(dat %>%
          select(mv_height_1, mv_height_2, mv_height_3))
ggpairs(dat %>%
          select(mv_tiller_1, mv_tiller_2, mv_tiller_3))
ggpairs(dat %>%
          select(mv_severity_1, mv_severity_2, mv_severity_3))
ggpairs(dat %>%
          select(mv_height_1, mv_tiller_1, mv_severity_1)) # tiller and height 0.24
ggpairs(dat %>%
          select(mv_height_2, mv_tiller_2, mv_severity_2))
ggpairs(dat %>%
          select(mv_height_3, mv_tiller_3, mv_severity_3))
ggpairs(dat %>%
          select(ev_seeds_1, ev_seeds_2, ev_seeds_3, ev_seeds_A))
ggpairs(dat %>%
          select(ev_height_1, ev_height_2, ev_height_3, ev_height_A))
ggpairs(dat %>%
          select(ev_tiller_1, ev_tiller_2, ev_tiller_3, ev_tiller_A))
ggpairs(dat %>%
          select(ev_basal_1, ev_basal_2, ev_basal_3, ev_basal_A))
ggpairs(dat %>%
          select(ev_severity_1, ev_severity_2, ev_severity_3, ev_severity_A))
ggpairs(dat %>%
          select(ev_seeds_1, ev_height_1, ev_tiller_1, ev_basal_1, ev_severity_1)) # height and severity 0.68, correlations among tiller, basal, and height, seeds and basal 0.35
ggpairs(dat %>%
          select(ev_seeds_2, ev_height_2, ev_tiller_2, ev_basal_2, ev_severity_2)) # correlations among tiller, basal, and height, seeds and basal 0.38
ggpairs(dat %>%
          select(ev_seeds_3, ev_height_3, ev_tiller_3, ev_basal_3, ev_severity_3)) # correlations among tiller, basal, and height, seeds and basal 0.41
ggpairs(dat %>%
          select(ev_seeds_A, ev_height_A, ev_tiller_A, ev_basal_A, ev_severity_A)) # correlations among tiller, basal, and height, seeds and basal 0.23
#### fit density model ####
# define model
# Full hypothesized SEM: latent resource-acquisition, fitness, and disease
# constructs per species, with planting densities and the fungicide
# treatment as exogenous predictors.
mod1 <- '# latent variables
mv_resources =~ mv_height_1 + mv_height_2 + mv_height_3 + mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_height_1 + ev_height_2 + ev_height_3 + ev_height_A + ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A + ev_basal_1 + ev_basal_2 + ev_basal_3 + ev_basal_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1 + ev_severity_2 + ev_severity_3 + ev_severity_A
# regressions
mv_resources ~ log_mv_density + log_evS_density + log_evA_density
mv_disease ~ fungicide + log_mv_density + log_evS_density + log_evA_density
mv_fitness ~ mv_resources + mv_disease
ev_resources ~ log_mv_density + log_evS_density + log_evA_density
ev_disease ~ fungicide + log_mv_density + log_evS_density + log_evA_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease'
# fit model
# missing = "fiml": full-information maximum likelihood retains plots with
# incomplete indicators instead of listwise deletion.
fit1 <- sem(mod1, data = dat,
            missing = "fiml")
summary(fit1, fit.measures = T, standardized = T)
# does not converge
# refit model with "size" instead of separate size measurements
# use plot-level ev severity (too much missing data)
# okay to use non-log-transformed density
mod2 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_fitness ~ mv_resources + mv_disease
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease'
# fit model
fit2 <- sem(mod2, data = dat,
            missing = "fiml")
summary(fit2, fit.measures = T, standardized = T)
# power analysis
# Post-hoc power for detecting misfit of RMSEA = 0.05 at this N and df.
pow2 <- semPower.postHoc(effect = 0.05, effect.measure = 'RMSEA',
                         alpha = 0.05, N = 78, df = fit2@test[[1]]$df)
summary(pow2)
# parameters to free
# Modification indices suggest paths to free; author judged none sensible.
modificationIndices(fit2, sort. = TRUE, minimum.value = 3)
# I don't think any make sense to add in
# visualize
lavaanPlot(model = fit2,
           node_options = list(shape = "box", fontname = "Helvetica"),
           edge_options = list(color = "grey"),
           coefs = TRUE,
           covs = TRUE)
#### remove links in model ####
# Backward elimination: starting from mod2, each step drops the least
# significant path (the comment on each step records the dropped path with
# its p-value and standardized estimate), refits, and uses a likelihood-
# ratio test (anova) to confirm the simpler model fits no worse. The same
# pattern repeats for every subsequent modN/fitN pair below.
# remove Mv seedling density from disease (P = 0.994, Std.all = -0.001)
mod3 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide + Ev_seedling_density + Ev_adult_density
mv_fitness ~ mv_resources + mv_disease
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease'
# fit model
fit3 <- sem(mod3, data = dat,
            missing = "fiml")
anova(fit2, fit3) # not sig diff
summary(fit3, fit.measures = T, standardized = T)
# constrain correlation between Mv and Ev fitness (P = 0.968, Std.all = -0.006)
mod4 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide + Ev_seedling_density + Ev_adult_density
mv_fitness ~ mv_resources + mv_disease
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
mv_fitness ~~ 0*ev_fitness'
# fit model
fit4 <- sem(mod4, data = dat,
missing = "fiml")
anova(fit3, fit4) # not sig diff
summary(fit4, fit.measures = T, standardized = T)
# remove Ev adult density from Mv disease (P = 0.959, Std.all = -0.005)
mod5 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide + Ev_seedling_density
mv_fitness ~ mv_resources + mv_disease
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
mv_fitness ~~ 0*ev_fitness'
# fit model
fit5 <- sem(mod5, data = dat,
missing = "fiml")
anova(fit4, fit5) # not sig diff
summary(fit5, fit.measures = T, standardized = T)
# remove Mv disease from Mv fitness (P = 0.918, Std.all = 0.012)
mod6 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide + Ev_seedling_density
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
mv_fitness ~~ 0*ev_fitness'
# fit model
fit6 <- sem(mod6, data = dat,
missing = "fiml")
anova(fit5, fit6) # not sig diff
summary(fit6, fit.measures = T, standardized = T)
# constrain correlation between Mv disease and Ev fitness (P = 0.984, Std.all = 0.003)
mod7 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide + Ev_seedling_density
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit7 <- sem(mod7, data = dat,
missing = "fiml")
anova(fit6, fit7) # not sig diff
summary(fit7, fit.measures = T, standardized = T)
# remove Ev seedling density from Mv disease (P = 0.898, Std.all = 0.012)
mod8 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit8 <- sem(mod8, data = dat,
missing = "fiml")
anova(fit7, fit8) # not sig diff
summary(fit8, fit.measures = T, standardized = T)
# remove Mv seedling density from Mv resources (P = 0.863, Std.all = 0.028)
mod9 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_seedling_density + Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit9 <- sem(mod9, data = dat,
missing = "fiml")
anova(fit8, fit9) # not sig diff
summary(fit9, fit.measures = T, standardized = T)
# remove Ev seedling density from Mv resources (P = 0.761, Std.all = -0.045)
mod10 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ fungicide + Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit10 <- sem(mod10, data = dat,
missing = "fiml")
anova(fit9, fit10) # not sig diff
summary(fit10, fit.measures = T, standardized = T)
# remove fungicide from Ev disease (P = 0.703, Std.all = -0.042)
mod11 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ mv_disease + ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit11 <- sem(mod11, data = dat,
missing = "fiml")
anova(fit10, fit11) # not sig diff
summary(fit11, fit.measures = T, standardized = T)
# remove correlation between Mv resources and disease (P = 0.613, Std.all = -0.089)
mod12 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ ev_resources
ev_disease ~~ ev_resources + mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit12 <- sem(mod12, data = dat,
missing = "fiml")
anova(fit11, fit12) # not sig diff
summary(fit12, fit.measures = T, standardized = T)
# remove correlation between Ev resources and disease (P = 0.470, Std.all = 0.130)
mod13 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_disease ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ ev_resources
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit13 <- sem(mod13, data = dat,
missing = "fiml")
anova(fit12, fit13) # not sig diff
summary(fit13, fit.measures = T, standardized = T)
# remove Ev seedling density from Ev resources (P = 0.482, Std.all = 0.121)
mod14 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_adult_density
ev_disease ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ ev_resources
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease'
# fit model
fit14 <- sem(mod14, data = dat,
missing = "fiml")
anova(fit13, fit14) # not sig diff
summary(fit14, fit.measures = T, standardized = T)
# constrain correlation between Mv fitness and disease (P = 0.444, Std.all = -0.105)
mod15 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_adult_density
ev_disease ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
mv_resources ~~ ev_resources
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit15 <- sem(mod15, data = dat,
missing = "fiml")
anova(fit14, fit15) # not sig diff
summary(fit15, fit.measures = T, standardized = T)
# remove the corelation between Mv and Ev resources (P = 0.336, Std.all = 0.188)
mod16 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density + Ev_adult_density
ev_disease ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit16 <- sem(mod16, data = dat,
missing = "fiml")
anova(fit15, fit16) # not sig diff
summary(fit16, fit.measures = T, standardized = T)
# remove Ev adult density from Ev resources (P = 0.192, Std.all = 0.201)
mod17 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_disease ~ Mv_seedling_density + Ev_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit17 <- sem(mod17, data = dat,
missing = "fiml")
anova(fit16, fit17) # not sig diff
summary(fit17, fit.measures = T, standardized = T)
# remove Ev seedling density from Ev disease (P = 0.171, Std.all = 0.165)
mod18 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_disease ~ Mv_seedling_density + Ev_adult_density
ev_fitness ~ ev_resources + ev_disease
# correlations
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit18 <- sem(mod18, data = dat,
missing = "fiml")
anova(fit17, fit18) # warning because Ev seedling density was removed from the model, chisq and BIC decreased, AIC didn't
summary(fit18, fit.measures = T, standardized = T)
# remove Ev adult density from Ev disease (P = 0.170, Std.all = 0.153)
mod19 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3 + ev_seeds_A
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_disease ~ Mv_seedling_density
ev_fitness ~ ev_resources + ev_disease
# correlations
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit19 <- sem(mod19, data = dat,
missing = "fiml")
anova(fit18, fit19) # not sig diff
summary(fit19, fit.measures = T, standardized = T)
# remove Ev A seeds from Ev fitness (P = 0.163, Std.all = 0.189)
mod20 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_disease ~ Mv_seedling_density
ev_fitness ~ ev_resources + ev_disease
# correlations
ev_disease ~~ mv_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit20 <- sem(mod20, data = dat,
missing = "fiml")
anova(fit19, fit20) # warning about using different variables, but all three fit measures decreased
summary(fit20, fit.measures = T, standardized = T)
# remove Mv and Ev disease correlation (P = 0.132, Std.all = -0.203)
mod21 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3 + ev_tiller_A
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_disease ~ Mv_seedling_density
ev_fitness ~ ev_resources + ev_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit21 <- sem(mod21, data = dat,
missing = "fiml")
anova(fit20, fit21) # not sig diff
summary(fit21, fit.measures = T, standardized = T)
# remove Ev A tiller from Ev resources (P = 0.121, Std.all = 0.331)
mod22 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_disease ~ Mv_seedling_density
ev_fitness ~ ev_resources + ev_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit22 <- sem(mod22, data = dat,
missing = "fiml")
anova(fit21, fit22) # warning about different number of variables, but all fit measures decreased
summary(fit22, fit.measures = T, standardized = T)
# remove Mv seedling density from Ev resource (P = 0.080, Std.all = -0.283)
mod23 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_disease ~ Mv_seedling_density
ev_fitness ~ ev_resources + ev_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit23 <- sem(mod23, data = dat,
missing = "fiml")
anova(fit22, fit23) # significantly worse - keep in
# remove Mv seedling density from Ev disease (P = 0.074, Std.all = 0.199)
mod24 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_fitness ~ ev_resources + ev_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit24 <- sem(mod24, data = dat,
missing = "fiml")
anova(fit22, fit24) # not sig diff
summary(fit24, fit.measures = T, standardized = T)
# remove Ev resources from Ev fitness (P = 0.058, Std.all = 0.337)
mod25 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_fitness ~ ev_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit25 <- sem(mod25, data = dat,
missing = "fiml")
anova(fit24, fit25) # warning about comparison, fit metrics all increased
summary(fit25, fit.measures = T, standardized = T)
# remove Ev resources from Ev fitness and constrain correlations that appear because of this (P = 0.058, Std.all = 0.337)
mod26 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_resources ~ Mv_seedling_density
ev_fitness ~ ev_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease
ev_resources ~~ 0*mv_fitness + 0*mv_disease + 0*ev_fitness'
# fit model
fit26 <- sem(mod26, data = dat,
missing = "fiml")
anova(fit24, fit26) # model with Ev resources significantly better
# check again to make sure Mv seedling density should be included (P = 0.079, Std.all = -0.283)
mod27 <- '# latent variables
mv_resources =~ mv_tiller_1 + mv_tiller_2 + mv_tiller_3
mv_fitness =~ mv_seeds_1
mv_disease =~ mv_severity_1 + mv_severity_2 + mv_severity_3
ev_resources =~ ev_tiller_1 + ev_tiller_2 + ev_tiller_3
ev_fitness =~ ev_seeds_1 + ev_seeds_2 + ev_seeds_3
ev_disease =~ ev_severity_1
# regressions
mv_resources ~ Ev_adult_density
mv_disease ~ fungicide
mv_fitness ~ mv_resources
ev_fitness ~ ev_resources + ev_disease
# constraints
ev_fitness ~~ 0*mv_fitness + 0*mv_disease
mv_fitness ~~ 0*mv_disease'
# fit model
fit27 <- sem(mod27, data = dat,
missing = "fiml")
anova(fit24, fit27) # warning about variables, AIC and BIC increased
# visualize
lavaanPlot(model = fit24,
node_options = list(shape = "box", fontname = "Helvetica"),
edge_options = list(color = "grey"),
coefs = TRUE,
covs = TRUE)
|
1748b6b95395d719e07f5966ae653eca85c779df
|
f2778192f431b0c3dfcd070cd12033c4831ac485
|
/ManureSelection.R
|
8751e225592ecb5537bc4f1c819bf07bd2d3b267
|
[] |
no_license
|
MathotM/ValidationCode
|
4e7c9b7d50ecb69107ad35a85e6b772e3bcbd55f
|
d0033ddebd40a8be3ac76f2f18b9c0e1863850b5
|
refs/heads/master
| 2020-03-20T00:41:57.498111
| 2018-06-14T14:37:52
| 2018-06-14T14:37:52
| 137,053,044
| 0
| 1
| null | 2018-06-14T14:37:53
| 2018-06-12T10:02:38
|
R
|
UTF-8
|
R
| false
| false
| 3,399
|
r
|
ManureSelection.R
|
# # function for selection in Manure Data and selection on time basis
#
# Reads the compiled manure amount, temperature and analysis tables, parses
# their Time columns in TimeZone, and keeps only rows strictly inside the
# (StartData, EndData) window.
#
# Args:
#   PathRawDataManureAmount / PathRawDataTemperature / PathRawDataAnalysis:
#     directories containing the compiled CSV files (';'-separated, ',' decimal).
#   Manure*CompiledDataName: file names inside those directories.
#   StartData, EndData: window bounds; either date-time objects or "ymd hms"
#     character strings (parsed in TimeZone).
#   TimeZone: time zone used to interpret all timestamps.
#
# Returns:
#   A named list (ManureAmount, ManureTemperature, ManureAnalysis); any table
#   that could not be read is returned as NA.
ManureSelection<-function(
  PathRawDataManureAmount=PathRawDataManureAmount,
  PathRawDataTemperature=PathRawDataTemperature,
  PathRawDataAnalysis=PathRawDataAnalysis,
  ManureAmountCompiledDataName=ManureAmountCompiledDataName,
  ManureTemperatureCompiledDataName=ManureTemperatureCompiledDataName,
  ManureAnalysisCompiledDataName=ManureAnalysisCompiledDataName,
  StartData=StartData,
  EndData=EndData,
  TimeZone=TimeZone
){
  library(lubridate)
  # Read one compiled ';'-separated, ','-decimal CSV; on failure the
  # "try-error" object is propagated so a missing file does not abort the run.
  read_compiled <- function(path, name) {
    try(read.csv(file = file.path(path, name), sep = ";", dec = ",",
                 header = TRUE, stringsAsFactors = FALSE), silent = TRUE)
  }
  # inherits() is the robust test: class(x) can be a vector of length > 1
  # (e.g. c("POSIXct", "POSIXt")), and `if (class(x) == ...)` errors on
  # length-2 conditions in R >= 4.2.
  failed <- function(x) inherits(x, "try-error")
  # Keep rows strictly inside the (StartData, EndData) window.
  select_window <- function(df) df[df$Time > StartData & df$Time < EndData, ]

  ManureAmountSelectedData.df      <- read_compiled(PathRawDataManureAmount, ManureAmountCompiledDataName)
  ManureTemperatureSelectedData.df <- read_compiled(PathRawDataTemperature, ManureTemperatureCompiledDataName)
  ManureAnalysisSelectedData.df    <- read_compiled(PathRawDataAnalysis, ManureAnalysisCompiledDataName)

  # Time Setting: amounts and temperatures carry "ymd hms" stamps, the
  # laboratory analyses only carry a date ("ymd").
  if (!failed(ManureAmountSelectedData.df)) {
    ManureAmountSelectedData.df$Time <- as_datetime(
      ymd_hms(ManureAmountSelectedData.df$Time, tz = TimeZone), tz = TimeZone)
  }
  if (!failed(ManureTemperatureSelectedData.df)) {
    ManureTemperatureSelectedData.df$Time <- as_datetime(
      ymd_hms(ManureTemperatureSelectedData.df$Time, tz = TimeZone), tz = TimeZone)
  }
  if (!failed(ManureAnalysisSelectedData.df)) {
    ManureAnalysisSelectedData.df$Time <- as_datetime(
      ymd(ManureAnalysisSelectedData.df$Time, tz = TimeZone), tz = TimeZone)
  }

  # Time boundaries: character bounds are parsed as "ymd hms" in TimeZone.
  if (is.character(StartData)) StartData <- as_datetime(ymd_hms(StartData, tz = TimeZone), tz = TimeZone)
  if (is.character(EndData))   EndData   <- as_datetime(ymd_hms(EndData, tz = TimeZone), tz = TimeZone)

  # Selection on time (strict bounds, as in the original implementation).
  if (!failed(ManureAmountSelectedData.df))      ManureAmountSelectedData.df      <- select_window(ManureAmountSelectedData.df)
  if (!failed(ManureTemperatureSelectedData.df)) ManureTemperatureSelectedData.df <- select_window(ManureTemperatureSelectedData.df)
  if (!failed(ManureAnalysisSelectedData.df))    ManureAnalysisSelectedData.df    <- select_window(ManureAnalysisSelectedData.df)

  # Replace unreadable tables by NA so callers can test with is.na().
  if (failed(ManureAmountSelectedData.df))      ManureAmountSelectedData.df      <- NA
  if (failed(ManureTemperatureSelectedData.df)) ManureTemperatureSelectedData.df <- NA
  if (failed(ManureAnalysisSelectedData.df))    ManureAnalysisSelectedData.df    <- NA

  list(ManureAmount = ManureAmountSelectedData.df,
       ManureTemperature = ManureTemperatureSelectedData.df,
       ManureAnalysis = ManureAnalysisSelectedData.df)
}
|
db2b34257a6163fa58756c76b955b1b218ea1ad8
|
375e98a79ccefec3d226edbb3cb8e03e8e3b01a2
|
/man/combination_palette.Rd
|
f8c5db651128e836bca084f7d35e7cead70f3fb9
|
[
"MIT"
] |
permissive
|
sekingsley/MicrobiomeR
|
e1325c519e34bcbc32bf75cab0a65a4f0df07a67
|
dfc8f67d88f7a9dfaa15753a369df73ccc948396
|
refs/heads/master
| 2022-12-01T13:29:05.960005
| 2019-08-30T20:35:06
| 2019-08-30T20:35:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,849
|
rd
|
combination_palette.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/palettes.R
\name{combination_palette}
\alias{combination_palette}
\title{Combine Color Palettes}
\usage{
combination_palette(...)
}
\arguments{
\item{...}{You can use any name for your arguments, but the values must be a named list.
The list can only have 4 named members:
\describe{
\item{palette}{This is a palette function that returns a vector of colors.}
\item{args}{This is another named list used for the palette function parameters.}
\item{range}{This is a range \emph{(1:10)} used to subset the color palette vector.}
\item{rev}{This is a logical \emph{(TRUE/FALSE)} used to reverse the color palette.}
}
You can add as many parameters you want in order to combine as many color palettes
as you want.}
}
\value{
The output of this function is another function (grDevices::colorRampPalette), which takes
a number to generate an interpolated color palette as a character vector.
}
\description{
This function uses dynamic arguments (...) in order to combine multiple
color palettes together.
}
\details{
This function allows you to combine a varying number of color palettes and gives you
the ability to subset and reverse the palettes that are supplied.
}
\examples{
\dontrun{
if(interactive()){
# Below is the code for the viridis_magma_palette function.
# It's a good example of how to use the combination_palette function.
viridis_magma_palette <- function(viridis_number = 800,
viridis_range = 300:viridis_number,
viridis_rev = TRUE,
magma_number = 500,
magma_range = 0:magma_number,
magma_rev = FALSE,
...) {
if (!missing(...)){
v_args = list(n=viridis_number, ...)
m_args = list(n=magma_number, ...)
} else {
v_args = list(n=viridis_number)
m_args = list(n=magma_number)
}
crp <- combination_palette(viridis =
list(palette = viridis::viridis,
args = v_args,
range = viridis_range,
rev = viridis_rev),
magma =
list(palette = viridis::magma,
args = m_args,
range = magma_range,
rev = magma_rev)
)
return(crp)
}
}
}
}
\seealso{
\code{\link[grDevices]{colorRamp}}
Other Color Palettes: \code{\link{get_color_palette}},
\code{\link{scico_palette}},
\code{\link{viridis_magma_palette}},
\code{\link{viridis_palette}}
}
\concept{Color Palettes}
|
c947ace1f8a65c8e501f8426fe8d5c84c4161f0d
|
4a5693eb7b8d7e2327a730ee019da04557544d7f
|
/src/webPage/fig97.r
|
c6451b46d8a439e3c5c6b3649fc3d3b883ce83be
|
[] |
no_license
|
cuzaheta/antiCharts
|
0424e4f9707a18a5ed123ec6a87f2a50e22c2ed2
|
fa6bfdb7b4dedcd99bcb6422f87bdbb039a27c6d
|
refs/heads/master
| 2020-06-28T04:10:31.651671
| 2019-08-02T00:46:53
| 2019-08-02T00:53:01
| 200,139,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,430
|
r
|
fig97.r
|
##### R-code for making Figure 9.7. It reads data from "soil.dat"
# Fits a Yule-Walker AR model to each of the five soil series and draws a
# kernel density estimate of the AR residuals, one panel per series, into a
# 3x2 postscript figure.
# NOTE(review): the residual index ranges (4:189, 3:189, 2:189, ...) skip the
# leading NA residuals, so they encode the AR order selected for each series
# on the original data -- re-check them if "soil.dat" ever changes.
dat <- matrix(scan("soil.dat"),189,6,byrow=T)
# Column 1 is an identifier; columns 2-6 are the five observed series.
id <- dat[,1]
x1 <- dat[,2]
x2 <- dat[,3]
x3 <- dat[,4]
x4 <- dat[,5]
x5 <- dat[,6]
# Open the postscript device and lay out six panels (last one unused).
postscript("fig97.ps",width=6.5,height=7,horizontal=F)
par(mfrow=c(3,2),mar=c(4,4,2,2))
# Series 1: AR fit by Yule-Walker (order chosen automatically), then a
# fixed-bandwidth kernel density of the residuals.
tx1 <- ar.yw(x1)
d1 <- density(tx1$resid[4:189],width=0.5)
plot(d1$x,d1$y,xlim=c(-3,3),
type="l",lty=1,xlab=expression(X[1]),ylab="density",
mgp=c(2,1,0),cex=0.7)
# Series 2.
tx2 <- ar.yw(x2)
d2 <- density(tx2$resid[3:189],width=0.5)
plot(d2$x,d2$y,xlim=c(-3,3),
type="l",lty=1,xlab=expression(X[2]),ylab="density",
mgp=c(2,1,0),cex=0.7)
# Series 3.
tx3 <- ar.yw(x3)
d3 <- density(tx3$resid[2:189],width=0.5)
plot(d3$x,d3$y,xlim=c(-3,3),
type="l",lty=1,xlab=expression(X[3]),ylab="density",
mgp=c(2,1,0),cex=0.7)
# Series 4: AR order explicitly capped at 1 (only series with this cap).
tx4 <- ar.yw(x4,order.max=1)
d4 <- density(tx4$resid[2:189],width=0.5)
plot(d4$x,d4$y,xlim=c(-3,3),
type="l",lty=1,xlab=expression(X[4]),ylab="density",
mgp=c(2,1,0),cex=0.7)
# Series 5.
tx5 <- ar.yw(x5)
d5 <- density(tx5$resid[2:189],width=0.5)
plot(d5$x,d5$y,xlim=c(-3,3),
type="l",lty=1,xlab=expression(X[5]),ylab="density",
mgp=c(2,1,0),cex=0.7)
# Close the postscript device, flushing the figure to disk.
graphics.off()
# Optionally dump the five residual series for downstream use (disabled).
# write.table(cbind(tx1$resid,tx2$resid,tx3$resid,tx4$resid,tx5$resid),
# "resid.dat",sep=" ")
|
d6b8e9e0e066466c277db57680514673c0b32391
|
b022e68f0139455784d95133deb4cf8f487142ce
|
/R/GLVmix.R
|
0eb968f1f1f3dba84978a1f982c9ded2bed1b362
|
[] |
no_license
|
cran/REBayes
|
1e311610a28f37509d2da28f81385b17e84b4bbf
|
e26237baf78f2dc4bb776ae29a2ddfce68963435
|
refs/heads/master
| 2022-05-13T04:13:53.603760
| 2022-03-22T17:20:02
| 2022-03-22T17:20:02
| 17,681,954
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,181
|
r
|
GLVmix.R
|
#' NPMLE of Gaussian Location-Scale Mixture Model
#'
#' A Kiefer-Wolfowitz procedure for ML estimation of a Gaussian model with
#' possibly dependent mean and variance components. This version differs from
#' \code{WGLVmix} in that it doesn't assume the data is in longitudinal form.
#' This version assumes a general bivariate distribution for the mixing
#' distribution. The defaults use a rather coarse bivariate gridding.
#'
#' @param t A vector of location estimates
#' @param s A vector of variance estimates
#' @param m A vector of sample sizes of the same length as t and s, or if scalar
#' a common sample size length
#' @param u A vector of bin boundaries for the location effects
#' @param v A vector of bin boundaries for the variance effects
#' @param ... optional parameters to be passed to KWDual to control optimization
#' @return A list consisting of the following components:
#' \item{u}{midpoints of mean bin boundaries}
#' \item{v}{midpoints of variance bin boundaries}
#' \item{fuv}{the function values of the mixing density.}
#' \item{logLik}{log likelihood value for mean problem}
#' \item{du}{Bayes rule estimate of the mixing density means.}
#' \item{dv}{Bayes rule estimate of the mixing density variances.}
#' \item{A}{Constraint matrix}
#' \item{status}{Mosek convergence status}
#' @author R. Koenker and J. Gu
#' @references Gu, J. and R. Koenker (2014) Heterogeneous Income Dynamics: An
#' Empirical Bayes Perspective, \emph{JBES},35, 1-16.
#'
#' Koenker, R. and J. Gu, (2017) REBayes: An {R} Package for Empirical Bayes Mixture Methods,
#' \emph{Journal of Statistical Software}, 82, 1--26.
#' @seealso WTLVmix for an implementation assuming independent heterogeneity, and WGLVmix
#' for a version that requires access to a full longitudinal data structure.
#' @keywords nonparametric
#' @export
GLVmix <- function (t, s, m, u = 30, v = 30, ...)
{
# Uniform weight per observation.
n <- length(t)
w <- rep(1, n)/n
eps <- 1e-04
# A scalar m is recycled to one sample size per observation.
if(length(m) == 1) m <- rep(m, length(t))
# Shape parameter of the gamma likelihood for the sample variances below.
r <- (m - 1)/2
# Scalar u/v are interpreted as grid sizes; expand to equally spaced grids
# slightly padded (by eps) beyond the observed range.
if (length(u) == 1)
u <- seq(min(t) - eps, max(t) + eps, length = u)
if (length(v) == 1)
v <- seq(min(s) - eps, max(s) + eps, length = v)
# Variance grid points must be strictly positive.
v <- v[v > 0]
pu <- length(u)
du <- rep(1, pu)
pv <- length(v)
dv <- rep(1, pv)
# Av[i, j]: likelihood of sample variance s[i] under a Gamma density with
# shape r[i] and scale v[j]/r[i] (i.e. mean v[j]).
Av <- matrix(NA, n, pv)
for (i in 1:n){
for (j in 1:pv){
Av[i,j] = dgamma(s[i], r[i], scale = v[j]/r[i])
}
}
# Replicate Av across the location grid and permute to an n x pu x pv array
# so it aligns with Au below.
Av <- outer(Av, rep(1, pu))
Av <- aperm(Av,c(1,3,2))
# Au[i, j, k]: normal likelihood of t[i] at grid mean u[j] with standard
# deviation sqrt(v[k]) / sqrt(m[i]); the division rescales the density.
Au <- dnorm(outer(outer(t, u, "-") *
outer(sqrt(m), rep(1, pu)), sqrt(v), "/"))
Au <- Au/outer(outer(1/sqrt(m), rep(1, pu)), sqrt(v))
# Joint likelihood over the bivariate (u, v) grid.
Auv <- Av * Au
# Flatten to an n x (pu * pv) matrix, one pu-wide column block per v point.
A <- NULL
for (j in 1:pv) A <- cbind(A, Auv[, , j])
duv = as.vector(kronecker(du, dv))
# Solve the Kiefer-Wolfowitz NPMLE via its dual (package helper, uses Mosek).
f <- KWDual(A, duv, w, ...)
fuv <- f$f
# Grid laid out to match A's column order: alpha (=u) varies fastest.
uv <- expand.grid(alpha = u, theta = v)
# Marginal density of each observation under the fitted mixing distribution.
g <- as.vector(A %*% (duv * fuv))
logLik <- n * sum(w * log(f$g))
du <- A%*%(uv[,1] * duv * fuv)/g #Bayes rule for u: E(u|t,s)
dv <- A %*% (uv[,2] * duv * fuv)/g # Bayes rule for v: E(v|t,s)
z <- list(u = u, v = v, fuv = fuv, logLik = logLik,
du = du, dv = dv, A = A, status = f$status)
class(z) <- "GLVmix"
z
}
|
57ae371215d9c854ca54aae1b2f7cae97f1ec3cf
|
4b54dd2d3e036ec5a43d52b4d6c77f21f2ce6bee
|
/man/compute_projection.Rd
|
2c6d7a8403a3ba43acce918787d4faa31fe9c64f
|
[
"Apache-2.0"
] |
permissive
|
mitre/earthtools
|
b3639db4c34e334d595d09d7fd2adf4b176582e6
|
1a755b32b7a66aa1829668feebf959308a181dbb
|
refs/heads/master
| 2023-08-23T02:14:57.755600
| 2019-01-04T18:30:28
| 2019-01-04T18:30:28
| 117,014,541
| 4
| 1
|
Apache-2.0
| 2018-10-16T12:54:05
| 2018-01-10T21:44:45
|
R
|
UTF-8
|
R
| false
| true
| 3,333
|
rd
|
compute_projection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/projection.R
\name{compute_projection}
\alias{compute_projection}
\alias{compute_projection.data.frame}
\alias{compute_projection.numeric}
\title{compute location of coordinates after great circle projection}
\usage{
compute_projection(x, ..., method = "GC")
\method{compute_projection}{data.frame}(.data, latitude, longitude,
bearing, distance, method = "GC")
\method{compute_projection}{numeric}(latitude, longitude, bearing,
distance, output_type = "data.table", method = "GC")
}
\arguments{
\item{x}{An object used to determine which implementation to use}
\item{...}{It's an S3 thing. You wouldn't understand.}
\item{method}{Either \code{"GC"} [default] or \code{"rhumb"}. Used to declare
either a great circle calculation or rhumb line calculation}
\item{.data}{An object that inherits from \code{\link[base]{data.frame}}. In
general this will be on of \code{data.frame},
\code{\link[data.table]{data.table}}, or \code{\link[dplyr]{tbl_df}}}
\item{latitude}{Either a numeric vector of latitudes [degrees] or the column
of \code{.data} which contains latitudes. This may be quoted or unquoted;
see examples.}
\item{longitude}{Either a numeric vector of longitudes [degrees]
corresponding with latitude or the column of \code{.data} which contains
longitudes. This may be quoted or unquoted; see examples.}
\item{bearing}{Either a numeric vector of bearings [degrees] or the column of
\code{.data} which contains bearings/headings. This may be quoted or
unquoted; see examples.}
\item{distance}{Either a numeric vector of projection distance [nautical
miles] or the column of \code{.data} which contains projection distances.
This may be quoted or unquoted; see examples.}
\item{output_type}{string in \code{c("matrix", "data.table", "data.frame",
"list")}}
}
\value{
If \code{.data} is supplied, an object of the same type and with the
same columns as \code{.data} plus two more, \code{end_latitude} and
\code{end_longitude}. Otherwise, an object of type determined by
output_type which will generally have two columns, latitude and longitude.
If the input coordinates have length 1, then a named numeric vector is
returned.
}
\description{
This function provides a convenient wrapper to
\code{\link[geosphere]{destPoint}}
}
\examples{
# basic use
compute_projection(39.86167, -104.6732, 90, 15)
compute_projection(39.86167, -104.6732, 86:90, 1:15)
# use inside a data.table
library(data.table)
apts <- data.table(airport=c("ATL", "DEN", "ORD", "SEA"),
latitude=c(33.63670, 39.86167, 41.97933, 47.44989),
longitude=c(-84.42786, -104.67317, -87.90739, -122.31178))
apts[, c("platitude", "plongitude"):=compute_projection(latitude, longitude, 90, 15)]
# use with magrittr
library(magrittr)
apts \%>\% compute_projection(latitude, longitude, 90, 15)
# columns as strings
lat_col <- names(apts)[2]
apts \%>\% compute_projection(lat_col, "longitude", 90, 15)
# predict next position
tracks <- data.frame(id = c("a","b","c"),
lat = 0,
lon = 0,
heading = 30,
ground_speed = seq(300,360, 30))
time_step <- 1/60 #one minute
tracks \%>\% compute_projection(lat, lon, heading, tracks$ground_speed*time_step)
}
|
f0792283eaed9326d9d80d01bdb7f5acaa3b8b3e
|
e06b022ffe0db9df44b131b3b71361c710d0655f
|
/Codes R/My codes for week 2/cyclo_order(useless).R
|
bc2e853d8207d5c4f62048bb51c1b561b0021251
|
[] |
no_license
|
rodrigodealexandre/Bioinformatics-Algorithms
|
551a1219e46d657fb11625c081cdbd4c641cc33e
|
86127d1b594404176e99d5ef542c2b3f6c58d2ab
|
refs/heads/master
| 2020-12-25T18:23:33.625343
| 2014-12-11T02:16:34
| 2014-12-11T02:16:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,073
|
r
|
cyclo_order(useless).R
|
#-------------------------------------------------
# Script setup: switch to the project directory (hard-coded absolute path --
# only valid on the original author's machine), load the amino-acid integer
# mass table used by cyclo_order() below, and source the spectrum helpers
# (provides linear_mass_spectrum()).
setwd("D:/Dropbox/Courses/Coursera Courses/Bioinformatics Algorithms (Part 1)/Bioinformatics-Algorithms/Codes R/My codes for week 2")
pep_mass <- read.table("integer_mass_table.txt")
source("mass_spectrum.R")
# Generate every cyclic rotation of a peptide's residue-mass sequence, in both
# directions, as "-"-joined strings.
#
# Args:
#   spectrum: a whitespace-separated string of masses, a single peptide token
#     (expanded via linear_mass_spectrum()), or a vector of masses. Values not
#     present in the residue mass table `pep_mass` are dropped.
#
# Returns:
#   A character vector with 2 * n entries (n rotations and their reversals),
#   or NULL (after printing "Invalid input") when fewer than 3 masses remain.
cyclo_order <- function(spectrum){
  # A single string may be a whitespace-separated list of masses...
  if (length(spectrum) == 1) {
    spectrum <- strsplit(spectrum, "\\s")[[1]]
  }
  # ...or, if it is still one token, a peptide to expand into residue masses.
  if (length(spectrum) == 1) {
    spectrum <- linear_mass_spectrum(spectrum)
  }
  if (length(spectrum) <= 2) {
    return(cat("Invalid input"))
  }
  # Keep only values that are valid residue masses.
  spectrum <- as.numeric(spectrum[spectrum %in% pep_mass[, 2]])
  n <- length(spectrum)
  # Preallocate: one forward and one reversed rotation per start position.
  rotations <- vector("list", 2L * n)
  for (i in seq_len(n)) {
    # seq_len(i - 1) is empty when i == 1; the original `1:i-1` only worked
    # because R silently drops the zero index produced by (1:i) - 1.
    rot <- spectrum[c(i:n, seq_len(i - 1))]
    rotations[[2L * i - 1L]] <- rot
    rotations[[2L * i]] <- rev(rot)
  }
  vapply(rotations, paste, character(1), collapse = "-")
}
|
b4355fa9f517dc192ac3668356d07443b224a4fe
|
dd39cf7fc295196fcba0897eae1119d64a7c744e
|
/NewFunction.R
|
ca5f9f942145dc2a42840798612dd2a5ebc26ed7
|
[] |
no_license
|
HannahPye96/Hello_World
|
cc3dce7a292e335e9d02580ab17b559dd60dcc98
|
03ac4e3d229d17cdf2a9488014074dcc5c9bc529
|
refs/heads/master
| 2020-03-29T02:34:43.165488
| 2018-09-19T12:31:56
| 2018-09-19T12:31:56
| 149,443,949
| 0
| 0
| null | 2018-09-19T12:31:57
| 2018-09-19T12:00:35
|
R
|
UTF-8
|
R
| false
| false
| 95
|
r
|
NewFunction.R
|
# Inverse logit (logistic) transform: maps any real x into (0, 1).
#
# Written as 1 / (1 + exp(-x)) rather than exp(x) / (exp(x) + 1): the latter
# overflows for large positive x (exp(x) = Inf gives Inf / Inf = NaN), while
# this form correctly returns 1. For large negative x both forms return 0.
# Vectorized over x.
invlogit <- function(x) 1 / (1 + exp(-x))
|
7fe4512a29cc872447fe1635e332fc0ddea262ee
|
006a9f1f60379f29d959a7f2ec8e66a833816406
|
/test_process/Step_1_to_7_IsoDeconvMM.R
|
e603d840a066ca5749adafa6fb45caa2a061eb8f
|
[] |
no_license
|
Sun-lab/deconvolution
|
3f35c7fdc539a613973944fc1cc3252210acba1b
|
4fb77b8e0f90d137e5f33823e0bbd3666488c9fc
|
refs/heads/master
| 2021-01-04T04:20:02.688749
| 2020-02-24T19:02:00
| 2020-02-24T19:02:00
| 240,384,122
| 0
| 0
| null | 2020-02-13T22:57:00
| 2020-02-13T22:57:00
| null |
UTF-8
|
R
| false
| false
| 9,803
|
r
|
Step_1_to_7_IsoDeconvMM.R
|
## Running of actual IsoDeconvMM R package
# Required Libraries:
library(gtools)
# library(IsoDeconvMM)
# source("https://bioconductor.org/biocLite.R")
# biocLite("cummeRbund")
library(cummeRbund)
# Had to downgrade to RSQ-lite v 1.1-2 --> ?
library(alabama)
library(stringr)
prequel = "/home/hheiling_unc_edu/IsoDeconvMM/R/"
to_source = c("isoDeconv_geneModel_revised.R",
"geneModel_multcell_edit.R",
"loadData_edit.R",
"pdist_gen.R",
"Production_Functions_MixedSamp.R",
"Production_Functions_PureSamp.R",
"rem_clust.R",
"EffectiveLength_Total.R")
source_files = str_c(prequel, to_source)
for(i in 1:length(to_source)){
source(file = source_files[i])
}
# Inputs
file = "set1_50_set2_50" # Should be one of the comboLabels provided in FragLengths_Function_mixture.R
sys_statement1 = sprintf("countData=c(\"mf_%s_counts.txt\",
\"mm9_set1_counts.txt\",
\"mm9_set2_counts.txt\")",file)
eval(parse(text=sys_statement1))
labels = c("mix","set1_ref1", "set2_ref1")
cellTypes = c("mix","set1","set2")
fragSizeFile = sprintf("%s_fraglens.txt",file)
bedFile = "Mus_musculus.NCBIM37.67.nonoverlap.exon.bed"
knownIsoforms = "Mus_musculus.NCBIM37.67.nonoverlap.exon.knownIsoforms.RData"
readLen=76
eLenMin=1
lmax=600
# folder = "/home/hheiling_unc_edu/deconvolution/test_process/test_materials"
total_cts = numeric(length(countData))
for(i in 1:length(countData)){
countsi = read.table(countData[i], as.is = T)
counts_col = countsi[,1]
total_cts[i] = sum(counts_col)
}
# Step 1
files = file
final_geneMod = list()
for(j in 1:length(files)){
# Call dev_compiled_geneMod function
fin_geneMod = dev_compiled_geneMod(countData=countData,labels = labels,total_cts = total_cts,
cellTypes=cellTypes, bedFile=bedFile,knownIsoforms=knownIsoforms,
fragSizeFile=fragSizeFile,readLen=readLen,lmax=lmax,
eLenMin=eLenMin)
final_geneMod[[j]] = fin_geneMod
}
# Warning message:
# In fin_geneMod["Sample_Info"] <- list(info = info_mat, tclust_tot = length(fin_geneMod), :
# number of items to replace is not a multiple of replacement length
save(final_geneMod, file = "Step1_final_geneMod.RData")
# Step 2
#-----------------------------------------------------------------------------#
# ESTABLISHING CLUSTERS WITH HIGHEST LEVELS OF DISCRIMINATORY CAPABILITY #
#-----------------------------------------------------------------------------#
#------------------ Identify Highly Discriminatory Clusters -------------------#
# Pick genes from chromosome 18 with 3 or more isoforms (transcripts)
# siggenes object from "Exploring RData Objects.R"
# Columns: geneId, nE, nT, clustID
# Folder: deconvolution/test_process/test_materials/
load("chr18_siggenes.RData")
finalOut2 = siggenes
# Step 3
analy_genes = finalOut2$geneId
significant_geneMod = list()
for(j in 1:length(final_geneMod)){
fin_geneMod = final_geneMod[[j]]
indices2chk = which(names(fin_geneMod)!="Sample_Info")
indices_tmp = NULL
indices=NULL
# indices_tmp = rep(0,length(geneMod)) # What is this geneMod object?
# Idea: should be fin_geneMod instead of geneMod
indices_tmp = rep(0,length(fin_geneMod))
for(i in indices2chk){
infodf = fin_geneMod[[i]]$info
genesi = unique(infodf$gene)
genesi = unique(unlist(strsplit(x=genesi,split = ":")))
if(any(genesi %in% analy_genes)){indices_tmp[i]=1}
}
indices = which(indices_tmp==1)
sig_geneMod = fin_geneMod[indices]
sig_geneMod["Sample_Info"] = fin_geneMod["Sample_Info"]
sig_geneMod = rem_clust(geneMod = sig_geneMod,co = 5,min_ind = 0)
significant_geneMod[[j]] = sig_geneMod
}
save(significant_geneMod, file = "Step3_significant_geneMod.RData")
# Step 4
#-------------------------------------------------------------------#
# EDIT TO GROUP CELL TYPES #
#-------------------------------------------------------------------#
modified_sig_geneMod = list()
for(f in 1:length(significant_geneMod)){
sig_geneMod = significant_geneMod[[f]]
info_mat = sig_geneMod[["Sample_Info"]]
cellTypes = unique(info_mat$Cell_Type)
ctList = list()
for(j in 1:length(cellTypes)){
idx = which(info_mat$Cell_Type==cellTypes[j])
ctList[[cellTypes[j]]] = list(samps = info_mat$Label[idx], tots = info_mat$Total[idx])
}
idx2consider = which(names(sig_geneMod)!="Sample_Info")
for(k in idx2consider){
for(l in 1:length(cellTypes)){
samps2use = ctList[[l]]$samps
tots = ctList[[l]]$tots
y_vecs = paste("sig_geneMod[[k]]$y",samps2use,sep = "_")
y_vecsc = paste(y_vecs,collapse = ",")
nExon = eval(parse(text=sprintf("length(%s)",y_vecs[1])))
textcmd = sprintf("matrix(c(%s),nrow=nExon,ncol=length(samps2use))",y_vecsc)
expMat = eval(parse(text=textcmd))
totmg = tots-colSums(expMat)
expMat2 = rbind(totmg,expMat)
if(cellTypes[l]!="mix"){
sig_geneMod[[k]][[cellTypes[l]]] = list(cellType=cellTypes[l],rds_exons=expMat2)
} else {
sig_geneMod[[k]][[cellTypes[l]]] = list(cellType=cellTypes[l],rds_exons_t=expMat2)
}
}
}
modified_sig_geneMod[[f]] = sig_geneMod
}
save(modified_sig_geneMod, file = "Step4_modified_sig_geneMod.RData")
# Step 5
#-----------------------------------------------------------#
# CALL Pure Sample #
#-----------------------------------------------------------#
## Need further investiation here / in above steps ##
# Estimate pure-sample (per-cell-type) parameters for each modified gene
# model via Pure.apply.fun (defined elsewhere in this project).
# Clusters with a single isoform (design matrix X collapses to one column)
# are excluded before optimisation; see debugging notes below.
cellTypes = c("set1","set2")
pure_est = list()
for(j in 1:length(modified_sig_geneMod)){
sig_geneMod = modified_sig_geneMod[[j]]
# Drop the bookkeeping element; everything else is a gene-model cluster.
sim.out = sig_geneMod[which(names(sig_geneMod)!="Sample_Info")]
# Clusters with single isoforms:
# EXCLUDE THEM FOR THE MOMENT!.
# dim_mat row i records dim(X) for cluster i (rows x isoforms).
dim_mat = matrix(0,nrow=length(sim.out),ncol=2)
excl_clust = c()
excl_clust2 = c()
# for(i in 1:(length(sim.out))){ # See explanation of change below
# NOTE: loop deliberately stops one short of length(sim.out) because the
# last element was observed not to be a regular cluster list (see notes below).
for(i in 1:(length(sim.out)-1)){
dim_mat[i,] = dim(sim.out[[i]][["X"]])
# dim_mat[i,] = dim(sim.out[[i]]["X"][[1]])
# 1x1 design matrix: degenerate cluster, exclude.
if(all(dim_mat[i,]==c(1,1))){
excl_clust = c(excl_clust,i)
}
# Single-column X means a single isoform: exclude from optimisation.
if(dim_mat[i,2] == 1){
excl_clust2 = c(excl_clust2,i)
}
}
# Note: Exclude last sim.out entry due to explanation below
excl_clust2 = c(excl_clust2, length(sim.out))
excl_clust_union = union(excl_clust,excl_clust2)
if(length(excl_clust_union)>0){
sim.new = sim.out[-c(excl_clust_union)]
} else {
sim.new = sim.out
}
# Optimize the Pure Sample Functions:
tmp.data = Pure.apply.fun(data.list = sim.new, cellTypes = cellTypes, corr_co = 1)
pure_est[[j]] = tmp.data
}
# Error in `[.data.frame`(sim.out[[i]], "X") : undefined columns selected
# Error understood due to comment below
# Note: Very last entry in sim.out (sim.out[[182]] in this example) was not the usual "list"
# object that the other sim.out elements had.
# For now, removing this object, but should investigate later
# > tmp.data = Pure.apply.fun(data.list = sim.new, cellTypes = cellTypes, corr_co = 1)
# Error in base::colSums(x, na.rm = na.rm, dims = dims, ...) :
# 'x' must be an array of at least two dimensions
# Solution: Found that sim.new[[i]][cellTypes]$setk$rds_exons had one column for all elements,
# so colSums function would not work. Corrected code to accommodate matrix with only 1 column.
save(pure_est, file = "Step5_pure_est.RData")
# Step 6
# Run the isoform-deconvolution optimiser (STG.Update_Cluster.All, defined
# elsewhere in this project) over every pure-sample estimate, processing the
# clusters in chunks of 15 to keep each optimisation call manageable.
IsoDeconv_Output <- list()
for (i in seq_along(pure_est)) {
  tmp.data <- pure_est[[i]]
  #--------------------------------------------------------#
  #              Establish input break ups                 #
  #--------------------------------------------------------#
  # Cell types modeled in the deconvolution.
  cellTypes <- c("set1", "set2")
  # Chunk boundaries over the cluster list: [start.pts[m], end.pts[m]].
  clust.end <- length(tmp.data)
  by.value <- 15
  start.pts <- seq(from = 1, to = clust.end, by = by.value)
  end.pts <- c(start.pts[-1] - 1, clust.end)
  cluster_output <- list()
  for (m in seq_along(start.pts)) {
    start.pt <- start.pts[m]
    end.pt <- end.pts[m]
    # Call Revised_Sim_MixCode_SI.R code
    curr.clust.opt <- tmp.data[start.pt:end.pt]
    curr.clust.out <- STG.Update_Cluster.All(all_data = curr.clust.opt, cellTypes = cellTypes,
                                             optimType = "nlminb", simple.Init = FALSE, initPts = c(0.5))
    cluster_output[[m]] <- curr.clust.out
  }
  IsoDeconv_Output[[i]] <- cluster_output
}
# Simple Init Not Performed! Full Fit for each start point!
# Historical note: "subscript out of bounds" on alpha.est was fixed by
# setting cellTypes to c("set1","set2") above.
save(IsoDeconv_Output, file = "Step6_IsoDeconv_Output.RData")
# Step 7
#-----------------------------------------------------------------------#
#                           Compile Files                               #
#-----------------------------------------------------------------------#
# Flatten the chunked Step-6 results: for each input file, concatenate all
# per-chunk cluster outputs into one flat list.
Final_Compiled_Output <- list()
for (j in seq_along(IsoDeconv_Output)) {
  #---- Set up new pattern ----#
  est.chunks <- IsoDeconv_Output[[j]]
  message("File ", j)
  #---- Populate Output Dataset ----#
  comp.out <- list()
  r <- 1
  # Iterate over chunks and their elements directly; this is safe even if a
  # chunk is empty (the old 1:length() indexing crashed on length-0 chunks).
  for (chunk in est.chunks) {
    for (el in chunk) {
      comp.out[[r]] <- el
      r <- r + 1
    }
  }
  Final_Compiled_Output[[j]] <- comp.out
}
save(Final_Compiled_Output, file = "Step7_FinalCompiled_Output.RData")
|
d35324d57ec44fa36d79a2cb2be1af529a2eea42
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/4648_0/rinput.R
|
8ea4931f9f842d1d04652ab650e69c218680ebe9
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the phylogenetic tree stored in 4648_0.txt and write the
# unrooted tree back out in Newick format.
library(ape)
unrooted_tr <- unroot(read.tree("4648_0.txt"))
write.tree(unrooted_tr, file = "4648_0_unrooted.txt")
|
1e58e726a7691dc11b29e037261aaec0e529e160
|
17fdd34b68df267b8262d532adddba733879b0b8
|
/man/dot-gc.Rd
|
c21b3fe1247bf34fd69b6266616168572d6cac84
|
[] |
no_license
|
kevinmhadi/khtools
|
f0b57e0be0014084f2f194465ab4a924fe502268
|
85d64808f8decd71f30510ccd18f38986031be74
|
refs/heads/master
| 2023-07-19T21:50:22.341824
| 2023-07-19T01:46:03
| 2023-07-19T01:46:03
| 235,495,453
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 295
|
rd
|
dot-gc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{.gc}
\alias{.gc}
\title{.gc}
\usage{
.gc(df, ptrn, invert = F, ignore.case = FALSE, exact = FALSE)
}
\arguments{
\item{df}{data frame}
\item{ptrn}{pattern}
}
\description{
.gc
}
\author{
Kevin Hadi
}
|
bbde84e90c0684c04452b0e7c3d2a5c714a4492d
|
c60126c5e91b1c6dced262d8026d9da4af9e18f6
|
/man/new_eval.Rd
|
b47081d0727ece5c4c73e92184daada0fe99d9a7
|
[
"MIT"
] |
permissive
|
davidallen02/pamngr
|
3a36dd78903121c426b216fc96ed3e699ea4ac74
|
cf89491132ca73c84d8b33ae6ac309303254b5af
|
refs/heads/master
| 2023-07-15T06:14:23.118482
| 2021-09-01T21:34:02
| 2021-09-01T21:34:02
| 237,082,991
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 362
|
rd
|
new_eval.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new-eval.R
\name{new_eval}
\alias{new_eval}
\title{Create a new eval from template}
\usage{
new_eval(ticker, date = Sys.Date())
}
\arguments{
\item{ticker}{an equity security}
\item{date}{the eval date}
}
\value{
a markdown document
}
\description{
Create a new eval from template
}
|
a5c05cbd2cd744355d346d49fac24554efa083a2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/LearnBayes/examples/pbetap.Rd.R
|
e29558895dd0daab47e5c52120aebb1f7e603dc3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 214
|
r
|
pbetap.Rd.R
|
library(LearnBayes)
### Name: pbetap
### Title: Predictive distribution for a binomial sample with a beta prior
### Aliases: pbetap
### Keywords: models

### ** Examples

# Beta(3, 12) prior; predictive probability of s = 0..10 successes
# out of n = 10 future binomial trials.
ab <- c(3, 12)
n <- 10
s <- 0:10
pbetap(ab, n, s)
|
7280fe1ebab575cd52c7c2ac3d39fb6a1d8bfe8d
|
ecc8c9fdab4d52cb5dd8a815bf56f91373555b5e
|
/man/GeomTimelineLabel.Rd
|
8e24b0a3a91a6ac87a3e099aa9e781c0f1ed3cc8
|
[] |
no_license
|
hnasko/capstone_earthquake
|
b2447d9f28ae38d94531512820ba4d66ccc9de02
|
70e9e1637e43f696c09064c70c9b9b601803ce5f
|
refs/heads/master
| 2020-05-26T16:07:41.262233
| 2019-05-23T21:27:04
| 2019-05-23T21:27:04
| 188,298,451
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 800
|
rd
|
GeomTimelineLabel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_timeline_label.R
\docType{data}
\name{GeomTimelineLabel}
\alias{GeomTimelineLabel}
\title{Create a geom for adding a text annotation to a time line plot}
\format{An object of class \code{GeomTimelineLabel} (inherits from \code{Geom}, \code{ggproto}, \code{gg}) of length 5.}
\usage{
GeomTimelineLabel
}
\description{
Create a geom for adding a vertical line to each data point on a time line
chart with a text annotation.
}
\examples{
\dontrun{
dt[YEAR > 2000 & COUNTRY \%in\% c("USA", 'CHINA'), ] \%>\%
ggplot2::ggplot(aes(x = DATE, y = COUNTRY, size = EQ_PRIMARY, color = DEATHS)) +
geom_timeline() +
geom_timeline_label(aes(label = LOCATION_NAME), n_max = 5) +
theme_timeline()
}
}
\keyword{datasets}
|
5cd1af0d42f11813ddde0005b2b5fbcebfa0ac28
|
e4c2b22457b46077a7ef3f1115b11b44430379f7
|
/ui.R
|
7b0a3cd12e7574c9a05c1649d267a44523bb7f91
|
[] |
no_license
|
jlranaliticas/ShinyCOVIDData
|
8456a180f3c1b42571f18a66a011f13f855ad5be
|
5036a7f345b3460258b7706688652354cddf0a84
|
refs/heads/main
| 2023-03-22T13:30:18.553645
| 2021-03-20T16:05:22
| 2021-03-20T16:05:22
| 349,771,940
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,695
|
r
|
ui.R
|
# ui.R - GA COVID-19 State Analysis
#
# Shiny UI definition. NOTE(review): relies on a data frame `df` with a
# `Death_Rate` column being loaded elsewhere (e.g. global.R) — confirm.
shinyUI(fluidPage(

    # Application title
    titlePanel("U.S. State of Georgia COVID-19 County Data"),

    navbarPage("Georgia COVID-19 Data",
        # Interactive county map with slider filters.
        tabPanel("Map View",
            wellPanel(
                h5("Filter COVID-19 Data using Sliders"),
                sliderInput("casedeath",
                            label = "Death Rate/100K Greater than or Equal to:",
                            min = 0,
                            max = max(df$Death_Rate),
                            value = 0),
                # Trailing commas after the last argument were removed; they
                # caused "argument is missing" errors under older htmltools.
                sliderInput("pop",
                            "Population Size Greater than or Equal to:",
                            min = 1000,
                            max = 250000,
                            value = 1000,
                            step = 10000)
            ),
            leafletOutput("GAMap")
        ),
        # Sortable tabular view of the same data.
        tabPanel(title = "Data Table",
            radioButtons("sortseq", "Sort Data Table by ..",
                         c("County", "# Deaths", "# Cases", "# Hospitalizations"),
                         selected = "County",
                         inline = TRUE),
            # materialSwitch comes from the shinyWidgets package — presumably
            # loaded in global.R; verify.
            materialSwitch("sortDir", "Ascending/Descending", inline = TRUE),
            tableOutput("GATable")
        ),
        navbarMenu("Help",
            tabPanel(title = "Help on Map",
                imageOutput("MapHelp", width = "auto", height = "auto")
            ),
            tabPanel(title = "Help on Data Table",
                imageOutput("DataHelp")
            )
        )
    )
))
|
37c6302b668d228ce552f84f0556f5116fd3265f
|
c8e98d5c23dc30a92d464364f95d6c08e9a078e1
|
/server.R
|
928b98a81dc622cac87355b195daa4fa5f7b5194
|
[] |
no_license
|
danmaclean/multi_candiSNP
|
386e4ea600daae8fca0cbb3e18abd92380fc3de6
|
051e7869d77f9db3824ffb6e60cb0210d7cf71b4
|
refs/heads/master
| 2021-01-21T14:11:31.999148
| 2016-07-12T15:25:38
| 2016-07-12T15:25:38
| 59,308,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,334
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# TODO:
# Allele Freq Range Slider - DONE
# Colour by: Change, Effect, Is_CTGA, Is_synonymous - DONE.
# Shape by: Tag - DONE
# only call filedata if values entered in file selector
# filter out centromeres - DONE
library(shiny)
library(ggplot2)
library(jsonlite)
library(dplyr)
# Remove SNPs falling within +/- `buffer` bp of the centromere midpoint on
# each of chromosomes 1-5 (Arabidopsis coordinates).
#
# Args:
#   df:     data frame with at least `Chr` (chromosome id, "1".."5") and
#           `Pos` (base-pair position) columns.
#   buffer: half-width of the exclusion window around each centromere
#           midpoint; defaults to 500 kb, matching the original
#           hard-coded bounds.
#
# Returns: `df` restricted to rows strictly outside every centromere
#          window. Rows on chromosomes other than 1-5, and rows where the
#          condition is NA, are dropped — the same as the original
#          dplyr::filter() behaviour.
filterCentromeres <- function(df, buffer = 500000){
  # Centromere midpoints (bp) per chromosome.
  centromeres <- c("1" = 15086545,
                   "2" = 3608429,
                   "3" = 14209452,
                   "4" = 3956521,
                   "5" = 11725524)
  keep <- rep(FALSE, nrow(df))
  for (chr in names(centromeres)) {
    mid <- centromeres[[chr]]
    keep <- keep | (df$Chr == chr & (df$Pos < (mid - buffer) | df$Pos > (mid + buffer)))
  }
  out <- df[which(keep), , drop = FALSE]
  rownames(out) <- NULL  # dplyr::filter also renumbers rows
  return(out)
}
# Server logic: users upload one or more SNP CSV files, tag each file, and
# the app renders frequency and allele-frequency plots faceted by chromosome.
shinyServer(function(input, output) {
# Dynamically render one tag text-input per uploaded file.
output$text1 <- renderUI({
file_count <- 0
if ( length(input$files) > 0) { file_count <- length(input$files$name)
lapply(1:file_count, function(i){
# Tag input id is derived from the file name, e.g. "snps.csv_tag".
tag <- paste0(input$files$name[i], "_tag")
textInput(tag, paste0("Enter tag for file ", input$files$name[i]), value="Enter Tag..." )
})
}
})
# Read all uploaded CSVs, attach the user-entered tag to each, coerce the
# known categorical columns to factors, row-bind everything, then apply the
# allele-frequency range filter and (optionally) centromere filtering.
filedata <- reactive({
dataframe_list <- NULL
if (length(input$files) > 0){
dataframe_list <- lapply(1:length(input$files$name), function(i){
df <- read.csv(input$files$datapath[i], header=TRUE, sep=",",stringsAsFactors=TRUE, quote="\"")
tag <- paste0(input$files$name[i], "_tag")
# Tag value comes from the dynamically created text input above.
df$tag <- input[[tag]]
df$tag <- as.factor(df$tag)
df$Chr <- as.factor(df$Chr)
# Optional annotation columns — coerce to factor only when present.
if("Is_CTGA" %in% colnames(df)){
df$Is_CTGA <- as.factor(df$Is_CTGA)
}
if("Is_Synonymous" %in% colnames(df)){
df$Is_Synonymous <- as.factor(df$Is_Synonymous)
}
if("Effect" %in% colnames(df)){
df$Effect <- as.factor(df$Effect)
}
if("In_CDS" %in% colnames(df)){
df$In_CDS <- as.factor(df$In_CDS)
}
return(df)
})
}
# Row-bind all uploaded files into one data frame.
file_data <- dataframe_list[[1]]
if (length(dataframe_list) > 1){
for (i in 2:length(dataframe_list) ){
file_data <- rbind(file_data, dataframe_list[[i]])
}
}
# input$range presumably comes from a slider in ui.R — confirm.
file_data <- filter(file_data, Allele_Freq >= input$range[1], Allele_Freq <= input$range[2])
if (input$remove_centromere){
file_data <- filterCentromeres(file_data)
}
return(file_data)
})
# Offer only the annotation columns actually present in the data as
# colouring choices. NOTE(review): headers uses "Is_synonymous" (lower-case
# s) while filedata() checks "Is_Synonymous" — verify intended casing.
output$colour_choice <- renderUI({
file_data <- filedata()
categories <- character()
headers = c('Is_CTGA', 'Is_synonymous', 'In_CDS', 'Effect')
for(i in 1:length(colnames(file_data))){
if (colnames(file_data)[i] %in% headers){
categories <- c(categories, colnames(file_data)[i] )
}
}
radioButtons("colour_choice", "Choose Colouring Factor", categories)
})
# SNP density along each chromosome (frequency polygon, one colour per tag).
output$main_plot <- renderPlot({
file_data <- filedata() # only call if file is selected otherwise throws errors
if ("Pos" %in% colnames(file_data) ){
p <- ggplot(file_data, aes(Pos, colour=tag, fill=tag)) + geom_freqpoly(binwidth=input$bw_adjust)
p <- p + facet_grid(.~Chr, space="free", scales="free")#, ncol=1)
p <- p + scale_x_continuous(breaks = seq(0,30000000,10000000) ) #labels=c("5Mb","10Mb","15Mb","20Mb","25Mb","30Mb") )
print(p)
}
})
# Scatter of allele frequency vs position, coloured by the chosen factor
# and shaped by file tag; debug traces go to stderr.
output$snp_plot <- renderPlot({
file_data <- filedata()
if ("Pos" %in% colnames(file_data) ){
cat(file=stderr(), "drawing histogram with", str(input$colour_choice), "bins\n")
cat(file=stderr(), "drawing histogram with", str(input$spot_alpha), "bins\n")
s <- ggplot(file_data, aes_string(x="Pos", y="Allele_Freq",colour=input$colour_choice, fill=input$colour_choice, shape="tag")) + geom_point( alpha = input$spot_alpha) + facet_grid(. ~ Chr, scales="free_x",space="free_x")
s <- s + scale_x_continuous(breaks = seq(0,30000000,10000000) ) #labels=c("5Mb","10Mb","15Mb","20Mb","25Mb","30Mb") )
print(s)
}
})
})
|
f023de0a9ae6b1c500e2bcea825678c3b20d0bad
|
6e54e5de8202a9b6e4bbf19f856f366f998558eb
|
/plot1.R
|
ab20f552ddf869d1dff3c7eeaacffad5fa4ef154
|
[] |
no_license
|
klootpik/ExData_Plotting1
|
e6e794947ce42e25747d4d764a150f087b17ae85
|
7dd3f6c5aa14400eba6dc5a02af26726b1ab60a9
|
refs/heads/master
| 2020-12-04T11:52:53.484944
| 2020-01-04T11:54:32
| 2020-01-04T11:54:32
| 231,754,703
| 0
| 0
| null | 2020-01-04T11:52:15
| 2020-01-04T11:52:14
| null |
UTF-8
|
R
| false
| false
| 2,143
|
r
|
plot1.R
|
# plot1.R — download the UCI household power consumption data, subset two
# days (2007-02-01 and 2007-02-02), fix up date/time and numeric columns,
# and save a histogram of Global Active Power to plot1.png.
library(data.table)
getwd()
destfile <- "./week1/Dataset.zip"
outDir <- "./week1/uitgepakt"
urlie <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if(!dir.exists("week1")) {
dir.create("week1")
}
# Side effects: network download + extraction on every run.
download.file(urlie, destfile)
unzip(destfile, exdir=outDir)
file <- list.files(outDir, recursive = T)
Dataset <- fread(paste0(outDir, "/", file))
# just a check
Dataset[, .N]
Dataset[, .N, Date]
# making the subset, only containing data from the dates 2007-02-01 and 2007-02-02
# (dates in the raw file are d/m/yyyy strings).
Databoy <- Dataset[Date %in% c('1/2/2007', '2/2/2007')]
# a check again
Databoy[, .N, Date]
str(Databoy)
# I want to create a new variabele that combines date and time in the right format, since I am not very handy
# I need a lot of intermediate steps to achieve this
Databoy[, ORG_Date := Date]
Databoy[, ORG_Time := Time]
Databoy[, Date := strptime(ORG_Date, "%d/%m/%Y")]
# check to check if conversion went right
Databoy[,.N, .(Date, ORG_Date)]
str(Databoy)
# combine date and time in one variabele
Databoy$Supertime <- paste(Databoy$Date, Databoy$Time)
# convert into right format
Databoy[, Supertime := strptime(Supertime, "%Y-%m-%d %H:%M:%S")]
# final checks on date and time
class(Databoy$Supertime)
Databoy[,.N, .(Supertime, Date, Time)]
# Some variables to be plotted must be converted from character to numeric value, otherwise the plots will explode.
# First a selection of these columns, then defining a function that helps converting, finally an action that
# makes the actual conversion happen:
# Columns 3:9 are the measurement columns (assumes the standard file layout — verify).
columnboys <- names(Databoy)[3:9]
class(columnboys)
convertboy <- function(x) as.numeric(x)
# conversion applied to selected columns
Databoy[,(columnboys):=lapply(.SD, convertboy), .SDcols = columnboys]
# check to see if conversion happened
str(Databoy)
### Now it is finally time to make a plot
# plot 1
hist(x = Databoy$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
# Copy the on-screen plot to a 480x480 PNG, then close the PNG device.
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
24f28d2ca5f0254059649708488823c005a7512e
|
73c9b3c52db44bca119ecd3585ff38db1e9c05b1
|
/man/Solve.Rd
|
28343943c708fbdc55dc751d10a24aedf3c9bbec
|
[] |
no_license
|
friendly/matlib
|
a360f4f975ae351ce1a5298c7697b460c4ba8dcd
|
13bb6ef45f9832d4bc96e70acc5e879e5f0c5c90
|
refs/heads/master
| 2023-08-30T13:23:26.679177
| 2023-08-25T17:29:23
| 2023-08-25T17:29:23
| 45,190,492
| 72
| 19
| null | 2023-03-16T19:30:18
| 2015-10-29T15:01:24
|
R
|
UTF-8
|
R
| false
| true
| 2,579
|
rd
|
Solve.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Solve.R
\name{Solve}
\alias{Solve}
\title{Solve and Display Solutions for Systems of Linear Simultaneous Equations}
\usage{
Solve(
A,
b = rep(0, nrow(A)),
vars,
verbose = FALSE,
simplify = TRUE,
fractions = FALSE,
...
)
}
\arguments{
\item{A, }{the matrix of coefficients of a system of linear equations}
\item{b, }{the vector of constants on the right hand side of the equations. The default is a vector of zeros,
giving the homogeneous equations \eqn{Ax = 0}.}
\item{vars}{a numeric or character vector of names of the variables.
If supplied, the length must be equal to the number of unknowns in the equations.
The default is \code{paste0("x", 1:ncol(A))}.}
\item{verbose, }{logical; show the steps of the Gaussian elimination algorithm?}
\item{simplify}{logical; try to simplify the equations?}
\item{fractions}{logical; express numbers as rational fractions, using the \code{\link[MASS]{fractions}}
function; if you require greater accuracy, you can set the \code{cycles} (default 10)
and/or \code{max.denominator} (default 2000) arguments to \code{fractions} as a global option, e.g.,
\code{options(fractions=list(cycles=100, max.denominator=10^4))}.}
\item{..., }{arguments to be passed to \code{link{gaussianElimination}} and \code{\link{showEqn}}}
}
\value{
the function is used primarily for its side effect of printing the solution in a
readable form, but it invisibly returns the solution as a character vector
}
\description{
Solve the equation system \eqn{Ax = b}, given the coefficient matrix
\eqn{A} and right-hand side vector \eqn{b}, using \code{link{gaussianElimination}}.
Display the solutions using \code{\link{showEqn}}.
}
\details{
This function mimics the base function \code{\link[base]{solve}} when supplied with two arguments,
\code{(A, b)}, but gives a prettier result, as a set of equations for the solution. The call
\code{solve(A)} with a single argument overloads this, returning the inverse of the matrix \code{A}.
For that sense, use the function \code{\link{inv}} instead.
}
\examples{
A1 <- matrix(c(2, 1, -1,
-3, -1, 2,
-2, 1, 2), 3, 3, byrow=TRUE)
b1 <- c(8, -11, -3)
Solve(A1, b1) # unique solution
A2 <- matrix(1:9, 3, 3)
b2 <- 1:3
Solve(A2, b2, fractions=TRUE) # underdetermined
b3 <- c(1, 2, 4)
Solve(A2, b3, fractions=TRUE) # overdetermined
}
\seealso{
\code{\link{gaussianElimination}}, \code{\link{showEqn}} \code{\link{inv}}, \code{\link[base]{solve}}
}
\author{
John Fox
}
|
4d92bd5bff3476f592fc36ecbb08e89ee7ca1017
|
1ee37f82a6d81f2fd23c766bed4e124ab299af0e
|
/choyongsang_homework_week8.R
|
d5f6737f2e9c25912d247ae64e854483c24c2865
|
[] |
no_license
|
MyChoYS/R_ADsp_basic
|
c592ed28fd9d409bc81c5de8405833ccf406e218
|
33ae7d1a6639dc88daa566818f747b41a6233373
|
refs/heads/master
| 2023-02-17T09:36:47.169240
| 2021-01-18T04:30:03
| 2021-01-18T04:30:03
| 326,630,110
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,777
|
r
|
choyongsang_homework_week8.R
|
# Analysis of economic information
# Week 8
# homework until Oct 28
# Fixed in review: question 7 used `=` inside the if() condition, which is a
# parse error in R (the file would not even source); equality is `==`.
# Questions 1-5 computed counts with sum() although each question asks to
# "store it as new data frame" — they now store actual row subsets.
# Question 4 used OR (`|`) where the question asks for gear == 4 AND am == 0.

# question 0
# Print your name and student number by using print() function
print("12142139 choyongsang")

# question 1
mtcars <- mtcars
if (interactive()) View(mtcars)  # data viewer; only valid in interactive sessions
??mtcars

# Subset cases where the number of cylinders (cyl) is 8 (Don't use subset function)
# and store it as new data frame "eightCylinders"
eightCylinders <- mtcars[mtcars$cyl == 8, ]
eightCylinders

# question 2. Subset cases where the number of forward gears (gear) is 3 (Don't use subset function)
# and store it as new data frame "threeGears"
threeGears <- mtcars[mtcars$gear == 3, ]
threeGears

# question 3. Return only rows for the number of forward gears (gear) that are 3 or 5
# and store it as new data frame "threeFiveGears"
threeFiveGears <- mtcars[mtcars$gear == 3 | mtcars$gear == 5, ]
threeFiveGears

# question 4. Return only rows for the number of forward gears (gear) that are 4, and transmission (am) is automatic (0),
# and store it as new data frame "fourGearAuto"
fourGearAuto <- mtcars[mtcars$gear == 4 & mtcars$am == 0, ]
fourGearAuto

# question 5. Return only rows for the number of forward gears (gear) is NOT 4,
# and store it as new data frame "notFourGear"
notFourGear <- mtcars[mtcars$gear != 4, ]
notFourGear

# question 6. Print "My car is heavy!" if the weight (wt) of my car is greater than 4,
# otherwise print "My car is not that heavy."
mycar <- mtcars[8, ]
if (mycar$wt > 4) {
  print("My car is heavy!")
} else {
  print("My car is not that heavy.")
}

# question 7. Print "My car is automatic!" if the transmission is automatic (am == 0),
# otherwise print "My car is manual."
mycar <- mtcars[8, ]
if (mycar$am == 0) {
  print("My car is automatic!")
} else {
  print("My car is manual.")
}

# question 8. "My car is very heavy!" if wt >= 4; "My car is heavy enough" if
# 3 <= wt < 4; "My car is light" otherwise.
mycar <- mtcars[15, ]
if (mycar$wt >= 4) {
  print("My car is very heavy!")
} else if (mycar$wt >= 3 && mycar$wt < 4) {
  print("My car is heavy enough")
} else {
  print("My car is light")
}

# question 9. "My car is fantastic!" if qsec <= 15; "My car is decent" if
# 15 < qsec <= 19; "My car is a turtle!" otherwise.
mycar <- mtcars[6, ]
if (mycar$qsec <= 15) {
  print("My car is fantastic!")
} else if (mycar$qsec > 15 && mycar$qsec <= 19) {
  print("My car is decent")
} else {
  print("My car is a turtle!")
}
|
138c0c91233f613cd4ab08d755db0ffc847baa16
|
7d5d8492c2d88b88bdc57e3c32db038a7e7e7924
|
/robustness/glam-utils/calibrate.R
|
8f915f671b7a5d2b6ec901fb67a61b9a4d283b0d
|
[] |
no_license
|
CIAT-DAPA/dapa-climate-change
|
80ab6318d660a010efcd4ad942664c57431c8cce
|
2480332e9d61a862fe5aeacf6f82ef0a1febe8d4
|
refs/heads/master
| 2023-08-17T04:14:49.626909
| 2023-08-15T00:39:58
| 2023-08-15T00:39:58
| 39,960,256
| 15
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,532
|
r
|
calibrate.R
|
#Julian Ramirez-Villegas
#UoL / CCAFS / CIAT
#Feb 2014 #borrows from PhD script called "glam-optimise-functions.R"
##############################################################################################
####### function to calibrate GLAM (for as many grid cells as provided), each one individually
##############################################################################################
#this (first) function should
#1. use number of steps to choose the values to iterate
#2. calibrate (i.e. find optimum YGP) for each grid cell individually
#3. return table of grid cell * ygp values, RMSE value, and crop yield
#note: a second function will deal with optimisation of parameters
#note: this function should be applicable to both the hypercube and the normal
# optimisation procedure
# #example:
# #---------------------------------------------------------------
# src.dir <- "~/Repositories/dapa-climate-change/trunk/robustness"
# source(paste(src.dir,"/glam-utils/make_dirs.R",sep=""))
# source(paste(src.dir,"/glam-utils/make_soilfiles.R",sep=""))
# source(paste(src.dir,"/glam-utils/make_sowfile.R",sep=""))
# source(paste(src.dir,"/glam-utils/make_wth.R",sep=""))
# source(paste(src.dir,"/glam-utils/make_parameterset.R",sep=""))
# source(paste(src.dir,"/glam-utils/get_parameterset.R",sep=""))
# source(paste(src.dir,"/glam-utils/run_glam.R",sep=""))
#
# wd <- "~/Leeds-work/quest-for-robustness"
# runsDir <- paste(wd,"/crop_model_runs",sep="")
# calibDir <- paste(runsDir,"/ppe_optimisation",sep="")
# mdataDir <- paste(wd,"/data/model_data",sep="")
# metDir <- paste(wd,"/data/meteorology",sep="")
# binDir <- paste(wd,"/bin/glam-maize-osx",sep="")
#
# #load objects
# load(paste(mdataDir,"/initial_conditions_major.RData",sep=""))
# load(paste(mdataDir,"/yield_major.RData",sep=""))
#
# #arguments
# cal_data <- list()
# cal_data$CROP <- "maize"
# cal_data$MODEL <- "glam-maiz"
# cal_data$BASE_DIR <- calibDir
# cal_data$BIN_DIR <- binDir
# cal_data$PAR_DIR <- mdataDir
# cal_data$WTH_DIR <- paste(metDir,"/ascii_extract_raw",sep="") #for reading .wth files
# cal_data$WTH_ROOT <- "obs_hist_WFD"
# cal_data$LOC <- c(680,681,682)
# cal_data$ISYR <- 1981
# cal_data$IEYR <- 2000
# cal_data$INI_COND <- xy_main
# cal_data$YLD_DATA <- xy_main_yield
# cal_data$PARAMS <- GLAM_get_default(cal_data$PAR_DIR)
# cal_data$SIM_NAME <- "optim1"
# cal_data$NSTEPS <- 100
# cal_data$RUN_TYPE <- "RFD"
# cal_data$METHOD <- "RMSE"
# cal_data$USE_SCRATCH <- F
# cal_data$SCRATCH <- NA
#
# #modify parameter value to avoid model failure
# cal_data$PARAMS$glam_param.maize$TLIMJUV$Value <- 280
#
# ygpcalib <- GLAM_calibrate(cal_data)
# #---------------------------------------------------------------
# #plotting some of the results
# xx <- ygpcalib$RAW_DATA[which(ygpcalib$RAW_DATA$VALUE==0.87 & ygpcalib$RAW_DATA$LOC==680),]
# plot(xx$YEAR,xx$OBS_ADJ,ty="l",ylim=c(0,1200))
# lines(xx$YEAR,xx$PRED_ADJ,col="red")
#
# yy <- ygpcalib$CALIBRATION[which(ygpcalib$CALIBRATION$LOC==680),]
# plot(yy$VALUE, yy$RMSE/yy$YOBS_ADJ*100, ty='l',ylim=c(0,100))
### note:
#simulate year before starting one because if sowing date is late then harvest is in this year
#last year cannot be last year of time series since model runs could fail due to late sowing
### further notes:
#** due to issues in Sahel, multiple plantings were considered: 10 plantings between dates of Sacks
#** due to issues in Sahel, SAT*1.15 and SAT*1.30 were also simulated (soil storage was too poor)
#calibrate ygp
GLAM_calibrate <- function(cal_data) {
param <- "YGP" #toupper(cal_data$PARAM)
sect <- "glam_param.ygp" #tolower(cal_data$SECT)
params <- cal_data$PARAMS
#put years into parameter set
params$glam_param.mod_mgt$ISYR <- cal_data$ISYR
params$glam_param.mod_mgt$IEYR <- cal_data$IEYR
#here is the optimisation method
#RMSE: is yearly root mean square error (classical)
#CH07: is the MSE method proposed in Challinor et al. (2007) AGEE, that optimises based on
# the differences between mean and standard deviations of the simulated time series
#CH10: is the MSE method proposed in Challinor et al. (2010) ERL, that optimises based
# on the difference between mean yields only. I guess this method is only valid when
# an insufficiently large observed yield + weather time series is available.
if (is.null(cal_data$METHOD)) {
opt_meth <- "RMSE" #defaulting to RMSE if missing in input list
} else {
opt_meth <- toupper(cal_data$METHOD)
}
if (!opt_meth %in% c("RMSE","CH07","CH10")) {
opt_meth <- "RMSE" #defaulting the RMSE
}
#input directories and model
exec_name <- cal_data$MODEL
#running command
glam_cmd <- paste("./",exec_name,sep="")
#output directories
if (cal_data$USE_SCRATCH) {
cal_dir <- cal_data$SCRATCH #calibration directory
} else {
cal_dir <- cal_data$BASE_DIR #calibration directory
}
if (!file.exists(cal_dir)) {dir.create(cal_dir,recursive=T)}
cal_dir <- paste(cal_dir,"/",cal_data$SIM_NAME,sep="") #calibration directory
if (!file.exists(cal_dir)) {dir.create(cal_dir)}
#create optimisation folder if it does not exist
opt_dir <- paste(cal_dir,"/",tolower(param),sep="")
if (!file.exists(opt_dir)) {dir.create(opt_dir)}
#create sequence of values
#vals <- seq(params[[sect]][[param]][,"Min"],params[[sect]][[param]][,"Max"],length.out=cal_data$NSTEPS)
#vals <- seq(0,1,length.out=51)[2:51] #0.2, 0.4, ... 1.0 (total of 50)
vals <- c(0.01,seq(0.05,1,length.out=20)) #(total of 21)
#loop through desired locations
for (loc in cal_data$LOC) {
#loc <- cal_data$LOC[1]
cat("\n...loc= ",loc,sep="","\n")
#sowing window
sow_date1 <- cal_data$INI_COND$SOW_DATE1[which(cal_data$INI_COND$LOC == loc)]
sow_date2 <- cal_data$INI_COND$SOW_DATE2[which(cal_data$INI_COND$LOC == loc)]
#sow_window <- sow_date1 - sow_date2 #no need due to multiple planting
params$glam_param.mod_mgt$ISDAY$Value <- -30 #min(c(sow_window,-30)) #set to -30 as multiple planting
#data.frame of iterative soil*sowing date trials
sow_seq <- round(seq(sow_date1, sow_date2, length.out=5), 0)
sol_seq <- c(1,1.3,1.6)
run_df <- expand.grid(sow=sow_seq, sol=sol_seq)
#prepare input object
run_data <- list()
run_data$CROP <- cal_data$CROP
run_data$MODEL <- cal_data$MODEL
run_data$BASE_DIR <- opt_dir
run_data$BIN_DIR <- cal_data$BIN_DIR
run_data$PAR_DIR <- NA
run_data$WTH_DIR <- paste(cal_data$WTH_DIR,"/",cal_data$WTH_ROOT,sep="") #to be specified
run_data$LOC <- loc
run_data$LON <- cal_data$INI_COND$x[which(cal_data$INI_COND$LOC == loc)]
run_data$LAT <- cal_data$INI_COND$y[which(cal_data$INI_COND$LOC == loc)]
run_data$ME <- cal_data$INI_COND$ME_NEW[which(cal_data$INI_COND$LOC == run_data$LOC)]
run_data$SOW_DATE <- cal_data$INI_COND$SOW_DATE1[which(cal_data$INI_COND$LOC == run_data$LOC)]
run_data$RLL <- cal_data$INI_COND$RLL[which(cal_data$INI_COND$LOC == run_data$LOC)]
run_data$DUL <- cal_data$INI_COND$DUL[which(cal_data$INI_COND$LOC == run_data$LOC)]
run_data$SAT <- NA #cal_data$INI_COND$SAT[which(cal_data$INI_COND$LOC == run_data$LOC)]
run_data$ISYR <- cal_data$ISYR
run_data$IEYR <- cal_data$IEYR
run_data$PARAMS <- params
#loop through sequence of values
for (i in 1:length(vals)) {
#i <- 1
cat("performing ygp calibration run ",cal_data$RUN_TYPE," ",i," value = ",vals[i],sep="","\n")
#run id
run_data$RUN_ID <- paste("run-",i,"_val-",vals[i],"_loc-",run_data$LOC,sep="")
#assign values to parameter set
run_data$PARAMS[[sect]][[param]][,"Value"] <- vals[i]
#run all sow*sol options for this YGP value and location
pred_all <- data.frame()
for (k in 1:nrow(run_df)) {
#k <- 1
#get sow date and SAT multiplier
sow_date <- run_df$sow[k]
run_data$SAT <- cal_data$INI_COND$SAT[which(cal_data$INI_COND$LOC == run_data$LOC)] * run_df$sol[k]
#run the model from scratch if k == 1, otherwise just go to dir, run and grab
#check whether the *.out already exists
outfile <- list.files(paste(run_data$BASE_DIR,"/",run_data$RUN_ID,"/output",sep=""),pattern="\\.out")
if (length(outfile) == 0) {
if (k == 1) {
run_data <- run_glam(run_data)
} else {
#if (cal_data$USE_SCRATCH) {}
solfil <- make_soilcodes(outfile=paste(run_data$BASE_DIR,"/",run_data$RUN_ID,"/inputs/ascii/soil/soilcodes.txt",sep=""))
solfil <- make_soiltypes(data.frame(CELL=run_data$LOC,RLL=run_data$RLL,DUL=run_data$DUL,SAT=run_data$SAT),
outfile=paste(run_data$BASE_DIR,"/",run_data$RUN_ID,"/inputs/ascii/soil/soiltypes.txt",sep=""))
sowfil <- make_sowdates(data.frame(CELL=run_data$LOC,SOW_DATE=sow_date),
outfile=paste(run_data$BASE_DIR,"/",run_data$RUN_ID,"/inputs/ascii/sow/sowing.txt",sep=""))
thisdir <- getwd(); setwd(paste(run_data$BASE_DIR,"/",run_data$RUN_ID,sep="")); system(paste("./",run_data$MODEL,sep="")); setwd(thisdir)
}
} else {
run_data$SEAS_FILES <- outfile
run_data$RUN_DIR <- paste(run_data$BASE_DIR,"/",run_data$RUN_ID,sep="")
}
#read in the simulated yield
if (length(run_data$SEAS_FILES) == 1 | length(outfile) == 1) {
pred <- read.table(paste(run_data$RUN_DIR,"/output/",run_data$SEAS_FILES,sep=""),header=F,sep="\t")
names(pred) <- c("YEAR","LAT","LON","PLANTING_DATE","STG","RLV_M","LAI","YIELD","BMASS","SLA",
"HI","T_RAIN","SRAD_END","PESW","TRANS","ET","P_TRANS+P_EVAP","SWFAC","EVAP+TRANS",
"RUNOFF","T_RUNOFF","DTPUPTK","TP_UP","DRAIN","T_DRAIN","P_TRANS","TP_TRANS",
"T_EVAP","TP_EVAP","T_TRANS","RLA","RLA_NORM","RAIN_END","DSW","TRADABS",
"DUR","VPDTOT","TRADNET","TOTPP","TOTPP_HIT","TOTPP_WAT","TBARTOT",
"IPLANT","LETHAL_YIELD","LETHAL_HI","LETHAL_BMASS","LETHAL_BMASS","LETHAL_DAP",
"SWFAC_TOT","SWFAC_MEAN","SWFAC_COUNT")
pred <- cbind(SOW=sow_date, SAT_FAC=run_df$sol[k], pred[,c("YEAR","STG","YIELD","PLANTING_DATE","DUR")])
pred_all <- rbind(pred_all, pred)
system(paste("rm -f ",run_data$RUN_DIR,"/output/*.out",sep="")) #remove junk
}
}
#read in all files and determine best sat multiplier
if (nrow(pred_all) > 0) { #for existence of output GLAM file
#average by YEAR and SAT_FAC
#pred_all <- pred_all[which(pred_all$STG != 9),] #first remove STG=9 (no emergence)
pred_all$YIELD[which(pred_all$STG == 9)] <- NA #set to NA all STG==0
pred_agg <- aggregate(pred_all[,c("SOW","YIELD","PLANTING_DATE","DUR")], by=list(YEAR=pred_all$YEAR, SAT_FAC=pred_all$SAT_FAC), FUN=function(x) {mean(x,na.rm=T)})
#perform this calculation for each value of SAT_FAC
odf_all <- data.frame()
for (sfac in sol_seq) {
#sfac <- sol_seq[1]
#grab predicted yield
pred <- pred_agg[which(pred_agg$SAT_FAC == sfac),]
y_p <- pred$YIELD
y_p[which(is.na(y_p))] <- 0 #set to zero any NAs (product of emergence failure)
#grab observed yield
y_o <- as.data.frame(t(cal_data$YLD_DATA[which(cal_data$YLD_DATA$x == run_data$LON & cal_data$YLD_DATA$y == run_data$LAT),3:ncol(cal_data$YLD_DATA)]))
y_o <- cbind(YEAR=1982:2005,y_o)
names(y_o)[2] <- "YIELD"
row.names(y_o) <- 1:nrow(y_o)
y_o <- y_o[which(y_o$YEAR >= cal_data$ISYR & y_o$YEAR <= cal_data$IEYR),]
y_o <- y_o$YIELD
#get simulated yield, depending on which year the crop was actually harvested:
#** if planted and harvested this year then use simulation from this year to end
#** if planted this and harvested next year then use simulation from year-1 to end-1
har_date <- mean((pred$PLANTING_DATE + pred$DUR)) #get harvest date first
if (har_date<365) {y_p <- y_p[2:length(y_p)]} else {y_p <- y_p[1:(length(y_p)-1)]}
odf <- data.frame(YEAR=(cal_data$ISYR+1):cal_data$IEYR,VALUE=vals[i],OBS=y_o,PRED=y_p)
## detrending (borrows from detrender-functions.R)
#detrend observed yield
fit_loess <- loess(odf$OBS ~ odf$YEAR) #compute lowess fit
y_loess <- predict(fit_loess, odf$YEAR, se=T) #theoretical prediction
odf$LOESS_PRED <- y_loess$fit
rd_loess <- (odf$OBS - odf$LOESS_PRED) / odf$LOESS_PRED #relative difference
odf$OBS_ADJ <- (rd_loess+1) * mean(odf$OBS, na.rm=T) #odf$OBS[nrow(odf)] #loess
#detrend simulated yield
if (length(which(odf$PRED == 0)) == length(odf$PRED)) {
odf$PRED_ADJ <- 0
} else {
fit_loess <- loess(odf$PRED ~ odf$YEAR, degree=1, span=2) #compute lowess fit
y_loess <- predict(fit_loess, odf$YEAR, se=T) #theoretical prediction
odf$LOESS_PRED <- y_loess$fit
rd_loess <- (odf$PRED - odf$LOESS_PRED) / odf$LOESS_PRED #relative difference
odf$PRED_ADJ <- (rd_loess+1) * mean(odf$PRED, na.rm=T) #odf$PRED[nrow(odf)] #loess
}
odf$LOESS_PRED <- NULL #remove extra field
#choose optimisation method (RMSE, CH07, CH10)
if (opt_meth == "RMSE") {
rmse <- sqrt(sum((odf$OBS_ADJ-odf$PRED_ADJ)^2,na.rm=T) / (length(which(!is.na(odf$OBS_ADJ)))))
} else if (opt_meth == "CH07") {
rmse <- (mean(odf$OBS_ADJ,na.rm=T)-mean(odf$PRED_ADJ,na.rm=T))^2 + (sd(odf$OBS_ADJ,na.rm=T)-sd(odf$PRED_ADJ,na.rm=T))^2
} else if (opt_meth == "CH10") {
rmse <- (mean(odf$OBS_ADJ,na.rm=T)-mean(odf$PRED_ADJ,na.rm=T))^2
}
odf <- cbind(SAT_FAC=sfac, RMSE=rmse, odf)
odf_all <- rbind(odf_all, odf)
}
#select minimum RMSE
rmse_all <- aggregate(odf_all[,c("RMSE")], by=list(SAT_FAC=odf_all$SAT_FAC), FUN=function(x) {mean(x,na.rm=T)})
sfac <- rmse_all$SAT_FAC[which(rmse_all$x == min(rmse_all$x))][1]
rmse <- min(rmse_all$x)
#remove junk
system(paste("rm -rf ",run_data$RUN_DIR,sep=""))
} else {
rmse <- NA
}
odf <- odf_all[which(odf_all$SAT_FAC == sfac),]; odf$RMSE <- NULL
out_row <- data.frame(VALUE=vals[i], SAT_FAC=sfac, RMSE=rmse, YOBS=mean(odf$OBS,na.rm=T), YPRED=mean(odf$PRED,na.rm=T),
YOBS_ADJ=mean(odf$OBS_ADJ,na.rm=T), YPRED_ADJ=mean(odf$PRED_ADJ,na.rm=T))
if (i == 1) {
out_all <- out_row
raw_all <- odf
} else {
out_all <- rbind(out_all,out_row)
raw_all <- rbind(raw_all, odf)
}
}
#append location data
out_all <- cbind(LOC=loc,out_all)
raw_all <- cbind(LOC=loc,raw_all)
if (loc == cal_data$LOC[1]) {
cal_all <- out_all
raw_cal <- raw_all
} else {
cal_all <- rbind(cal_all, out_all)
raw_cal <- rbind(raw_cal, raw_all)
}
}
#remove junk from scratch
if (cal_data$USE_SCRATCH) {
system(paste("rm -rf ",cal_dir,sep=""))
}
#return object
r_list <- list(CALIBRATION=cal_all, RAW_DATA=raw_cal)
return(r_list)
}
|
f5bbdfbb82c128b59a89707ca57a8aa830a45ecc
|
5326bb95528c8eefc6bb9675e82829b607fb1fec
|
/plot3.R
|
8ea450a61a3ec16fd4e7dfc71dafa5585ac07277
|
[] |
no_license
|
rainaz/ExData_Plotting1
|
ef836a98b4c5c702051c99f396ec991158a2ed36
|
27092f91088ccb08cef26a10d05fd1470a252686
|
refs/heads/master
| 2021-01-17T20:19:24.556234
| 2014-05-11T03:14:49
| 2014-05-11T03:14:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 787
|
r
|
plot3.R
|
# plot3.R -- third exploratory plot for the UCI "Individual household
# electric power consumption" dataset: the three sub-metering series over
# 2007-02-01 and 2007-02-02, written to plot3.png.

# Unzip the raw archive only when the text file is not already on disk.
# BUG FIX: the original used exists(), which tests for an R *object* named
# 'household_power_consumption.txt' (always FALSE here), so the archive was
# re-extracted on every run. file.exists() checks the filesystem.
if (!file.exists('household_power_consumption.txt')) {
  unzip('exdata-data-household_power_consumption.zip')
}

# read.csv2() uses ';' as separator, matching the dataset format.
# BUG FIX: the argument must be spelled 'stringsAsFactors' in full -- it is
# forwarded to read.table() through '...', and arguments matched through
# '...' are matched exactly, so the original 'stringsAsFactor' was silently
# ignored.
d <- read.csv2('household_power_consumption.txt', stringsAsFactors = FALSE)

# Keep only the two target days (Date is stored as a dd/mm/YYYY string).
d <- subset(d, as.Date(d$Date, "%d/%m/%Y") == as.Date("2007-02-01") |
               as.Date(d$Date, "%d/%m/%Y") == as.Date("2007-02-02")
            )

# Combine Date and Time into a POSIXlt timestamp for the x axis.
t <- strptime(paste(d$Date, d$Time), "%d/%m/%Y %H:%M:%S")

# 'units' spelled in full (the original 'unit' relied on partial matching).
png(filename = 'plot3.png', width = 480, height = 480, units = 'px')
plot(t, d$Sub_metering_1, type = 'l', ylab = "Energy sub metering", xlab = "", col = 'black')
points(t, d$Sub_metering_2, type = 'l', col = "red")
points(t, d$Sub_metering_3, type = 'l', col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1)
dev.off()
|
79f299ca7e61beeb64a3a2173e0b061ef1e19edc
|
e5fc120f866933943a29c796c7c607dc2690cab3
|
/analysis/ipm/validation/ipm_noseedl_glm_validation.R
|
13e7f0823ecdc9009452af47f71704a9c226d70a
|
[] |
no_license
|
AldoCompagnoni/lupine
|
e07054e7e382590d5fa022a23e024dfec80c80b2
|
afc41a2b66c785957db25583f25431bb519dc7ec
|
refs/heads/master
| 2021-06-23T04:50:30.617943
| 2021-06-11T13:00:59
| 2021-06-11T13:00:59
| 185,047,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,489
|
r
|
ipm_noseedl_glm_validation.R
|
# Hyper-simplified IPM for validation
# I compare observed lambda to deterministic lambda
# only for sites BS, NB, and DR

# NOTE(review): clearing the workspace inside a script is discouraged
# (tidyverse style guide) -- kept here because the script relies on it.
rm(list=ls())
options(stringsAsFactors = F)
library(dplyr)
library(tidyr)
library(ggplot2)
library(readxl)
library(testthat)
library(bbmle)
source('analysis/vital_rates/plot_binned_prop.R')

# format data frames ------------------------------------------------

# read lupine data (one row per individual per transition year)
lupine_df <- read.csv( "data/lupine_all.csv")

# information on sites: map raw location codes (e.g. 'BS (7)') to a bare
# upper-case site name by stripping the trailing ' (digit)' suffix
site_df <- select(lupine_df,location) %>%
  unique %>%
  mutate( Site = gsub(' \\([0-9]\\)','', location) %>%
                 toupper )

# set up year/site information: the years with usable data for each site.
# NOTE(review): 'att' and 'p9' are built but not used in the visible
# analysis below -- only bs, dr and nb enter site_all.
att<- data.frame( location = 'ATT (8)',
                  year = c(2008:2012) )
p9 <- data.frame( location = 'POP9 (9)',
                  year = c(2008:2014) )
bs <- data.frame( site_id = 'BS (7)',
                  year = c(2009:2017) )
dr <- data.frame( site_id = 'DR (3)',
                  year = c(2009:2014,2016:2017) )
nb <- data.frame( site_id = 'NB (2)',
                  year = c(2010,2012:2016) )
# one row per site/year combination (23 rows total: 9 BS + 8 DR + 6 NB)
site_all <- list(bs, dr, nb) %>% bind_rows

# consumption: mean raceme consumption per site/year; missing site/year
# combinations are filled with the overall mean consumption
cons_df <- read_xlsx('data/consumption.xlsx') %>%
  mutate( Mean_consumption = Mean_consumption %>% as.numeric) %>%
  select( Year, Site, Mean_consumption) %>%
  # expand potential "cases"
  complete( Site, Year) %>%
  # update name
  mutate( Site = toupper(Site) ) %>%
  # impute missing consumption with the across-sites/years mean
  mutate( Mean_consumption = replace(Mean_consumption,
                                     is.na(Mean_consumption),
                                     mean(Mean_consumption,na.rm=T)
                                     ) ) %>%
  left_join( site_df ) %>%
  # remove NA locations
  subset( !is.na(location) ) %>%
  # remove annoying code
  select( -Site ) %>%
  rename( year = Year,
          cons = Mean_consumption )

# abortion: mean raceme abortion rate (numab_t0 / numrac_t0) per site/year;
# site/years without data get the overall mean abortion rate
abor_df <- subset(lupine_df, !is.na(flow_t0) & flow_t0 == 1 ) %>%
  subset( !is.na(numrac_t0) ) %>%
  # remove non-flowering individuals
  subset( !(flow_t0 %in% 0) ) %>%
  # remove zero fertility (becase fertility should not be 0)
  subset( !(numrac_t0 %in% 0) ) %>%
  # only years indicated by Tiffany
  subset( year %in% c(2010, 2011, 2013:2017) ) %>%
  # calculate abortion rates
  mutate( ab_r = numab_t0 / numrac_t0 ) %>%
  group_by( location, year ) %>%
  summarise( ab_r_m = mean(ab_r, na.rm=T) ) %>%
  ungroup %>%
  right_join( rename(site_all,
                     location = site_id) ) %>%
  # impute missing site/year abortion rates with the overall mean
  mutate( ab_r_m = replace(ab_r_m,
                           is.na(ab_r_m),
                           mean(ab_r_m,
                                na.rm=T)) )

# germination: mean germination rates g0 (same year), g1 (1-yr-old seed
# bank) and g2 (2-yr-old seed bank) from the seed-basket experiment
germ <- read_xlsx('data/seedbaskets.xlsx') %>%
  select(g0:g2) %>%
  colMeans

# site-spec germination rates
germ_df <- site_all %>%
  rename( location = site_id ) %>%
  select( location ) %>%
  unique %>%
  # site-level observed first-year germination (order follows site_all:
  # BS, DR, NB)
  mutate( germ_obs = c(0.0156,0.0157,0.026) ) %>%
  # post-dispersal predation
  mutate( post_d_p = (germ['g0'] - germ_obs) / germ['g0'] )
# create lambdas for every year
#
# yr_lambdas(): for the ii-th row of the global `site_all` table
# (site_id/year), fit year- and site-specific vital-rate models on
# `lupine_df`, assemble a deterministic IPM kernel with a two-stage seed
# bank (Dangremond-style), and compare its dominant eigenvalue against the
# observed population growth rate.
#
# Arguments:
#   ii -- integer row index into the global `site_all` data frame.
# Depends on globals defined above: site_all, cons_df, abor_df, germ,
# germ_df, plot_binned_prop().
# Side effects: writes a diagnostic .tiff of the vital-rate fits and draws
# the stable stage distribution on the active device.
# Returns a list with lam_det (asymptotic IPM lambda), lam_r (observed
# N_{t+1}/N_t), lam_r_s (placeholder, NA), year and site_id.
yr_lambdas <-function(ii){ #germ_est, sb, dangre

  # data: individuals of the focal site observed in the focal year
  lupine_df <- read.csv( "data/lupine_all.csv") %>%
    subset( year == site_all$year[ii] ) %>%
    # subset( location == 'DR (3)')
    subset( location == site_all$site_id[ii] )
  # subset( location == 'NB (2)')

  # quick visual check of the size distribution
  hist(lupine_df$log_area_t0, freq=F)
  abline(v=1)
  # lupine_df <- subset(lupine_df, log_area_t0 > 1)

  fruit_rac <- read_xlsx('data/fruits_per_raceme.xlsx')
  seed_x_fr <- read_xlsx('data/seedsperfruit.xlsx')
  pred_g <- read_xlsx('data/post predation_lupinus tidestromii.xlsx')
  # hard-coded seedling size distribution (mean/sd on log-area scale)
  sl_size <- data.frame( mean_sl_size = 2.725375531,
                         sd_sl_size = 0.914582829,
                         max_sl_size = 6.082794487,
                         min_sl_size = -0.241564475 )

  # vital rates format --------------------------------------------------------------
  # survival: individuals with known fate at t1 and non-zero size
  surv <- subset(lupine_df, !is.na(surv_t1) ) %>%
    subset( area_t0 != 0) %>%
    mutate( log_area_t02 = log_area_t0^2,
            log_area_t03 = log_area_t0^3 )
  # growth: exclude dormant/not-found stages and zero sizes at either time
  grow <- lupine_df %>%
    subset(!(stage_t0 %in% c("DORM", "NF") ) &
             !(stage_t1 %in% c("D", "NF", "DORM")) ) %>%
    # remove zeroes from area_t0 and area_t1
    subset( area_t0 != 0) %>%
    subset( area_t1 != 0) %>%
    mutate( log_area_t1 = log(area_t1),
            log_area_t0 = log(area_t0),
            log_area_t02 = log(area_t0)^2 )
  # flowering probability data
  flow <- subset(lupine_df, !is.na(flow_t0) ) %>%
    subset( area_t0 != 0) %>%
    mutate( log_area_t0 = log(area_t0),
            log_area_t02 = log(area_t0)^2)
  # fertility (raceme counts) of flowering individuals
  fert <- subset(lupine_df, flow_t0 == 1 ) %>%
    subset( area_t0 != 0) %>%
    subset( !is.na(numrac_t0) ) %>%
    # remove non-flowering indiv.
    subset( !(flow_t0 %in% 0) ) %>%
    mutate( log_area_t0 = log(area_t0),
            log_area_t02 = log(area_t0)^2 ) %>%
    # remove zero fertility (becase fertility should not be 0)
    # NOTE: in many cases, notab_t1 == 0, because numab_t1 == 0 also
    subset( !(numrac_t0 %in% 0) )

  # models ---------------------------------------------------------
  # survival: quadratic predictor or not?
  mod_s1 <- glm(surv_t1 ~ log_area_t0, data=surv, family='binomial')
  mod_s2 <- glm(surv_t1 ~ log_area_t0 + log_area_t02, data=surv, family='binomial')
  # mod_s3 <- glm(surv_t1 ~ log_area_t0 + log_area_t02 + log_area_t03, data=surv, family='binomial')
  mod_l <- list(mod_s1, mod_s2)
  mod_sel <- c(AIC(mod_s1),AIC(mod_s2))
  # keep the model with lowest AIC
  # NOTE(review): on an exact AIC tie which() returns two indices and the
  # `[[` call would error (recursive indexing); which.min() would be safer.
  mod_s <- mod_l[[which(mod_sel == min(mod_sel))]]

  # other models
  mod_g <- lm( log_area_t1 ~ log_area_t0, data=grow )
  # size limits used later as the IPM integration domain
  g_lim <- range(lupine_df$log_area_t0,na.rm=T)
  mod_fl <- glm(flow_t0 ~ log_area_t0, data=flow, family='binomial')
  mod_fr <- glm(numrac_t0 ~ log_area_t0, data=fert, family='poisson')
  # mod_fr <- MASS::glm.nb(numrac_t1 ~ log_area_t1, data=fert )
  # intercept-only models for fruits/raceme and seeds/fruit
  fr_rac <- glm(NumFruits ~ 1, data=fruit_rac, family='poisson')
  seed_fr <- glm(SEEDSPERFRUIT ~ 1,
                 data=mutate(seed_x_fr,
                             # substitute 0 value with really low value (0.01)
                             SEEDSPERFRUIT = replace(SEEDSPERFRUIT,
                                                     SEEDSPERFRUIT == 0,
                                                     0.01) ),
                 family=Gamma(link = "log"))

  # models parameters -------------------------------------------------
  surv_p <- coef(mod_s)
  grow_p <- coef(mod_g)
  # append residual sd of the growth regression as third element
  grow_p <- c(grow_p, summary(mod_g)$sigma)
  flow_p <- coef(mod_fl)
  fert_p <- coef(mod_fr)
  size_sl_p <- sl_size
  # back-transform log-link intercepts to response scale
  fr_rac_p <- coef(fr_rac) %>% exp
  seed_fr_p <- coef(seed_fr) %>% exp
  # site/year-specific raceme consumption (clipping) rate
  cons_p <- cons_df %>%
    subset( year == site_all$year[ii] ) %>%
    subset( location == site_all$site_id[ii] ) %>%
    .$cons
  # site/year-specific raceme abortion rate
  abor_p <- abor_df %>%
    subset( year == site_all$year[ii] ) %>%
    subset( location == site_all$site_id[ii] )
  # germination rates discounted by site-level post-dispersal predation
  germ_p <- germ * (1 - (subset(germ_df, location == site_all$site_id[ii] ) %>%
                           .$post_d_p))

  # model validation plots ---------------------------------------------------
  tiff( paste0('results/ipm/validation/vr/',
               site_all$site_id[ii],'_',
               site_all$year[ii],'.tiff'),
        unit="in", width=6.3, height=6.3, res=600,compression="lzw" )

  par( mfrow=c(2,2), mar=c(3,3,0.1,0.1), mgp=c(1.7,0.5,0) )

  # survival: binned observed proportions with fitted curve overlaid
  plot_binned_prop(surv, 10, log_area_t0, surv_t1)
  coef_s <- coef(mod_s)
  # pad missing quadratic/cubic coefficients with 0 so the prediction
  # formula below works for whichever model AIC selected
  coef_s[3]<- ifelse(coef_s['log_area_t02'] %>%
                       is.na,
                     0,
                     coef_s['log_area_t02'])
  coef_s[4]<- ifelse(coef_s['log_area_t03'] %>%
                       is.na,
                     0,
                     coef_s['log_area_t03'])
  x_seq <- seq(min(surv$log_area_t0),
               max(surv$log_area_t0),by=0.1)
  y_pred <- boot::inv.logit( coef_s[1] +
                             coef_s[2]*x_seq +
                             coef_s[3]*(x_seq^2) +
                             coef_s[4]*(x_seq^3) )
  lines(x_seq, y_pred)

  # growth
  plot(log_area_t1 ~ log_area_t0, data=grow)
  mod_g <- lm( log_area_t1 ~ log_area_t0, data=grow )
  abline(mod_g)

  # flowering
  plot_binned_prop(flow, 10, log_area_t0, flow_t0)
  mod_fl <- glm(flow_t0 ~ log_area_t0, data=flow, family='binomial')
  x_seq <- seq(min(flow$log_area_t0),
               max(flow$log_area_t0),by=0.1)
  y_pred <- boot::inv.logit( coef(mod_fl)[1] +
                             coef(mod_fl)[2]*x_seq )
  lines(x_seq, y_pred)

  # fertility
  plot(numrac_t0 ~ log_area_t0, data=fert)
  x_seq <- seq(min(fert$log_area_t0),
               max(fert$log_area_t0),by=0.1)
  y_pred <- exp( coef(mod_fr)[1] + coef(mod_fr)[2]*x_seq )
  lines(x_seq, y_pred,lwd=3,col='red')

  dev.off()

  # IPM parameters -------------------------------------------------------------
  # function to extract values
  # NOTE(review): extr_value() is defined but never called below.
  extr_value <- function(x, field){ subset(x, type_coef == 'fixef' & ranef == field )$V1 }

  # list of mean IPM parameters.
  pars_mean <- list( # adults vital rates
    surv_b0 = surv_p['(Intercept)'],
    surv_b1 = surv_p['log_area_t0'],
    surv_b2 = ifelse(surv_p['log_area_t02'] %>%
                       is.na,
                     0,
                     surv_p['log_area_t02']),
    #surv_b3 = surv_p['log_area_t03'],
    grow_b0 = grow_p['(Intercept)'],
    grow_b1 = grow_p['log_area_t0'],
    grow_sig = grow_p[3],
    flow_b0 = flow_p['(Intercept)'],
    flow_b1 = flow_p['log_area_t0'],
    fert_b0 = fert_p['(Intercept)'],
    fert_b1 = fert_p['log_area_t0'],
    abort = abor_p$ab_r_m,
    clip = cons_p,
    fruit_rac = fr_rac_p,
    seed_fruit = seed_fr_p,
    g0 = germ_p['g0'],
    g1 = germ_p['g1'],
    g2 = germ_p['g2'],
    recr_sz = size_sl_p$mean_sl_size,
    recr_sd = size_sl_p$sd_sl_size,
    L = g_lim[1], #-0.2415645,
    U = g_lim[2], #9.3550582,
    mat_siz = 200 )

  # IPM functions ------------------------------------------------------------------------------
  inv_logit <- function(x){ exp(x)/(1+exp(x)) }

  # Survival at size x
  sx<-function(x,pars){
    # survival prob. of each x size class
    inv_logit( pars$surv_b0 +
               pars$surv_b1 * x +
               pars$surv_b2 * (x^2) # + pars$surv_b3 * x^3
    )
  }

  # growth (transition) from size x to size y
  gxy <- function(y,x,pars){
    # returns a *probability density distribution* for each x value
    dnorm(y, mean = pars$grow_b0 + pars$grow_b1*x,
          sd = pars$grow_sig)
  }

  # transition: Survival * growth
  pxy<-function(y,x,pars){
    return( sx(x,pars) * gxy(y,x,pars) )
  }

  # production of seeds from x-sized mothers
  fx <-function(x,pars){
    # total racemes prod
    tot_rac <- inv_logit( pars$flow_b0 + pars$flow_b1*x ) *
               exp( pars$fert_b0 + pars$fert_b1*x )
    # viable racs: discount abortion and clipping (consumption)
    viab_rac <- tot_rac * (1 - pars$abort) * (1- pars$clip)
    # viable seeds
    viab_sd <- viab_rac * pars$fruit_rac * pars$seed_fruit
    viab_sd
  }

  # Size distribution of recruits
  recs <-function(y,pars){
    dnorm(y, mean = pars$recr_sz, sd = pars$recr_sd )
  }

  # fecundity kernel: seeds from size x times recruit size density at y
  fxy <- function(y,x,pars){
    fx(x,pars) * recs(y,pars)
  }

  # # IPM kernel/matrix ------------------------------------------------------------
  # kernel_sb <- function(pars){
  #
  #   # set up IPM domains --------------------------------------------------------
  #
  #   # plants
  #   n <- pars$mat_siz
  #   L <- pars$L
  #   U <- pars$U
  #   #these are the upper and lower integration limits
  #   h <- (U-L)/n #Bin size
  #   b <- L+c(0:n)*h #Lower boundaries of bins
  #   y <- 0.5*(b[1:n]+b[2:(n+1)]) #Bins' midpoints
  #   #these are the boundary points (b) and mesh points (y)
  #
  #   # populate kernel ------------------------------------------------------------
  #
  #   # seeds mini matrix
  #   s_mat <- matrix(0,2,2)
  #
  #   # seeds that enter 1 yr-old seed bank
  #   plant_s1 <- fx(y,pars) * (1 - pars$g0)
  #
  #   # no seeds go directly to 2 yr-old seed bank!
  #   plant_s2 <- numeric(n)
  #
  #   # seeds that go directly to seedlings germinate right away
  #   Fmat <- (outer(y,y, fxy, pars) * pars$g0 * h)
  #
  #   # recruits from the 1 yr-old seedbank
  #   s1_rec <- h * recs(y, pars) * pars$g1
  #
  #   # seeds that enter 2 yr-old seed bank
  #   s_mat[2,1] <- (1 - pars$g1)
  #
  #   # recruits from the 2 yr-old seedbank
  #   s2_rec <- h * recs(y, pars) * pars$g2
  #
  #   # survival and growth of adult plants
  #   Tmat <- (outer(y,y,pxy,pars)*h)
  #
  #   # rotate <- function(x) t(apply(x, 2, rev))
  #   # outer(y,y, fxy, pars, h) %>% t %>% rotate %>% image
  #
  #   small_K <- Tmat + Fmat
  #
  #   # Assemble the kernel -------------------------------------------------------------
  #
  #   # top 2 vectors
  #   from_plant <- rbind( rbind( plant_s1, plant_s2),
  #                        small_K )
  #
  #   # leftmost vectors
  #   from_seed <- rbind( s_mat,
  #                       cbind(s1_rec, s2_rec) )
  #
  #   k_yx <- cbind( from_seed, from_plant )
  #
  #   return(k_yx)
  #
  #   # return(small_K)
  #
  # }
  #
  # # simple IPM kernel/matrix ------------------------------------------------------------
  # kernel_s <- function(pars){
  #
  #   # set up IPM domains --------------------------------------------------------
  #
  #   # plants
  #   n <- pars$mat_siz
  #   L <- pars$L
  #   U <- pars$U
  #   #these are the upper and lower integration limits
  #   h <- (U-L)/n #Bin size
  #   b <- L+c(0:n)*h #Lower boundaries of bins
  #   y <- 0.5*(b[1:n]+b[2:(n+1)]) #Bins' midpoints
  #   #these are the boundary points (b) and mesh points (y)
  #
  #   # populate kernel ------------------------------------------------------------
  #
  #   # seeds that go directly to seedlings germinate right away
  #   Fmat <- (outer(y,y, fxy, pars) * pars$g0 * h)
  #
  #   # survival and growth of adult plants
  #   Tmat <- (outer(y,y,pxy,pars)*h)
  #
  #   # rotate <- function(x) t(apply(x, 2, rev))
  #   # outer(y,y, fxy, pars, h) %>% t %>% rotate %>% image
  #
  #   small_K <- Tmat + Fmat
  #
  #   return(small_K)
  #
  # }
  #
  # kernel dangremond: continuous plant stage plus a 2x2 discrete seed-bank
  # block; midpoint rule with mat_siz bins on [L, U]
  kernel_dangre <- function(pars){

    # set up IPM domains --------------------------------------------------------

    # plants
    n <- pars$mat_siz
    L <- pars$L
    U <- pars$U
    #these are the upper and lower integration limits
    h <- (U-L)/n #Bin size
    b <- L+c(0:n)*h #Lower boundaries of bins
    y <- 0.5*(b[1:n]+b[2:(n+1)]) #Bins' midpoints
    #these are the boundary points (b) and mesh points (y)

    # populate kernel ------------------------------------------------------------

    # seeds mini matrix
    s_mat <- matrix(0,2,2)

    # seeds that enter 2 yr-old seed bank
    plant_s2 <- fx(y,pars) * pars$g2

    # seeds that enter 1 yr-old seed bank
    plant_s1 <- fx(y,pars) * pars$g1

    # seeds that go directly to seedlings germinate right away
    Fmat <- (outer(y,y, fxy, pars) * pars$g0 * h)

    # seeds that enter 2 yr-old seed bank
    s_mat[2,1] <- 1

    # recruits from the 1 yr-old seedbank
    s1_rec <- h * recs(y, pars)

    # recruits from the 2 yr-old seedbank
    s2_rec <- h * recs(y, pars)

    # survival and growth of adult plants
    Tmat <- (outer(y,y,pxy,pars)*h)

    # rotate <- function(x) t(apply(x, 2, rev))
    # outer(y,y, fxy, pars, h) %>% t %>% rotate %>% image

    small_K <- Tmat + Fmat

    # Assemble the kernel -------------------------------------------------------------

    # top 2 vectors
    from_plant <- rbind( rbind( plant_s2, plant_s1),
                         small_K )
    # leftmost vectors
    from_seed <- rbind( s_mat,
                        cbind(s1_rec, s2_rec) )
    k_yx <- cbind( from_seed, from_plant )

    return(k_yx)

  }

  # if(sb){
  #   if(dangre){
  #     ker <- kernel_dangre(pars_mean)
  #   }else{
  #     ker <- kernel_sb(pars_mean)
  #   }
  # }else{
  #   ker <- kernel_s( pars_mean)
  # }
  ker <- kernel_dangre(pars_mean)

  # dominant eigenvalue = asymptotic deterministic lambda
  # NOTE(review): '$value' partially matches eigen()'s '$values' component.
  Re(eigen(ker)$value[1])

  # stable stage distribution (w) and reproductive value (v)
  eK=eigen(ker); lambda=Re(eK$values[1]);
  w=Re(eK$vectors[,1]); w=w/sum(w);
  v=Re(eigen(t(ker))$vectors[,1]); v=v/v[1];

  par(mfrow=c(1,1), mar=c(3.5,3.5,0.5,0.5))
  plot(w)

  # calculate h
  n <- pars_mean$mat_siz
  L <- pars_mean$L
  U <- pars_mean$U
  h <- (U-L)/n

  # set up observed frequencies of
  # individuals per size bin (same bins as the kernel mesh)
  freq_v <- lupine_df$log_area_t0 %>%
    cut( breaks=seq(g_lim[1]-0.00001,
                    g_lim[2]+0.00001,
                    length.out=(pars_mean$mat_siz+1)) ) %>%
    table %>%
    as.vector

  # realized lambda: simulated
  # lam_r_s <- sum(ker %*% (freq_v))/sum(freq_v)
  lam_r_s <- NA

  # yearly population counts for this site (all years, full data set)
  pop_n <- read.csv( "data/lupine_all.csv") %>%
    subset( location == site_all$site_id[ii] ) %>%
    subset( !is.na(stage_t0) ) %>%
    # subset( log_area_t0 > 1 ) %>%
    count( year, location )

  # realized lambda: observed
  lam_r <- subset(pop_n, year == site_all$year[ii]+1)$n /
           subset(pop_n, year == site_all$year[ii])$n

  # annotate the w plot with deterministic (top) and observed (bottom) lambda
  legend('topright',
         as.character(round(Re(eigen(ker)$value[1]),3)),
         bty='n')
  legend('bottomleft',
         as.character( lam_r ),
         bty='n')

  print(paste0('end of ', ii))

  list( lam_det = Re(eigen(ker)$value[1]),
        lam_r = lam_r,
        lam_r_s = lam_r_s,
        # germ_est= germ_est,
        # sb = sb,
        year = site_all$year[ii],
        site_id = site_all$site_id[ii])

}
# F,F
# F,T
# T,T
# T,F

# store lambdas: run yr_lambdas() once per site/year transition.
# Generalized: iterate over the rows of site_all instead of the hard-coded
# 1:23 (23 was simply nrow(site_all)), so the loop stays correct if the
# site/year table changes.
lam_df <- lapply(seq_len(nrow(site_all)), yr_lambdas) %>% bind_rows
# germ_est=F, sb=T, dangre=F) %>% bind_rows

# # potential output labes
# label_df <- data.frame( title = c('Seedbank model, experimental data',
#                                   'No seedbank model, experimental data',
#                                   'Seedbank model, estimated data',
#                                   'No seedbank model, estimated data'),
#                         file = c( 'lambda_seedbank_experimental_g',
#                                   'lambda_Noseedbank_experimental_g',
#                                   'lambda_seedbank_estimated_g',
#                                   'lambda_Noseedbank_estimated_g'),
#                         germ_est= c(F,F,T,T),
#                         sb = c(T,F,T,F),
#                         stringsAsFactors = F
#                        )
# # store labels
# plot_lab <- label_df %>%
#   subset( germ_est == first(lam_df$germ_est) &
#           sb == first(lam_df$sb) )
# # store R squared
# mod <- lm(lam_df$lam_det~lam_df$lam_r, offset=)
# R2 <- round(summary(mod)$r.squared, 3)

# plot results: deterministic lambda vs. observed lambda with a 1:1 line.
# NOTE(review): appending ggsave() to the chain with '+' works here only as
# a side effect (ggsave saves the last plot); kept to preserve behavior.
ggplot(lam_df, aes(x=lam_r, y=lam_det)) +
  geom_point() +
  geom_abline( size=1.2 ) +
  xlab(expression(lambda['observed'])) +
  ylab(expression(lambda)) +
  # annotate( 'text', label = paste0('R2= ',R2), x=0.5, y=2) +
  theme( axis.title = element_text( size=25) ) +
  ggtitle( 'Lambda seedbank estimated g, correct abor/cons' ) +
  ggsave( paste0('results/ipm/validation/',
                 'lambda_seedbank_estimated_g_abor_clip','.tiff'),
          width = 6.3, height = 6.3, compression="lzw" )
# compare projected vs. observed population numbers -----------
# project pop numbers
#
# rel_vs_del_lambda(): for one site, build a long-format table comparing
# observed population counts at t+1 with counts projected from the
# deterministic lambdas in the global `lam_df` (needs columns lam_det,
# lam_r, year, site_id).
#
# Arguments:
#   site_id_str -- site identifier string, e.g. 'BS (7)'.
# Returns (invisibly, since the last expression is an assignment) a data
# frame with columns year, location, abund_type ('observed'/'projected')
# and value.
rel_vs_del_lambda <- function(site_id_str){

  # Count individuals with a non-missing stage_t0 per year for this site.
  # Improvement: read the raw CSV once and reuse it -- the original read
  # the same file from disk twice to build the t0 and t1 tables.
  counts <- read.csv( "data/lupine_all.csv") %>%
    subset( location == site_id_str ) %>%
    subset( !is.na(stage_t0) ) %>%
    count( year, location )

  # population numbers observed in year t
  pop_nt0 <- counts %>%
    setNames( c('year','location','n_t0') )

  # the same counts shifted back one year, so they align as "n at t+1"
  pop_nt1 <- counts %>%
    mutate( year = year - 1) %>%
    setNames( c('year','location','n_t1') )

  # one row per transition: n_t0 and the following year's n_t1
  pop_tr1 <- full_join(pop_nt0, pop_nt1)

  # format dataframe to plot info
  n1_df <- subset(lam_df, site_id == site_id_str ) %>%
    select( lam_det, lam_r, year ) %>%
    # join population numbers
    left_join( pop_tr1 ) %>%
    # project population
    mutate( proj_t1 = n_t0 * lam_det ) %>%
    select( year, location, n_t1, proj_t1 ) %>%
    gather(abund_type, value,n_t1 :proj_t1) %>%
    mutate( abund_type = replace(abund_type,
                                 abund_type == 'proj_t1',
                                 'projected') ) %>%
    mutate( abund_type = replace(abund_type,
                                 abund_type == 'n_t1',
                                 'observed') )
}
# sites to
# plot: the distinct sites present in lam_df
site_id_l <- lam_df %>% .$site_id %>% unique

# project populations and put it all together
proj_obs_df <- lapply(site_id_l, rel_vs_del_lambda) %>%
  bind_rows

# plot it all: observed vs. projected population numbers through time,
# one panel per site, saved via ggsave() appended to the chain
ggplot( data=proj_obs_df,
        aes(x=year, y=value) ) +
  geom_line( aes(color=abund_type),
             lwd = 2 ) +
  viridis::scale_color_viridis(discrete=T) +
  ylab( 'Population Number' ) +
  facet_grid( 1 ~ location ) +
  scale_x_continuous(breaks = 0:2100) +
  theme( axis.text.x = element_text( angle = 70) ) +
  ggsave( paste0('results/ipm/validation/',
                 'BS_DR_NB_germ_aborclip.tiff'),
          width=6.3,height=4,compression='lzw')

# # Compare a validation in the literature
# tenhumberg <- read.csv('C:/cloud/Dropbox/lupine/data/tenhumberg_validation.csv')
#
# ggplot(tenhumberg, aes(lam_r, lam_s) ) +
#   geom_point( ) +
#   xlab( expression(lambda['observed']) ) +
#   ylab( expression(lambda['s']) ) +
#   theme( axis.title = element_text( size=25) ) +
#   geom_abline( size = 1.2 ) +
#   ggsave( 'results/validation/tenhumberg_validation.csv.tiff',
#           width = 6.3, height = 6.3, compression="lzw" )

# Commented-out parameter sensitivity/elasticity analysis (kept for
# reference; 'kernel' and 'pars_mean' are not defined at this scope).
# nPar = length(pars_mean);
# sPar = numeric(nPar); # vector to hold parameter sensitivities
# dp = 0.01; # perturbation for calculating sensitivities
#
# for(j in 1:nPar){
#   m.par = pars_mean;
#   m.par[[j]]=m.par[[j]] - dp;
#   IPM.down = kernel(m.par);
#   lambda.down = Re(eigen(IPM.down)$values[1]);
#   m.par[[j]]=m.par[[j]] + 2*dp;
#   IPM.up = kernel(m.par);
#   lambda.up = Re(eigen(IPM.up)$values[1]);
#   sj = (lambda.up-lambda.down)/(2*dp);
#   sPar[j]=sj;
#   cat(j,names(pars_mean)[j],sj,"\n");
# }
#
# graphics.off(); dev.new(width=11,height=6);
# par(mfrow=c(2,1),mar=c(4,2,2,1),mgp=c(2,1,0));
# barplot(sPar,names.arg=names(pars_mean),main="Parameter sensitivities");
# barplot(sPar*abs(pars_mean)/lambda,names.arg=names(pars_mean),main="Parameter elasticities");
|
1ea1dbff888437d23bc7158bc255f5ce567d8640
|
b429e3f69e229130d64b6e4c3c4e8493295f223b
|
/scripts/simulation_misspecified1.R
|
88e7025c81eda199013e7dd9e82ecbcf4167ed6f
|
[] |
no_license
|
quanvu17/deepspat_multivar
|
110bca3a8a16166dcd21ef522b0d506bccccbd8e
|
86123973790c85ea18a4827f60f11d87569579c5
|
refs/heads/master
| 2023-02-25T18:39:37.963131
| 2021-01-27T00:05:32
| 2021-01-27T00:05:32
| 254,308,536
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,806
|
r
|
simulation_misspecified1.R
|
# Reproducible code for "Modeling Nonstationary and Asymmetric
# Multivariate Spatial Covariances via Deformations"
# Copyright (c) 2020 Quan Vu
# Author: Quan Vu, quanv (at) uow.edu.au
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# Load source
# (helper functions such as RMSPE/CRPS and the deepspat model fitting
# functions are expected to come from here / attached packages)
source("scripts/utils.R")

# Simulate dataset: a bivariate Gaussian process on a 101 x 101 grid over
# [-0.5, 0.5]^2 with a (stationary, symmetric) Matern cross-covariance.
length.out <- 101
s <- expand.grid(seq(-0.5, 0.5, length.out = length.out),
                 seq(-0.5, 0.5, length.out = length.out)) %>% as.matrix()
swarped <- s
# pairwise distances between all grid locations
D <- fields::rdist(swarped)
# marginal and cross covariance blocks (parsimonious Matern: nu12 is the
# average of nu1 and nu2)
C11 <- fields::Matern(D, range=0.2, nu=0.5, phi=1)
C12 <- fields::Matern(D, range=0.2, nu=0.75, phi=0.8*0.9*1)
C22 <- fields::Matern(D, range=0.2, nu=1, phi=0.9^2)
C <- rbind(cbind(C11, C12), cbind(t(C12), C22))
# lower Cholesky factor: K %*% N(0, I) draws a sample with covariance C
K <- t(chol(C))
i <- 1
set.seed(i)
y <- K %*% rnorm(nrow(s)*2)
# split the stacked draw into the two processes
y1 <- y[1: nrow(s),]
y2 <- y[(nrow(s)+1) : (nrow(s)*2),]
# add independent measurement noise (sd 0.2 and 0.1 respectively)
z1 <- y1 + 0.2*rnorm(length(y1))
z2 <- y2 + 0.1*rnorm(length(y2))
z <- c(z1, z2)
df <- data.frame(s, y1, y2, z1, z2)
names(df) <- c("s1", "s2", "y1", "y2", "z1", "z2")

# Save data
save(df, file="results/simulation_misspecified1_dataset.rda")

# Sample a subset of data for the experiment
# (sample.kind = "Rounding" restores the pre-R-3.6.0 sample() behavior so
# the original published indices are reproduced)
RNGkind(sample.kind = "Rounding")
set.seed(1)
sam2 <- sample(1:nrow(df), 1000)
train_data <- df[sam2,]
test_data <- dplyr::setdiff(df, train_data)

# training inputs/observations and prediction locations
df <- dplyr::select(train_data, s1, s2, z1, z2)
newdata <- dplyr::select(test_data, s1, s2)
groups_data <- split(newdata, (seq(nrow(newdata))-1) %/% (nrow(newdata)/5))

# warping layers for the nonstationary model (defined in the deepspat
# sources loaded above)
layers <- c(AWU(r = 50, dim = 1L, grad = 200, lims = c(-0.5, 0.5)),
            AWU(r = 50, dim = 2L, grad = 200, lims = c(-0.5, 0.5)),
            RBF_block(),
            LFT())
layers_asym <- c(AFF_2D())

# Fit Model 1 (stationary, symmetric)
t1 <- proc.time()
d1 <- deepspat_bivar_GP(f = z1 + z2 ~ s1 + s2 - 1, data = df, g = ~ 1,
                        family = "matern_stat_symm",
                        method = "REML", nsteps = 150L
)
t2 <- proc.time()
# elapsed wall-clock seconds (third element of proc.time())
time1 <- (t2 - t1)[3]

predML1 <- predict(d1, newdata = newdata)

# Fit Model 2 (nonstationary, asymmetric)
t1 <- proc.time()
d2 <- deepspat_bivar_GP(f = z1 + z2 ~ s1 + s2 - 1, data = df, g = ~ 1,
                        layers = layers, layers_asym = layers_asym,
                        family = "matern_nonstat_asymm",
                        method = "REML", nsteps = 150L
)
t2 <- proc.time()
time2 <- (t2 - t1)[3]

predML2 <- predict(d2, newdata = newdata)

# Save parameter estimates (d2$run evaluates the fitted model's internal
# tensors; names follow the deepspat model object)
eta <- d2$run(d2$eta_tf)
AFF_pars <- d2$run(d2$layers_asym[[1]]$pars)
LFT_pars <- d2$run(d2$layers[[12]]$pars)
scalings <- d2$run(d2$scalings)
scalings_asym <- d2$run(d2$scalings_asym)
nu_1 <- d2$run(d2$nu_tf_1)
nu_2 <- d2$run(d2$nu_tf_2)
sigma2_1 <- d2$run(d2$sigma2_tf_1)
sigma2_2 <- d2$run(d2$sigma2_tf_2)
sigma2_12 <- d2$run(d2$sigma2_tf_12)
l <- as.numeric(d2$run(d2$l_tf_1))
precy_1 <- d2$run(d2$precy_tf_1)
precy_2 <- d2$run(d2$precy_tf_2)
s_warped1 <- d2$run(d2$swarped_tf1)
s_warped2 <- d2$run(d2$swarped_tf2)
beta <- d2$run(d2$beta)
parameter_est <- list(eta, AFF_pars, LFT_pars, scalings, scalings_asym,
                      nu_1, nu_2, sigma2_1, sigma2_2, sigma2_12, l, precy_1, precy_2,
                      s_warped1, s_warped2, beta)
save(parameter_est, file = paste0("results/simulation_misspecified1_parameter_est.rda"))

# attach the true (noise-free) processes to each model's predictions
df_pred1 <- data.frame(predML1$df_pred, y1=test_data$y1, y2=test_data$y2)
df_pred2 <- data.frame(predML2$df_pred, y1=test_data$y1, y2=test_data$y2)

# Save RMSPE and CRPS
# (prediction scores per process and per model; RMSPE/CRPS come from the
# sourced utils)
rmse1_model1 <- RMSPE(df_pred1$y1, df_pred1$pred_mean_1)
rmse2_model1 <- RMSPE(df_pred1$y2, df_pred1$pred_mean_2)
crps1_model1 <- CRPS(df_pred1$y1, df_pred1$pred_mean_1, df_pred1$pred_var_1)
crps2_model1 <- CRPS(df_pred1$y2, df_pred1$pred_mean_2, df_pred1$pred_var_2)
rmse1_model2 <- RMSPE(df_pred2$y1, df_pred2$pred_mean_1)
rmse2_model2 <- RMSPE(df_pred2$y2, df_pred2$pred_mean_2)
crps1_model2 <- CRPS(df_pred2$y1, df_pred2$pred_mean_1, df_pred2$pred_var_1)
crps2_model2 <- CRPS(df_pred2$y2, df_pred2$pred_mean_2, df_pred2$pred_var_2)

# final objective (negative restricted log-likelihood) of each fit
Cost1 <- d1$run(d1$Cost)
Cost2 <- d2$run(d2$Cost)

# Save cross validation results
validation <- list(rmse1_model1, crps1_model1, rmse2_model1, crps2_model1, Cost1, time1,
                   rmse1_model2, crps1_model2, rmse2_model2, crps2_model2, Cost2, time2
)
save(validation, file=paste0("results/simulation_misspecified1_validation.rda"))

# Cross validation results
# (2 x 6 summary: row 1 = Model 1, row 2 = Model 2)
matrix(unlist(lapply(validation, mean)), nrow = 2, byrow=T)
|
29c65aeb964f07108349ae2595194d187151536a
|
7eb031dee3c31a700ab29888a1193dd60f6c093a
|
/R/real.data.experiment.fn.R
|
a6a3bc88159d184c5f02fadf3fb0126d09ea9633
|
[] |
no_license
|
adalisan/OmnibusEmbed
|
1fb2e008d13db049dd079b17ba3dc322f720bd2f
|
61e6016eb4d1168a20037ed67dd87a062f1f5588
|
refs/heads/master
| 2020-12-03T05:08:46.070760
| 2017-07-11T00:17:36
| 2017-07-11T00:17:36
| 95,339,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,371
|
r
|
real.data.experiment.fn.R
|
#' Run a jackknife experiment comparing matched vs. unmatched test pairs
#'
#' @param Diss.E
#' @param Diss.F
#' @param m
#' @param d
#' @param oos
#' @param separability.entries.w
#' @param wt.equalize
#' @param assume.matched.for.oos
#' @param oos.use.imputed
#' @param pom.config
#' @param w.vals
#' @param size
#' @param jack.rep.count
#' @param verbose
#'
#' @return
#' @export
#'
#' @examples
run.jacknife<- function( Diss.E, Diss.F,
m =2 , d,
oos ,separability.entries.w,wt.equalize,
assume.matched.for.oos,oos.use.imputed,
pom.config=NULL,
w.vals, size, jack.rep.count=100,verbose=verbose){
N<-nrow(Diss.E)
test.samp.size<-m
test.stats.jk <-list()
test.stats.jk.CCA <-list()
test.stats.jk.POM <-list()
test.stats.jk.INDSCAL <-list()
test.stats.jk.IDIOSCAL <-list()
w.val.len<- length(w.vals)
n= N- (2*m)
for (jack.i in 1:jack.rep.count){
left.out.samp<- sample(1:N,2*test.samp.size)
test.matched<- left.out.samp[1:test.samp.size]
test.unmatched<- left.out.samp[test.samp.size+(1:test.samp.size)]
test.matched <- sort(test.matched)
test.unmatched <- sort(test.unmatched)
#sample.for.unmatched<- sample(1:n,test.samp.size)
orig.indices<-1:N
#sample.for.unmatched.orig.index<-orig.indices[-left.out.samp][sample.for.unmatched]
T0 <- matrix(0,w.val.len,m) #Test statistics for JOFC under null
TA <- matrix(0,w.val.len,m) #Test statistics for JOFC under alternative
D1<-Diss.E[-left.out.samp,-left.out.samp]
D2<-Diss.F[-left.out.samp,-left.out.samp]
train.test.0<-orig.indices[-left.out.samp]
train.test.0<-c(train.test.0,test.matched)
train.test.A<-orig.indices[-left.out.samp]
train.test.A<-c(train.test.A,test.unmatched)
D10A<- Diss.E[train.test.0,train.test.0]
D20<- Diss.F[train.test.0,train.test.0]
D2A<- Diss.F[train.test.A, train.test.A]
D.oos.1 <- as.matrix(Diss.E[test.matched,test.matched])
D.oos.2.null <- as.matrix(Diss.F[test.matched,test.matched])
D.oos.2.alt <- as.matrix(Diss.F[test.unmatched,test.unmatched])
L.in.oos.0 <- omnibusM.inoos(Diss.E[-left.out.samp,][,test.matched],Diss.F[-left.out.samp,][,test.matched],matrix(0,n,m))
L.in.oos.A <- omnibusM.inoos(Diss.E[-left.out.samp,][,test.matched],Diss.F[-left.out.samp,][,test.unmatched],matrix(0,n,m))
print(str(T0))
print(str(TA))
print(str(D1))
print(str(D2))
print(str(D10A))
print(str(D20))
power.w.star<- 0
print("starting JOFC embedding ")
test.stats.null.alt <- run.jofc( D1, D2, D10A,D20,D2A,
D.oos.1, D.oos.2.null , D.oos.2.alt ,
L.in.oos.0 , L.in.oos.A,n, m, d,
model=NULL, oos, Wchoice="avg", separability.entries.w, wt.equalize,
assume.matched.for.oos,oos.use.imputed,
pom.config=NULL,
w.vals, size, verbose=verbose)
p.dim <- 10
test.stats.null.alt.CCA<- run.cca( D1 = D1, D2 = D2 ,
D10A = D10A, D20= D20, D2A=D2A,
p = p.dim, q = 0, d = d, c.val = 0,
m = m, n= n , pprime1 = p.dim, pprime2 = p.dim,
model = "", oos=TRUE, size,verbose = verbose)
test.stats.null.alt.POM<- run.pom( D1 = D1, D2 = D2 ,
D10A = D10A, D20= D20,
D2A=D2A,
p = p.dim, q = 0,d = d,
c.val = 0, proc.dilation = TRUE,
m = m,
n= n,
model = "", oos=TRUE,
size=size, verbose = verbose)
test.stats.null.alt.INDSCAL<- run.indscal( D1 = D1, D2 = D2 ,
D10A = D10A, D20= D20,
D2A=D2A,
d=d,
m=m,
model = "indscal", oos=TRUE,
size=size, verbose = verbose)
test.stats.null.alt.IDIOSCAL<- run.indscal( D1 = D1, D2 = D2 ,
D10A = D10A, D20= D20,
D2A=D2A,
d=d,
m=m,
model = "idioscal", oos=TRUE,
size=size, verbose = verbose)
if (w.val.len==1){
test.stats.jk$T0 <- c(test.stats.jk$T0, test.stats.null.alt$T0)
test.stats.jk$TA <- c(test.stats.jk$TA, test.stats.null.alt$TA)
test.stats.jk.CCA$T0 <- c(test.stats.jk.CCA$T0, test.stats.null.alt.CCA$T0)
test.stats.jk.CCA$TA <- c(test.stats.jk.CCA$TA, test.stats.null.alt.CCA$TA)
test.stats.jk.POM$T0 <- c(test.stats.jk.POM$T0, test.stats.null.alt.POM$T0)
test.stats.jk.POM$TA <- c(test.stats.jk.POM$TA, test.stats.null.alt.POM$TA)
test.stats.jk.IDIOSCAL$T0 <- c(test.stats.jk.IDIOSCAL$T0, test.stats.null.alt.IDIOSCAL$T0)
test.stats.jk.IDIOSCAL$TA <- c(test.stats.jk.IDIOSCAL$TA, test.stats.null.alt.IDIOSCAL$TA)
test.stats.jk.INDSCAL$T0 <- c(test.stats.jk.INDSCAL$T0, test.stats.null.alt.INDSCAL$T0)
test.stats.jk.INDSCAL$TA <- c(test.stats.jk.INDSCAL$TA, test.stats.null.alt.INDSCAL$TA)
} else {
test.stats.jk$T0 <- cbind(test.stats.jk$T0, test.stats.null.alt$T0)
test.stats.jk$TA <- cbind(test.stats.jk$TA, test.stats.null.alt$TA)
test.stats.jk.CCA$T0 <- cbind(test.stats.jk.CCA$T0, test.stats.null.alt.CCA$T0)
test.stats.jk.CCA$TA <- cbind(test.stats.jk.CCA$TA, test.stats.null.alt.CCA$TA)
test.stats.jk.POM$T0 <- cbind(test.stats.jk.POM$T0, test.stats.null.alt.POM$T0)
test.stats.jk.POM$TA <- cbind(test.stats.jk.POM$TA, test.stats.null.alt.POM$TA)
test.stats.jk.IDIOSCAL$T0 <- cbind(test.stats.jk.IDIOSCAL$T0, test.stats.null.alt.IDIOSCAL$T0)
test.stats.jk.IDIOSCAL$TA <- cbind(test.stats.jk.IDIOSCAL$TA, test.stats.null.alt.IDIOSCAL$TA)
test.stats.jk.INDSCAL$T0 <- cbind(test.stats.jk.INDSCAL$T0, test.stats.null.alt.INDSCAL$T0)
test.stats.jk.INDSCAL$TA <- cbind(test.stats.jk.INDSCAL$TA, test.stats.null.alt.INDSCAL$TA)
}
}
return(list(jofc = test.stats.jk, cca = test.stats.jk.CCA,pom=test.stats.jk.POM,
indscal=test.stats.jk.INDSCAL, idioscal=test.stats.jk.IDIOSCAL ))
}
#' One Monte-Carlo replicate of the matched-testing power study
#'
#' Draws one held-out split (m matched, m unmatched test points), uses a
#' jackknife (\code{run.jacknife}) on the training data to estimate
#' critical values at the requested level, then computes test statistics
#' and empirical power for JOFC, CCA, PoM, INDSCAL and IDIOSCAL.
#'
#' @param m.i replicate index; accepted but not used in the body
#'   (presumably for parallel apply bookkeeping -- confirm with callers).
#' @param N total number of observations in the dissimilarity matrices.
#' @param test.samp.size number of held-out test points per hypothesis (m).
#' @param w.val.len number of JOFC weight values (must equal
#'   \code{length(w.vals)}).
#' @param Diss.E dissimilarity matrix for the first condition (N x N).
#' @param Diss.F dissimilarity matrix for the second condition (N x N).
#' @param d embedding dimension.
#' @param oos out-of-sample embedding flag forwarded to \code{run.jofc}.
#' @param separability.entries.w forwarded to \code{run.jofc}.
#' @param wt.equalize forwarded to \code{run.jofc}.
#' @param assume.matched.for.oos forwarded to \code{run.jofc}.
#' @param oos.use.imputed forwarded to \code{run.jofc}.
#' @param w.vals vector of JOFC weights.
#' @param size vector of nominal test sizes for power computation.
#' @param verbose logical; gates debug printing.
#' @param level.critical.val level at which \code{compute.crit.val}
#'   derives the critical values from the jackknife null statistics.
#'
#' @return list with JOFC results at the top level (\code{T0}, \code{TA},
#'   \code{power.mc}, \code{real.size.mc}, \code{real.power.mc}) and
#'   sublists \code{cca}, \code{pom}, \code{idioscal}, \code{indscal}
#'   with the same fields per method.
#' @export
#'
#' @examples
run.JOFC.match.jacknife.replicate <- function(m.i, N, test.samp.size, w.val.len, Diss.E, Diss.F
                                              , d, oos, separability.entries.w
                                              , wt.equalize, assume.matched.for.oos
                                              , oos.use.imputed, w.vals, size, verbose
                                              , level.critical.val) {
  m <- test.samp.size
  n <- N - 2 * test.samp.size  # training size after holding out 2m rows
  power.mc <- array(0, dim = c(w.val.len, length(size)))
  # Hold out 2m indices: first m "matched", last m "unmatched".
  left.out.samp <- sample(1:N, 2 * test.samp.size)
  test.matched <- left.out.samp[1:test.samp.size]
  test.unmatched <- left.out.samp[test.samp.size + (1:test.samp.size)]
  test.matched <- sort(test.matched)
  test.unmatched <- sort(test.unmatched)
  #sample.for.unmatched<- sample(1:n,test.samp.size)
  orig.indices <- 1:N
  #sample.for.unmatched.orig.index<-orig.indices[-left.out.samp][sample.for.unmatched]
  T0 <- matrix(0, w.val.len, m) #Test statistics for JOFC under null
  TA <- matrix(0, w.val.len, m) #Test statistics for JOFC under alternative
  # Training dissimilarities and augmented (train + test) versions.
  D1 <- Diss.E[-left.out.samp, -left.out.samp]
  D2 <- Diss.F[-left.out.samp, -left.out.samp]
  train.test.0 <- orig.indices[-left.out.samp]
  train.test.0 <- c(train.test.0, test.matched)
  train.test.A <- orig.indices[-left.out.samp]
  train.test.A <- c(train.test.A, test.unmatched)
  D10A <- Diss.E[train.test.0, train.test.0]
  D20 <- Diss.F[train.test.0, train.test.0]
  D2A <- Diss.F[train.test.A, train.test.A]
  # Out-of-sample blocks among the held-out points.
  D.oos.1 <- Diss.E[test.matched, test.matched]
  D.oos.2.null <- Diss.F[test.matched, test.matched]
  D.oos.2.alt <- Diss.F[test.unmatched, test.unmatched]
  # In-sample-to-out-of-sample blocks for the omnibus matrix.
  L.in.oos.0 <- omnibusM.inoos(Diss.E[-left.out.samp, ][, test.matched], Diss.F[-left.out.samp, ][, test.matched], matrix(0, n, m))
  L.in.oos.A <- omnibusM.inoos(Diss.E[-left.out.samp, ][, test.matched], Diss.F[-left.out.samp, ][, test.unmatched], matrix(0, n, m))
  # NOTE(review): the two ideal omnibus matrices below are computed but
  # never used afterwards -- candidates for removal.
  ideal.omnibus.0 <- omnibusM(omnibusM(D1, D2, matrix(0, n, n)), omnibusM(D.oos.1, D.oos.2.null, matrix(0, m, m)), L.in.oos.0)
  ideal.omnibus.A <- omnibusM(omnibusM(D1, D2, matrix(0, n, n)), omnibusM(D.oos.1, D.oos.2.alt, matrix(0, m, m)), L.in.oos.A)
  if (verbose) print(str(T0))
  if (verbose) print(str(TA))
  if (verbose) print(str(D1))
  if (verbose) print(str(D2))
  if (verbose) print(str(D10A))
  if (verbose) print(str(D20))
  power.w.star <- 0
  print("starting JOFC embedding ")
  # Jackknife on the training data to estimate the null distribution of
  # the statistics. NOTE(review): the jackknife uses m = 1 held-out point
  # per hypothesis even when the test uses m > 1 -- confirm intended.
  jacknife.res <- run.jacknife(D1, D2, m = 1, d,
                               oos, separability.entries.w, wt.equalize,
                               assume.matched.for.oos, oos.use.imputed,
                               pom.config = NULL,
                               w.vals, size, jack.rep.count = 100, verbose = verbose)
  # Per-method critical values at the requested level.
  T.crit.val <- compute.crit.val(jacknife.res$jofc$T0, level.critical.val)
  T.crit.val.cca <- compute.crit.val(jacknife.res$cca$T0, level.critical.val)
  T.crit.val.pom <- compute.crit.val(jacknife.res$pom$T0, level.critical.val)
  T.crit.val.indscal <- compute.crit.val(jacknife.res$indscal$T0, level.critical.val)
  T.crit.val.idioscal <- compute.crit.val(jacknife.res$idioscal$T0, level.critical.val)
  JOFC.results <- run.jofc(D1, D2, D10A, D20, D2A,
                           D.oos.1, D.oos.2.null, D.oos.2.alt,
                           L.in.oos.0, L.in.oos.A, n, m, d,
                           model = NULL, oos, Wchoice = "avg", separability.entries.w, wt.equalize,
                           assume.matched.for.oos, oos.use.imputed,
                           pom.config = NULL,
                           w.vals = w.vals, size, verbose = verbose)
  if (verbose) print("JOFC test statistic complete \n")
  CCA.results <- run.cca(D1 = D1, D2 = D2,
                         D10A = D10A, D20 = D20, D2A = D2A,
                         p = 5, q = 0, d = d, c.val = 0,
                         m = m, n = n, pprime1 = 5, pprime2 = 5,
                         model = "", oos = TRUE, size = size, verbose = verbose)
  POM.results <- run.pom(D1 = D1, D2 = D2,
                         D10A = D10A, D20 = D20,
                         D2A = D2A,
                         p = d, q = 0, d = d, c.val = 0, proc.dilation = TRUE,
                         m = m,
                         n = n,
                         model = "", oos = TRUE,
                         size = size, verbose = verbose)
  INDSCAL.results <- run.indscal(D1 = D1, D2 = D2,
                                 D10A = D10A, D20 = D20,
                                 D2A = D2A,
                                 d = d,
                                 m = m,
                                 model = "indscal", oos = TRUE,
                                 size = size, verbose = verbose)
  IDIOSCAL.results <- run.indscal(D1 = D1, D2 = D2,
                                  D10A = D10A, D20 = D20,
                                  D2A = D2A,
                                  d = d,
                                  m = m,
                                  model = "idioscal", oos = TRUE,
                                  size = size, verbose = verbose)
  # --- JOFC power/size summaries, one row per weight value ---
  T0 <- JOFC.results$T0
  TA <- JOFC.results$TA
  power.mc <- array(0, dim = c(w.val.len, length(size)))
  real.size.mc <- array(0, dim = w.val.len)
  power.at.real.size.mc <- array(0, dim = w.val.len)
  for (l in 1:w.val.len) {
    w.val.l <- w.vals[l]
    # NOTE(review): the two assignments below overwrite the whole
    # real.size.mc / power.at.real.size.mc objects on every iteration
    # instead of filling element [l]; only the last weight survives --
    # suspected bug, left as-is.
    real.size.mc <- get_reject_rate(T0[l, ], T.crit.val)
    power.at.real.size.mc <- get_reject_rate(TA[l, ], T.crit.val)
    power.l <- get_power(T0[l, ], TA[l, ], size)
    power.mc[l, ] <- power.l
  }
  # --- CCA summaries ---
  T0.cca <- CCA.results$T0
  TA.cca <- CCA.results$TA
  power.mc.CCA <- array(0, dim = c(1, length(size)))
  real.size.mc.CCA <- array(0, dim = c(1))
  power.at.real.size.mc.CCA <- array(0, dim = 1)
  real.size.mc.CCA <- get_reject_rate(T0.cca, T.crit.val.cca)
  power.at.real.size.mc.CCA <- get_reject_rate(TA.cca, T.crit.val.cca)
  power.mc.CCA <- get_power(T0.cca, TA.cca, size)
  # --- PoM summaries ---
  T0.pom <- POM.results$T0
  TA.pom <- POM.results$TA
  power.mc.POM <- array(0, dim = c(1, length(size)))
  real.size.mc.POM <- array(0, dim = c(1))
  power.at.real.size.mc.POM <- array(0, dim = c(1))
  real.size.mc.POM <- get_reject_rate(T0.pom, T.crit.val.pom)
  power.at.real.size.mc.POM <- get_reject_rate(TA.pom, T.crit.val.pom)
  power.mc.POM <- get_power(T0.pom, TA.pom, size)
  # --- INDSCAL summaries ---
  T0.indscal <- INDSCAL.results$T0
  TA.indscal <- INDSCAL.results$TA
  power.mc.INDSCAL <- array(0, dim = c(1, length(size)))
  real.size.mc.INDSCAL <- array(0, dim = c(1))
  power.at.real.size.mc.INDSCAL <- array(0, dim = 1)
  real.size.mc.INDSCAL <- get_reject_rate(T0.indscal, T.crit.val.indscal)
  power.at.real.size.mc.INDSCAL <- get_reject_rate(TA.indscal, T.crit.val.indscal)
  power.mc.INDSCAL <- get_power(T0.indscal, TA.indscal, size)
  # --- IDIOSCAL summaries ---
  T0.idioscal <- IDIOSCAL.results$T0
  TA.idioscal <- IDIOSCAL.results$TA
  power.mc.IDIOSCAL <- array(0, dim = c(1, length(size)))
  real.size.mc.IDIOSCAL <- array(0, dim = c(1))
  power.at.real.size.mc.IDIOSCAL <- array(0, dim = 1)
  real.size.mc.IDIOSCAL <- get_reject_rate(T0.idioscal, T.crit.val.idioscal)
  power.at.real.size.mc.IDIOSCAL <- get_reject_rate(TA.idioscal, T.crit.val.idioscal)
  power.mc.IDIOSCAL <- get_power(T0.idioscal, TA.idioscal, size)
  return(list(T0 = T0, TA = TA, power.mc = power.mc,
              real.size.mc = real.size.mc, real.power.mc = power.at.real.size.mc,
              cca = list(T0 = T0.cca, TA = TA.cca, power.mc = power.mc.CCA,
                         real.size.mc = real.size.mc.CCA, real.power.mc = power.at.real.size.mc.CCA
              ),
              pom = list(T0 = T0.pom, TA = TA.pom, power.mc = power.mc.POM,
                         real.size.mc = real.size.mc.POM, real.power.mc = power.at.real.size.mc.POM
              ),
              idioscal = list(T0 = T0.idioscal, TA = TA.idioscal, power.mc = power.mc.IDIOSCAL,
                              real.size.mc = real.size.mc.IDIOSCAL, real.power.mc = power.at.real.size.mc.IDIOSCAL
              ),
              indscal = list(T0 = T0.indscal, TA = TA.indscal, power.mc = power.mc.INDSCAL,
                             real.size.mc = real.size.mc.INDSCAL, real.power.mc = power.at.real.size.mc.INDSCAL
              )
  )
  )
}
|
b51d1df041d22ec7c2c984d31b3902d4f64af38e
|
ee788a605dfd2b054cb4dc5d769728babfb5dd92
|
/man/TextReuseTextDocument-accessors.Rd
|
34633225fb72b27641ad4e12e1cb5d51e5b8efd5
|
[
"MIT"
] |
permissive
|
felipegonzalez/textreuse
|
e49236ef00cf1a4a33bfbbeb28d40f2e078658da
|
789fcdae7aa76ca9c207bc0ed41ff0dcf20feb5a
|
refs/heads/master
| 2021-05-16T16:10:41.929945
| 2018-02-01T16:16:59
| 2018-02-01T16:16:59
| 119,786,401
| 0
| 0
| null | 2018-02-01T05:17:04
| 2018-02-01T05:17:04
| null |
UTF-8
|
R
| false
| true
| 713
|
rd
|
TextReuseTextDocument-accessors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TextReuseTextDocument.R
\name{TextReuseTextDocument-accessors}
\alias{TextReuseTextDocument-accessors}
\alias{hashes}
\alias{hashes<-}
\alias{minhashes}
\alias{minhashes<-}
\alias{tokens}
\alias{tokens<-}
\title{Accessors for TextReuse objects}
\usage{
tokens(x)
tokens(x) <- value
hashes(x)
hashes(x) <- value
minhashes(x)
minhashes(x) <- value
}
\arguments{
\item{x}{The object to acess.}
\item{value}{The value to assign.}
}
\value{
Either a vector or a named list of vectors.
}
\description{
Accessor functions to read and write components of
\code{\link{TextReuseTextDocument}} and \code{\link{TextReuseCorpus}}
objects.
}
|
1eeab4f290e69b831d1112636c1bb63ff3679d75
|
bfc4dde4eac32663f768e75edde091d20676308f
|
/R/print.mitml.testConstraints.R
|
1ac3dcfdb7b28ebbb9126089cf3c3d9cfa3d0cf5
|
[] |
no_license
|
simongrund1/mitml
|
f33b4fda8e929a8652146ca7bcd8011d34f9ebc6
|
4f1e20daccf45da1ee157b3e2e78d7b250fd8203
|
refs/heads/master
| 2023-03-21T09:20:56.636023
| 2023-03-10T15:42:30
| 2023-03-10T15:42:30
| 68,100,636
| 28
| 9
| null | 2021-10-05T09:25:07
| 2016-09-13T11:01:02
|
R
|
UTF-8
|
R
| false
| false
| 1,587
|
r
|
print.mitml.testConstraints.R
|
print.mitml.testConstraints <- function(x, digits = 3, sci.limit = 5, ...){
  # Print method for pooled hypothesis tests of parameter constraints
  # (objects of class mitml.testConstraints): shows the call, the
  # constrained estimates with standard errors, the combination method,
  # the test table, and (for D1) a note on degrees-of-freedom adjustment.
  emit_table <- function(tab) {
    # Write each pre-formatted table row with a two-space left margin.
    for (r in seq_len(nrow(tab))) cat(" ", tab[r, ], "\n")
  }
  # Header: original call and number of imputations.
  cat("\nCall:\n", paste(deparse(x$call)), sep = "\n")
  cat("\nHypothesis test calculated from", x$m, "imputed data sets. The following\nconstraints were specified:\n\n")
  # Constrained estimates: pooled point estimates and SEs from the
  # pooled covariance matrix.
  est <- cbind(x$Qbar, sqrt(diag(x$T)))
  dimnames(est) <- list(paste0(x$constraints, ":"), c("Estimate", "Std. Error"))
  emit_table(.formatTable(est, digits = digits, sci.limit = sci.limit))
  cat("\nCombination method:", x$method, "\n\n")
  # Test table: second column (df) printed without decimals.
  tst <- x$test
  emit_table(.formatTable(tst,
                          digits = c(digits, 0, rep(digits, ncol(tst) - 2)),
                          sci.limit = sci.limit))
  # Footer only applies to the D1 combination method.
  if (x$method == "D1") {
    cat("\n")
    if (x$adj.df) {
      cat(c("Hypothesis test adjusted for small samples with",
            paste0("df=[", paste(x$df.com, collapse = ","), "]\ncomplete-data degrees of freedom.")))
    } else {
      cat("Unadjusted hypothesis test as appropriate in larger samples.")
    }
    cat("\n")
  }
  cat("\n")
  invisible()
}
summary.mitml.testConstraints <- function(object, ...){
  # Summary method for objects of class mitml.testConstraints.
  # Delegates directly to the print method, so summary() and print()
  # produce identical console output for this class.
  print.mitml.testConstraints(object, ...)
}
|
4341bea4343e7e955541e557b69294151b2edf86
|
bcce8dcb8fb272b7a5bbfa226dd877b53212413b
|
/predict-admission/explore/investigate-models-new.R
|
0c5352593f9458cfe153baaa476846c2d8e94979
|
[
"BSD-3-Clause"
] |
permissive
|
joefarrington/EDcrowding
|
b04eb9e3c328f7ab2d251e70eb5a6427a4a5566b
|
59e664da4ff3db9a21bde89d1e909ea3d829f8ed
|
refs/heads/master
| 2023-05-05T13:59:37.230430
| 2021-05-05T18:20:14
| 2021-05-05T18:20:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,533
|
r
|
investigate-models-new.R
|
# Create functions --------------------------------------------------------
madcap <- function(pred){
  # Build the data for a MADCAP plot (ref: MADCAP section in Model
  # Evaluation.docx): patients are ordered by predicted admission risk and
  # cumulative expected (model) and observed (data) admission counts are
  # accumulated.
  #
  # pred: table (data.frame or data.table) with columns `truth` (factor or
  #       0/1 numeric) and `prob.1` (predicted probability of class "1").
  # Returns a data.frame with columns:
  #   x  - rank of each patient when sorted by prob.1 (ascending)
  #   y1 - running sum of predicted probabilities (model expectation)
  #   y2 - running sum of observed admissions
  #
  # Unlike the earlier data.table version, this does not modify `pred`
  # by reference; the caller's `truth` column is left untouched.
  truth_num <- as.numeric(pred$truth)
  # A factor truth is coded 1/2 by as.numeric(); recode code 2 to 0.
  # NOTE(review): assumes the positive class ("1") is the first factor
  # level, as produced by the mlr3 predictions upstream -- confirm.
  truth_num <- ifelse(truth_num == 2, 0, truth_num)
  ord <- order(pred$prob.1)            # step 1: sort by predicted risk
  prob_sorted <- pred$prob.1[ord]
  truth_sorted <- truth_num[ord]
  # steps 2-4: cumulative expectation / observation, vectorised with
  # cumsum() (replaces the previous O(n^2) loop that grew vectors with c()).
  data.frame(x = seq_along(ord),
             y1 = cumsum(prob_sorted),
             y2 = cumsum(truth_sorted))
}
# Load data ---------------------------------------------------------------
# Use run_MLR.R to generate dm000p datasets and tasks
# Expects previously saved model scores and predictions from today's run.
scores_file <- paste0("~/EDcrowding/predict-admission/data-output/scores_",today(),".rda")
load(scores_file)
preds_file <- paste0("~/EDcrowding/predict-admission/data-output/preds_",today(),".rda")
load(preds_file)
# Plot madcap ---------------------------------
# Accumulate one MADCAP data frame per timeslice (relies on a global
# `timeslices` vector defined elsewhere -- TODO confirm).
all_madcap = data.table()
preds <- preds[tuning_round == "nrounds" & dttm > '2021-02-22 11:00:09']
for (ts_ in timeslices) {
  name_tsk <- paste0("task", ts_)
  # score predictions on training and validation set
  pred_val = preds[timeslice == name_tsk & tsk_ids == "val"]
  # get data for madcap
  mc_result = as.data.table(madcap(pred_val))
  mc_result[, model := ts_]
  # mc_result[, distribution := case_when(x < nrow(mc_result)/3 ~ "lower",
  #                                       x > nrow(mc_result)*2/3 ~ "upper",
  #                                       TRUE ~ "middle")]
  all_madcap = bind_rows(all_madcap, mc_result)
}
# single madcap plot
# NOTE(review): `pred_val060` is not created in this file -- presumably
# defined interactively; confirm before sourcing top-to-bottom.
pred_val060 = pred_val060[order(pred_val060$prob.1),]
mc_result = madcap(pred_val060[3001:nrow(pred_val060),])
ggplot(mc_result, aes(x))+
  geom_line(aes(y = y1, colour = "model"), size = 1) +
  geom_line(aes(y = y2, colour = "data"), size = 1) +
  scale_color_manual(breaks = c('model','data'), values = c('model'='red','data'='black')) +
  labs(x='No. of patients (ordered by risk factor)',y='Number of admissions',
       title = paste0("Madcap plot for timeslice ", as.numeric(ts_))) +
  theme(legend.title = element_blank())
# all timeslices
ggplot(all_madcap, aes(x))+
  geom_line(aes(y = y1, colour = "model"), size = 1) +
  geom_line(aes(y = y2, colour = "data"), size = 1) +
  scale_color_manual(breaks = c('model','data'), values = c('model'='red','data'='black')) +
  labs(x='No. of patients (ordered by risk factor)',y='Number of admissions',
       title = paste0("Madcap plot for timeslice - predictions on validation set")) +
  theme(legend.title = element_blank()) +
  facet_wrap(vars(model), nrow = 3, ncol = 3, scales = "free")
# all timeslices divided into thirds
# NOTE(review): requires the `distribution` column computed by the
# commented-out case_when() above; fails as written.
ggplot(all_madcap, aes(x))+
  geom_line(aes(y = y1, colour = "model"), size = 1) +
  geom_line(aes(y = y2, colour = "data"), size = 1) +
  scale_color_manual(breaks = c('model','data'), values = c('model'='red','data'='black')) +
  labs(x='No. of patients (ordered by risk factor)',y='Number of admissions',
       title = paste0("Madcap plot for timeslice ")) +
  theme(legend.title = element_blank()) +
  facet_grid(distribution ~ model, scales = "free")
# Looking at effect of different number of nrounds ------------------------
# Run nrounds in run-ML.R and save predictions for training set to preds
# NB this is for just one timeslice
all_madcap = data.table()
for (nrounds in c(30, 45, 60, 75)) {
  pred_train <- preds[param_value == nrounds]
  mc_result = as.data.table(madcap(pred_train))
  mc_result[, param_value := nrounds]
  all_madcap = bind_rows(all_madcap, mc_result)
}
# all timeslices
all_madcap %>% ggplot(aes(x))+
  geom_line(aes(y = y1, colour = "model"), size = 1) +
  geom_line(aes(y = y2, colour = "data"), size = 1) +
  scale_color_manual(breaks = c('model','data'), values = c('model'='red','data'='black')) +
  labs(x='No. of patients (ordered by risk factor)',y='Number of admissions',
       title = paste0("Madcap plot for nround value ")) +
  theme(legend.title = element_blank()) +
  facet_wrap(vars(param_value), scales = "free")
# find observations which changed a lot
# NOTE(review): the next line is duplicated (same min1 computation twice);
# the second was probably meant to be dropped or to compute another stat.
preds[, min1 := min(prob.1), by = .(timeslice, row_id)]
preds[, min1 := min(prob.1), by = .(timeslice, row_id)]
preds[, max1 := max(prob.1), by = .(timeslice, row_id)]
preds[, diff1 := max1 - min1, by = .(timeslice, row_id)]
# order by diff1 descending to see the samples whose probs changed the most
# Brier score per timeslice/parameter value.
preds[, brier := ((truth == 1)-prob.1)^2] # (truth == 1 is converting the factor into value 0 or 1)
brier = preds[, .(SSE = sum(brier), N= .N), by = .(timeslice,param_value)]
brier[, brier := SSE/N]
# to get reference value for brier scores
ref_prob =sum(dm000p[tsk_train_ids, adm == 1])/sum(dm000p[tsk_train_ids, adm == 0])
preds[, brier_r := ((truth == 1) - ref_prob)^2]
preds[, .(SSE = sum(brier), SSE_r = sum(brier_r)), by = param_value]
# according to mlr3 documentation bbrier is
# Look at individual predictions ------------------------------------------
# from http://ema.drwhy.ai/shapley.html
# Exploratory: explain single predictions with DALEX breakdown plots.
library(DALEX)
# Retrieve the task and train/validation row ids for the current
# timeslice (relies on `ts_` left over from the loops above).
name_tsk <- paste0("task", ts_)
tsk = get(name_tsk)
tsk_train_ids = get(paste0(name_tsk, "_train_ids"))
tsk_val_ids = get(paste0(name_tsk, "_val_ids"))
learner = lrn("classif.xgboost", predict_type = "prob")
learner <- train_learner(learner, tsk, tsk_train_ids, nrounds = 30)
# NOTE(review): leftover from an iml/DALEX tutorial example -- `Predictor`,
# `x` and `penguins` are undefined here; this line fails as written.
model = Predictor$new(learner, data = x, y = penguins$species)
name_ts <- paste0("dm", ts_, "p")
tsp = get(name_ts)
# Validation-set response and features (label column removed).
y_ = tsp[tsk_val_ids, adm]
data_ = tsp[tsk_val_ids]
data_[, adm:= NULL]
# create explainer
explain_ <- DALEX::explain(model = learner,
                           data = data_,
                           y = y_) # note y_ has value 1 for admitted and 2 for discharged
pred_val = preds[model == name_tsk]
pred_val[truth == 0 & response == 1]
# understand predictions for a particular case
bd <- predict_parts(explainer = explain_,
                    new_observation = tsp[42,],
                    type = "break_down")
p = plot(bd)
p + labs(title = "Breakdown plot for a false positive in timeslice 30 () ",
         subtitle = "Green denotes features pushing the predictions towards 2 (which is discharge)")
# xgboost gain-based feature importances for the trained learner.
imp <- data.table(learner$importance())
imp$feature <- names(learner$importance())
# # train with best parameters
#
# imp_results <- data.table()
# pred_results <- data.table()
#
# for (ts_ in timeslices) {
#
#   name_ <- paste0("instance", ts_)
#   instance = get(name_)
#   learner$param_set$values = instance$result_learner_param_vals
#
#   name_ <- paste0("task", ts_)
#   tsk = get(name_)
#   tsk_train_ids = get(paste0(name_, "_train_ids"))
#   tsk_val_ids = get(paste0(name_, "_val_ids"))
#
#   learner$train(tsk, row_ids = tsk_train_ids)
#
#   # save importances
#   imp <- data.table(learner$importance())
#   imp$feature <- names(learner$importance())
#   imp[, model := name_]
#   imp_results <- bind_rows(imp_results, imp)
#
#
#   # predict
#   pred = as.data.table(learner$predict(tsk, row_ids = tsk_val_ids))
#   pred_ = data.table(pred$P)
#   pred[, model := name_]
#   pred_results <- bind_rows(pred_results, pred)
# }
#
# Heatmap of feature importances across timeslices, excluding the
# periodic time-of-day / quarter dummies.
imps = imps[dttm > '2021-02-22 11:00:09']
imps[, count := .N, by = feature]
imps[count >10 & tsk_ids == "val" & !feature %in% c("quarter_1", "quarter_2", "quarter_3", "quarter_4",
                                                    "tod_1", "tod_2", "tod_3", "tod_4", "tod_5", "tod_6")] %>%
  ggplot(aes(x = gsub("task","", timeslice), y = reorder(feature, desc(feature)), fill = importance)) + geom_tile() +
  scale_fill_gradient(low="white", high="red") +
  labs(title = "Feature importances by timeslice",
       fill = "Importance",
       x = "Timeslice",
       y = "Feature")
#
# save(imp_results, file = paste0("EDcrowding/predict-admission/data-output/imp_results",today(),".rda"))
# save(pred_results, file = paste0("EDcrowding/predict-admission/data-output/pred_results",today(),".rda"))
# Plots of results --------------------------------------------------------
# Best validation AUC per model and tuning parameter stage.
scores_table %>% filter(param_ != "max_depth") %>%
  mutate(param_ = factor(param_, levels = c("base", "nrounds", "min_child_weight"))) %>%
  group_by(model, param_) %>% summarise(max_ = max(val_auc)) %>%
  ggplot(aes(col = model, y = max_, x = param_, group = model)) + geom_line() +
  facet_grid(. ~ model)
# I managed to create mean shap values by exluding the adm label which otherwise causes an error
shap_values = shap.values(learner$model, dm360p[tsk_train_ids, 3:ncol(dm360p)])
shap_tibble <- as_tibble(labels(shap_values$mean_shap_score)) %>% rename(Feature = value) %>%
  bind_cols(Mean_Shap = shap_values$mean_shap_score)
# Compare gain-based and SHAP-based importances side by side.
importance %>% left_join(shap_tibble) %>%
  pivot_longer(Gain:Mean_Shap, names_to = "importance_type", values_to = "values") %>%
  mutate(Feature = fct_reorder(Feature, values)) %>%
  ggplot(aes(x = Feature, y = values, fill = importance_type)) + geom_bar(stat = "identity") +
  facet_wrap( ~ importance_type) + coord_flip() + theme(legend.position = "none") +
  labs(title = "Model importances for 60 min timeslice excluding admission characteristics - Post Surge1")
# train leaner on task
# NOTE(review): `by = row_id` passes an (undefined) symbol rather than the
# column name string "row_id" -- suspected bug; confirm merge() usage.
pred = as.data.table(learner$predict(tsk, row_ids = tsk_val_ids))
x = merge(dm360p[tsk_val_ids], pred, by = row_id)
|
78d7f7ae63fa7abaf7ad5462f908493bbc9cb25a
|
13e0b0c688fae191e7369959b2ae56ab2d3106f7
|
/man/compute_laurent.Rd
|
84d52c5610c2e6c1e564954661d482ce15157a1c
|
[] |
no_license
|
cran/slim
|
855c4a3486b3caf04fea91aa8ee5377e6e71efb2
|
0684337ee2008e939554676bca0b1d16bf34d951
|
refs/heads/master
| 2020-12-24T12:01:43.998162
| 2017-05-15T05:39:33
| 2017-05-15T05:39:33
| 73,098,208
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 753
|
rd
|
compute_laurent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim.R
\name{compute_laurent}
\alias{compute_laurent}
\title{Laurent Expansion of Inverse of Linear Matrix Function}
\usage{
compute_laurent(V, zapsmall = TRUE)
}
\arguments{
\item{V}{for some integer m >= 1, an array of dimension (m, m, 2), where V[,
, 1] is the intercept and V[, , 2] is the slope of the linear matrix
function.}
\item{zapsmall}{logical: should zapsmall be called on the result? Default TRUE.}
}
\value{
array of dimension (m, m, 2), where W[, , 1] corresponds to the
exponent -1, and W[, , 2] corresponds to the exponent 0.
}
\description{
This function computes the first two terms of the Laurent expansion of the
inverse of a linear matrix function.
}
|
a7280aadb7f503fc56ac654c1f392764bda5ad49
|
98c75b9b5b62a0eb351360e686c3d83959e26006
|
/category_subsampling/0_trial/code/functions_ss_plot.R
|
296302fe9ba6064edb5a830389c7c82d43bfeda8
|
[] |
no_license
|
weigcdsb/sub-sampling
|
8e570c1f969833cfda1bf142e3c6b309ae188746
|
dbd4a538085029e9aa8683fba2205f49199f42d9
|
refs/heads/master
| 2023-03-22T22:47:29.078121
| 2021-03-24T01:09:53
| 2021-03-24T01:09:53
| 307,103,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,186
|
r
|
functions_ss_plot.R
|
#################################
#### common functions
weighted.MLE <- function(X, y, ssp, maxIter = 1000){
  # Weighted maximum-likelihood estimate for logistic regression via
  # Newton-Raphson, with observations inversely weighted by their
  # subsampling probabilities `ssp` (inverse-probability correction).
  #
  # X: n x p design matrix; y: 0/1 response vector;
  # ssp: subsampling probabilities (scalar or length-n, recycled);
  # maxIter: iteration cap.
  # Returns the p x 1 coefficient matrix, or NULL (with a warning)
  # if the algorithm fails to converge within maxIter iterations.
  beta <- rep(0, ncol(X))
  update <- Inf
  iter <- 0
  while ((sum(update^2) > 1e-6) && (iter < maxIter)) {
    eta <- X %*% beta
    # plogis() is a numerically stable inverse logit; the previous
    # exp(eta)/(1 + exp(eta)) overflowed to NaN for large eta.
    prob <- c(plogis(eta))
    # Weighted observed information and score; one Newton step.
    OP <- t(X) %*% (prob * (1 - prob) * X / ssp)
    update <- solve(OP) %*% apply((y - prob) * X / ssp, 2, sum)
    beta <- beta + update
    iter <- iter + 1
  }
  # Decide on actual convergence of the last step, not iteration count
  # (the old `iter < maxIter` check misreported a run that converged
  # exactly on the final allowed iteration).
  if (sum(update^2) <= 1e-6) {
    beta
  } else {
    warning("weighted.MLE did not converge in ", maxIter, " iterations",
            call. = FALSE)
    NULL
  }
}
pilot <- function(X, y, r0){
  # Draw a case-control pilot subsample of size r0 (with replacement):
  # cases (y == 1) and controls (y == 0) each receive total sampling
  # mass 1/2, then fit the weighted pilot logistic regression.
  #
  # X: n x p design matrix; y: 0/1 response; r0: pilot sample size.
  # Returns a list: beta (pilot estimate from weighted.MLE), ssp
  # (sampling probabilities of the drawn rows), idx (drawn row indices),
  # and the subsampled X and y.
  n <- nrow(X)  # BUG FIX: previously read an undefined global `n`
  n1 <- sum(y)
  n0 <- n - n1
  pilot.ssp <- rep(1 / (2 * n0), n)
  pilot.ssp[y == 1] <- 1 / (2 * n1)
  pilot.idx <- sample(seq_len(n), r0, replace = TRUE, prob = pilot.ssp)
  pilot.y <- y[pilot.idx]
  pilot.X <- X[pilot.idx, ]
  pilot.ssp.star <- pilot.ssp[pilot.idx]
  return(list(beta = weighted.MLE(pilot.X, pilot.y, pilot.ssp.star),
              ssp = pilot.ssp.star,
              idx = pilot.idx,
              X = pilot.X, y = pilot.y))
}
#################################
#### different sub-sampling procedure
## uniform, A-optimization & L-optimization
ss_core <- function(y, X, S = 1000, r0 = 200,
                    r = c(100, 200, 300, 500, 700, 1000)){
  # Compare subsampling strategies for logistic regression -- uniform,
  # mMSE (A-optimal) and mVc (L-optimal) probabilities -- against the
  # full-data bootstrap.
  #
  # y: 0/1 response vector; X: n x p design matrix (no intercept added).
  # S: number of bootstrap / subsampling replications.
  # r0: pilot subsample size; r: vector of second-stage subsample sizes.
  #
  # Returns a list with `r` and the bootstrap MSEs `unif.mse`,
  # `mMSE.mse`, `mVC.mse`, `full.mse`; each MSE is the mean over
  # replications of ||X (beta.boot - beta.mle)||^2 / (n - p).
  #
  # Requires the sibling helpers pilot() and weighted.MLE().
  n <- dim(X)[1]
  nBeta <- dim(X)[2]
  # Full-data MLE used as the reference for all MSEs.
  beta.mle <- glm(y ~ X - 1,
                  family = binomial(link = 'logit'))$coefficients
  ## (a) full data: plain bootstrap of the full-data MLE
  full.beta.boot <- matrix(NA, nrow = S, ncol = nBeta)
  for (i in seq_len(S)) {
    tmp.idx <- sample(1:n, n, replace = TRUE)
    tmp.y <- y[tmp.idx]
    tmp.X <- X[tmp.idx, ]
    # BUG FIX: argument was misspelled `famil =` and relied on R's
    # partial argument matching.
    full.beta.boot[i, ] <- glm(tmp.y ~ tmp.X - 1,
                               family = binomial(link = 'logit'))$coefficients
  }
  full.mse <- mean(apply((X %*% t(full.beta.boot -
                                    matrix(rep(beta.mle, S),
                                           nrow = S, byrow = TRUE)))^2,
                         2, sum)) / (n - nBeta)
  ## (b) different SSPs
  # b.1 uniform subsampling; total budget r[i] + r0 so it is comparable
  # with the two-stage (pilot + second-stage) schemes below.
  unif.mse <- rep(NA, length(r))
  for (i in seq_along(r)) {
    unif.r.total <- r[i] + r0
    unif.beta.boot <- matrix(NA, nrow = S, ncol = nBeta)
    for (j in seq_len(S)) {
      unif.idx <- sample(1:n, unif.r.total, replace = TRUE)
      unif.y <- y[unif.idx]
      unif.X <- X[unif.idx, ]
      unif.ssp <- 1/n
      unif.beta.boot[j, ] <- t(weighted.MLE(unif.X, unif.y, unif.ssp))
    }
    unif.mse[i] <- mean(apply((X %*% t(unif.beta.boot -
                                         matrix(rep(beta.mle, S),
                                                nrow = S, byrow = TRUE)))^2,
                              2, sum)) / (n - nBeta)
  }
  # b.2 mMSE (A-optimal) subsampling probabilities
  mMSE.mse <- rep(NA, length(r))
  for (i in seq_along(r)) {
    mMSE.beta.boot <- matrix(NA, nrow = S, ncol = nBeta)
    for (j in seq_len(S)) {
      pilot.result <- pilot(X, y, r0)
      pilot.beta <- pilot.result$beta
      pilot.eta <- X %*% pilot.beta
      pilot.prob <- exp(pilot.eta) / (1 + exp(pilot.eta))
      pilot.prob.sub <- pilot.prob[pilot.result$idx]
      # Inverse of the weighted observed information at the pilot fit.
      pilot.W <- solve(t(pilot.result$X) %*% (pilot.result$X *
                                                pilot.prob.sub * (1 - pilot.prob.sub) /
                                                pilot.result$ssp))
      mMSE.ssp <- abs(y - pilot.prob) * sqrt(apply((X %*% pilot.W)^2, 1, sum))
      mMSE.ssp <- mMSE.ssp / sum(mMSE.ssp)
      mMSE.idx <- sample(1:n, r[i], replace = TRUE, prob = mMSE.ssp)
      # combined second-stage + pilot subsamples
      mMSE.y <- y[c(mMSE.idx, pilot.result$idx)]
      mMSE.X <- X[c(mMSE.idx, pilot.result$idx), ]
      mMSE.ssp.star <- c(mMSE.ssp[mMSE.idx], pilot.result$ssp)
      mMSE.beta.boot[j, ] <- t(weighted.MLE(mMSE.X, mMSE.y, mMSE.ssp.star))
    }
    mMSE.mse[i] <- mean(apply((X %*% t(mMSE.beta.boot -
                                         matrix(rep(beta.mle, S),
                                                nrow = S, byrow = TRUE)))^2,
                              2, sum)) / (n - nBeta)
  }
  # b.3 mVc (L-optimal) subsampling probabilities
  mVC.mse <- rep(NA, length(r))
  for (i in seq_along(r)) {
    mVC.beta.boot <- matrix(NA, nrow = S, ncol = nBeta)
    for (j in seq_len(S)) {
      pilot.result <- pilot(X, y, r0)
      pilot.beta <- pilot.result$beta
      pilot.eta <- X %*% pilot.beta
      pilot.prob <- exp(pilot.eta) / (1 + exp(pilot.eta))
      mVC.ssp <- abs(y - pilot.prob) * sqrt(apply(X^2, 1, sum))
      mVC.ssp <- mVC.ssp / sum(mVC.ssp)
      mVC.idx <- sample(1:n, r[i], replace = TRUE, prob = mVC.ssp)
      # combined second-stage + pilot subsamples
      mVC.y <- y[c(mVC.idx, pilot.result$idx)]
      mVC.X <- X[c(mVC.idx, pilot.result$idx), ]
      mVC.ssp.star <- c(mVC.ssp[mVC.idx], pilot.result$ssp)
      mVC.beta.boot[j, ] <- t(weighted.MLE(mVC.X, mVC.y, mVC.ssp.star))
    }
    mVC.mse[i] <- mean(apply((X %*% t(mVC.beta.boot -
                                        matrix(rep(beta.mle, S),
                                               nrow = S, byrow = TRUE)))^2,
                             2, sum)) / (n - nBeta)
  }
  return(list(r = r,
              unif.mse = unif.mse,
              mMSE.mse = mMSE.mse,
              mVC.mse = mVC.mse,
              full.mse = full.mse))
}
#################################
## plot 1
subsmaple_plot <- function(ss_mses){
  # Draw MSE-vs-r curves for the three subsampling strategies (uniform,
  # mMSE, mVc) with the full-data MSE as a dashed horizontal reference.
  # `ss_mses` is a list as returned by ss_core().
  invisible(with(ss_mses, {
    plot(r, unif.mse, type = 'b', ylim = c(0, 1),
         col = 1, pch = "1", lwd = 2,
         main = 'mzNormal', ylab = 'MSE')
    lines(r, mMSE.mse, type = 'b', col = 2, pch = "2", lwd = 2)
    lines(r, mVC.mse, type = 'b', col = 3, pch = "3", lwd = 2)
    abline(h = full.mse, lty = 2, lwd = 2, col = 1)
    legend('topright', lwd = 2,
           lty = c(rep(1, 3), 2), col = c(1:3, 1),
           pch = c(as.character(1:3), NA),
           legend = c('uniform', 'mMSE', 'mVc', 'full'))
  }))
}
## compare plot
compPlot <- function(ss_mses1, ss_mses2,
                     code1, code2, ylim){
  # Overlay the MSE-vs-r curves of two experiments (code1 vs code2):
  # solid lines / lty 1 for the first, dashed / lty 2 for the second,
  # one colour and plotting character per subsampling method, plus two
  # horizontal reference lines for the full-data MSEs.
  runs <- list(ss_mses1, ss_mses2)
  methods <- c("unif.mse", "mMSE.mse", "mVC.mse")
  for (k in seq_along(methods)) {
    for (s in 1:2) {
      obj <- runs[[s]]
      if (k == 1 && s == 1) {
        # First curve sets up the axes and title.
        plot(obj$r, obj[[methods[k]]], type = 'b', ylim = ylim,
             col = k, pch = as.character(k), lwd = 2, lty = s,
             xlab = 'r', ylab = 'MSE', main = paste(code1, 'vs.', code2))
      } else {
        lines(obj$r, obj[[methods[k]]],
              type = 'b', col = k, pch = as.character(k), lwd = 2, lty = s)
      }
    }
  }
  abline(h = ss_mses1$full.mse, lty = 1, lwd = 2, col = 4)
  abline(h = ss_mses2$full.mse, lty = 2, lwd = 2, col = 4)
  labels <- c(paste('uniform:', c(code1, code2)),
              paste('mMSE:', c(code1, code2)),
              paste('mVc:', c(code1, code2)),
              paste('full:', c(code1, code2)))
  legend('topright', lwd = 2,
         lty = rep(1:2, 4), col = rep(1:4, rep(2, 4)),
         pch = c(as.character(rep(1:3, rep(2, 3))), NA, NA),
         legend = labels)
}
|
803b9e4d5f72d3d8430677cfc65fcd25d30c9e6c
|
cb5a28b5c2e21f9eef15cbbe802db5c802a5e81f
|
/code/schedR/R/summaryStats.R
|
f6c14f97be18cde0890e6c152eff46283fb285af
|
[] |
no_license
|
johnjosephhorton/sched
|
96f50b255daa5546e66e26a6b22a74ed1f942e5b
|
a5066c4ff1d123c77450b9688a8f5a377e1539c5
|
refs/heads/master
| 2021-01-13T16:31:48.438858
| 2017-01-19T05:07:20
| 2017-01-19T05:07:20
| 44,535,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
summaryStats.R
|
#' Yearly summary statistics of working time, conditional upon working
#'
#' Restricts the packaged \code{df_act_work} data to work activities,
#' aggregates shifts to one row per worker and year (minutes worked, number
#' of shifts, span of the working day), then returns yearly means with
#' standard errors of those quantities.
#'
#' @return data.frame (a data.table with one row per year)
#' @export
summaryStats <- function(){
  data("df_act_work")
  df_act_work <- data.table(subset(df_act_work, work))
  # Integer worker id derived from the respondent identifier TUCASEID.
  df_act_work$id <- with(df_act_work, as.numeric(factor(TUCASEID)))
  df_act_work[, shift.length := t.end - t.start]
  df_act_work[, day.length := max(t.end) - min(t.start), by = id]
  # Per-worker, per-year aggregates.
  df.by.worker <- df_act_work[, list(minutes.worked = sum(shift.length),
                                     num.shifts = .N,
                                     minutes.work.range = max(t.end) - min(t.start),
                                     day.start = min(t.start),
                                     day.end = max(t.end)
                                     ), by = list(id, year)]
  # Yearly means and standard errors (sd / sqrt(n of workers that year)).
  df.by.year <- df.by.worker[, list(mean.minutes.worked = mean(minutes.worked),
                                    mean.day.start = mean(day.start),
                                    se.day.start = sd(day.start)/sqrt(.N),
                                    se.day.end = sd(day.end)/sqrt(.N),
                                    mean.day.end = mean(day.end),
                                    se.minutes.worked = sd(minutes.worked)/sqrt(.N),
                                    mean.work.range = mean(minutes.work.range),
                                    se.work.range = sd(minutes.work.range)/sqrt(.N),
                                    mean.num.shifts = mean(num.shifts),
                                    se.num.shifts = sd(num.shifts)/sqrt(.N),
                                    num.obs = .N),
                             by = list(year)]
  df.by.year
}
|
0ddcca4a3054998c12685145e80b19fa90e16dbb
|
5fdcdc5f734696f363f58a0cfc5e0a960cee8859
|
/man/GeneExampleData.Rd
|
82c4eb1471c9a7eae3cb87b533623d181e2a9377
|
[] |
no_license
|
lengning/EBSeqHMM
|
4411d43582afc1c0e14306ce2aaa6e3e80250ef6
|
6b6efdb7faea6283cfe1710dfd8087ce2aa15319
|
refs/heads/master
| 2021-01-21T21:43:32.107304
| 2016-03-21T16:10:00
| 2016-03-21T16:10:00
| 31,791,187
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 718
|
rd
|
GeneExampleData.Rd
|
\name{GeneExampleData}
\alias{GeneExampleData}
\docType{data}
\title{
Simulated gene level data set with 5 ordered conditions
}
\description{
'GeneExampleData' gives the gene level simulated data with 5 ordered conditions, triplicates for each condition.
The data set was simulated following the Negative Binomial distribution. The parameters of each gene (mean and overdispersion)
were sampled from empirical estimates derived from an RNA-Seq data set from the Thomson lab at the Morgridge Institute for
Research.
}
\format{
GeneExampleData is a matrix with 100 genes (rows) and 15 samples (columns).
}
\seealso{
IsoExampleList
}
\examples{
data(GeneExampleData)
str(GeneExampleData)
}
\keyword{datasets}
|
99ae69d2d0522b732c2f68872e1355e551ceff53
|
8375d88db0ec7f04d04dd79385754ec831d03ee8
|
/R/PMDA.coxph.R
|
093f76dc9385f0b84b6d1c1b129fa40d4a9bb203
|
[] |
no_license
|
cran/MIICD
|
4346c8879f4049253ad6dce6061ff78f53ba86b3
|
4d9c804c12352edf4d18028091ad09b9bde259a4
|
refs/heads/master
| 2021-01-15T15:47:54.639481
| 2017-05-27T15:20:01
| 2017-05-27T15:20:01
| 19,073,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,061
|
r
|
PMDA.coxph.R
|
## Poor Man's Data Augmentation for the Cox proportional hazards model with
## interval-censored data.
##
## Runs k iterations; in each one it (1) imputes m exact event times for every
## interval-censored observation from the current baseline survival estimate
## s0, (2) fits a weighted Cox model to each of the m augmented data sets via
## get.est(), and (3) pools the m estimates Rubin-style (within-variance W
## plus inflated between-variance B) before updating s0 and the linear
## predictor.
##
## Arguments:
##   formula - covariate formula for the Cox regression
##   data    - data set with interval bounds `left` / `right`
##             (right == Inf marks right censoring)
##   k       - number of data-augmentation iterations
##   m       - number of imputations per iteration
## Returns a list: per-iteration coefficients (`beta`), per-iteration
## variances (`sigmac`), final pooled covariance (`vcov`) and the final
## baseline survival estimate (`s0`).
## NOTE(review): depends on package-internal helpers preproc.coxph,
## MI.surv_1, get.est and BBS, which are not visible in this file.
PMDA.coxph <-
function(formula , data , k , m ){
  # Split the data into interval-censored (data_int) and exactly observed
  # (data_fix) parts; `or` restores the original row order, `I` maps rows.
  prep<-preproc.coxph( data , m )
  data_int <- prep$data2
  data_fix <-prep$data1
  or<-prep$or
  I<-prep$I
  mm<-model.matrix(formula , data)
  # Build readable coefficient names of the form "variable: level".
  nc<-sapply(sapply(strsplit( as.character(formula)[2] , '\\+' ) , function(x) gsub(" " , "", x) ),nchar)
  nc2<-sapply(colnames(mm)[-1],nchar)
  sub1<-substr( colnames(mm)[-1] , nc+1 , nc2 )
  sub2<-paste(names(nc),sub1,sep=': ')
  colnames(mm) <- c( colnames( mm )[ 1 ] , sub2 )
  dim_beta<-ncol(mm)-1
  beta<-matrix(0,ncol=dim_beta,nrow=1)
  dn<-dimnames(mm)[[2]][-1]
  # Step 1: initial baseline survival estimate from multiple imputation,
  # with `diff` holding the probability mass at each grid time.
  s0 <- MI.surv_1( m = m , data = data , conf.int = F )$est
  s0$diff<-c(0,diff(1-s0$surv))
  # Initial linear predictors (beta starts at zero).
  Z<-mm[,-1]%*%t(beta)
  # Step 2: iterate imputation + estimation + pooling.
  cat('\nIterates\n' )
  beta_iter<-matrix(NA,ncol=k,nrow=dim_beta,dimnames=list(dn,1:k))
  sigmac_iter<-matrix(NA,ncol=k,nrow=dim_beta,dimnames=list(dn,1:k))
  # Progress bar (purely cosmetic; the fraction shown is arbitrary).
  i<-0
  pb <- txtProgressBar(style = 2 , char = '.' )
  repeat {
    i<-i+1
    setTxtProgressBar(pb , i%%4/150 )
    if( i > k ){setTxtProgressBar(pb , 0.02 )
    break }
    # Candidate event times per observation: grid points of s0 that fall
    # inside the observation's censoring interval.
    ss1<-apply(data_int , 1 , function(x ) subset( s0 , time >= as.numeric(x['left']) & time <= as.numeric(x['right']) ) )
    tk2<-sapply(seq_len(nrow(data_int)) ,function(X) ss1[[X]]$time)
    Z <- matrix( rep( Z[,1] , m ) , ncol = m , byrow = F )
    # Draw m imputed event times per interval-censored observation, with
    # probabilities proportional to the baseline mass raised to exp(Z).
    samples <- sapply( seq_len( nrow( data_int ) ) , function( X ) {
      pk2 <- sapply( 1:m , function( x ) ss1[[ X ]]$diff^exp( Z[ I , ][ X , x ] ) )
      pk2 <- matrix(pk2,ncol=m)
      apply( pk2 , 2 , function( x ){
        # Fall back to the right endpoint when no interior mass is available.
        if( sum(unlist(x)) & length(unlist(x)) > 1 ) sample( tk2[[ X ]] , size = 1 , prob = ( x ) )
        else data_int[ X , 'right' ] } ) } )
    samples <- matrix( unlist( samples ) , ncol = m , byrow = T )
    # Recombine imputed and exactly observed rows in the original order.
    samples2<-rbind(samples,data_fix)[or,]
    times<-as.vector(samples2)
    # Fit one weighted Cox model per augmented data set.
    est_1 <- apply( samples2 , 2 , get.est , data , formula )
    # Collect the m coefficient vectors into a dim_beta x m matrix.
    betas <- matrix( unlist( sapply( est_1 , function( x ) x$beta ) ) , nrow = dim_beta , dimnames = list( dn , 1:m ) )
    # Pooled point estimate: mean over the m augmented data sets.
    beta <- rowMeans( matrix( unlist( sapply( est_1 , function( x ) x$beta ) ) , nrow = dim_beta , dimnames = list( dn , 1:m) ) )
    # Stack the m covariance matrices into a 3-d array.
    sigma <- array( sapply( est_1 , function( x ) x$sigma ) , dim = c( dim_beta , dim_beta , m ) )
    # Within-imputation variance W.
    W <- apply( sigma , c( 1 , 2 ) , mean )
    # Update the baseline survival estimate from the augmented data.
    keep <- as.vector(sapply(strsplit( as.character(formula)[2] , '\\+' ) , function(x) gsub(' ' , '', x) ))
    adf<-as.data.frame(sapply(keep,function(x) as.data.frame((rep(data[,x],m)))))
    colnames(adf)<-keep
    r2<-as.numeric(rep( data$right != Inf , m ))
    adf2<-data.frame(times,status=r2,adf)
    s0<-BBS(formula = formula , time = 'times' , status = 'status' , data = adf2 , beta = beta)
    # Anchor the curve at (0, 1) and extend it flat to the last time.
    s0 <- rbind( c( time = 0 , surv = 1 ) , s0 , c(max(times),tail(s0$surv,1)))
    s0$surv[is.na(s0$surv)] <- 0
    s0$diff<-c(0,diff(1-s0$surv))
    # Between-imputation variance B with inflation factor (1 + 1/m).
    B <- ( 1 + ( 1 / m ) ) * ( ( betas - beta ) %*% t( betas - beta ) / ( m - 1 ) )
    # Pooled (total) variance matrix.
    sigmac <- W + B
    # Update the linear predictor for the next iteration.
    Z <- mm[ , -1 ]%*%as.matrix( beta )
    beta_iter[ , i ] <- beta
    sigmac_iter[ , i ] <- diag( sigmac )
  }
  close( pb )
  ret<-list( beta = beta_iter , sigmac = sigmac_iter , vcov = sigmac , s0 = s0 )
  return( ret )
}
|
b56826c9d4844449ccc81c64b7ed8f1ec74b5ac5
|
8f6257bdc76982b9c699a467d01358a00ff3b3dd
|
/cachematrix.R
|
f05ea5c38835b354190cfbdac8a07e3c7d915ed0
|
[] |
no_license
|
cmukhtmu/ProgrammingAssignment2
|
a12cb91ab3976580c952c92ad771a15ae8bb22fc
|
ae8fb642f307d1c7c03c4021f41ee2bc6469f69f
|
refs/heads/master
| 2020-04-14T22:28:19.489006
| 2019-01-05T00:19:03
| 2019-01-05T00:19:03
| 164,163,947
| 0
| 0
| null | 2019-01-05T00:19:05
| 2019-01-04T23:15:52
|
R
|
UTF-8
|
R
| false
| false
| 1,183
|
r
|
cachematrix.R
|
## Functions in this program allows us to create a special kind of matrix.
## This special kind of matrix can cache itself.
## The second function allows us to calculate inverse of this special matrix.
## Once inverse is calculated, the 2nd function calls one of the sub-functions of 1st function to get/set the cached value.
## Cache-enabled matrix wrapper.
## Returns a list of four closures sharing a single environment:
##   set / get                     - replace and read the stored matrix
##                                   (set also invalidates the cached inverse)
##   setinversematrix / getinversematrix - store and read the cached inverse
##                                   (NULL until one has been stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinversematrix = function(imatrix) cached_inverse <<- imatrix,
    getinversematrix = function() cached_inverse
  )
}
## Compute (and memoise) the inverse of a matrix wrapped by makeCacheMatrix().
## On a cache hit the stored inverse is returned after emitting the
## "getting cached data" message; otherwise the inverse is computed with
## solve(), stored back into the cache object, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinversematrix()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinversematrix(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
99cb4886acdfd7160e00933e782f27e5cda7af95
|
cdaef8cecedebdba8da7a4d14a918ed247af9685
|
/capstonefinal.R
|
f0bccf1280891c418f521aec7f8e48c6f504f71e
|
[] |
no_license
|
VamshiKanderao420/VamshiKanderao
|
456a2dd54083817fe0939811c06d715dfe4cdd9d
|
1547f48fdbea4fb8aff69e838af35e2ee3032cc0
|
refs/heads/master
| 2020-07-09T16:57:36.374535
| 2019-08-28T13:09:59
| 2019-08-28T13:09:59
| 204,027,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,879
|
r
|
capstonefinal.R
|
# clear the list
# rm(list = ls())
# libraries required
library(shiny)
library(RSQLite)
library(shinythemes)
library(shinydashboard)
library(DT)
library(DBI)
library(dbplyr)
library(dplyr)
library(tidyverse)
library(ggplot2)
library(rmarkdown)
library(rJava)
library(gridExtra)
library(rsconnect)
library(here)
Logged = FALSE
ui <- fluidPage(theme = shinytheme("superhero"),
navbarPage(id = "mainpage1",
strong("Welcome to my capstone project"),
navlistPanel( id = "Navp", widths =c(2, 10),
tabPanel(
title = "Main Menu",
id = "Home",
verbatimTextOutput("HomeInfo"),
br(),
br(),
br(),br(),br(),
strong("Welcome to your Lab Data, Please sign in/sign Up inorder to review/analyse the data")
,br(), br(),br(), br(), br(),br()
,br(), br(),br(), br(), br(),br()
,h4 ("Sample Submission System")
),
tabPanel(
title = "Sign in",
tagList(
div(id = "login",
wellPanel(textInput("userName", "Username"),
passwordInput("passwd", "Password"),
br(),actionButton("Login", "Log in"),
verbatimTextOutput("dataInfo")
)),
tags$style(type="text/css", "login {font-size:10px; text-align: left;position:absolute;top: 40%;left: 50%;margin-top: -100px;margin-left: -150px;}")
)
),
tabPanel(title = "New User",
h1(strong("New User Registration:")),
tagList(
div(id = "NewUser",
wellPanel(
textInput('Name', 'Full Name:', width = '100%', placeholder = "Enter your name"),
textInput ('Role', 'Role:', "customer", width = '100%'),
textInput('CustID', 'User ID:', width = '100%', placeholder = "Enter User ID"),
passwordInput('Password', 'Password:', width = '100%'),
br(),
actionButton("submit", "Submit"),
actionButton("cancel", "Cancel")
)
),
tags$style(type="text/css", "login {font-size:10px; text-align: left;position:absolute;top: 40%;left: 50%;margin-top: -100px;margin-left: -150px;}")
)
),
tabPanel(title = "Submitter",
h1(strong("Order your Test")),
tagList(
div(id = "Customer",
wellPanel(
verbatimTextOutput("CustInfo"),
htmlOutput("CustID"),
selectInput("gender", "Gender:",c("Male", "Female")),
dateInput("RequestDate", "Request Date", format = "yyyy-mm-dd"),
htmlOutput("TestName"),
htmlOutput("LabLocation"),
br(),actionButton("order", "Order"))),
tags$style(type="text/css", "login {font-size:10px; text-align: left;position:absolute;top: 40%;left: 50%;margin-top: -100px;margin-left: -150px;}")
)
),
############ Analyst page page UI
tabPanel(title = "Analyst",
h1(strong("Analyst Page")),
navbarPage("", id = "analystpage",
verbatimTextOutput("AnaInfo"),
frow1 <- fluidRow(
title = "Test Results"
,actionButton("displayResults", label = "Display Records")
,actionButton("action", label = "Update Records")
,br(),br()
, width = "1100px"
,status = "primary"
,solidHeader = TRUE
,collapsible = TRUE
,label = "View Results" ### )
,DTOutput("Results", height = "300px", width = "1100px")
),
tabPanel("Add New Test Types", id= "testtypes",
tagList(
div(id = "TestTypes", br(), br(),
wellPanel(
br(),
textInput ("TestName", "Test Name:"),
br(),actionButton("save", "Save"))),
tags$style(type="text/css", "login {font-size:10px; text-align: left;position:absolute;top: 40%;left: 50%;margin-top: -100px;margin-left: -150px;}")
)
)
)
),
tabPanel(title = "Dash board",
dashboardPage(
dashboardHeader(),
dashboardSidebar(
selectInput("LabLoc", "Lab Location:", choices = c("Mercy Hospital", "Wash U School of medicine")),
radioButtons(inputId = "Ttype", label = h3("Test Type"),
choices = list("Lipid Profile" , "Glucose"),selected = 'Glucose'),
fluidRow(column(2, verbatimTextOutput("Ttype"))),
fluidRow(
column(3,
radioButtons(inputId = "Sex", label = h3("Gender"),
choices = list("Male" , "Female"),selected = 'Male')
)),
fluidRow(
column(3, verbatimTextOutput("Sex"))
)
),
dashboardBody(
fluidRow(valueBoxOutput("value1"),
valueBoxOutput("value2"),
valueBoxOutput("value3"),
br(),br(),br(),
downloadButton('downloadpdf')
),
fluidRow(
box(
title = "Max cholesterol/Diabetic By Location"
,status = "primary"
,solidHeader = TRUE
,collapsible = TRUE
,plotOutput("MaxTestResultsbyType", height = "300px")
)
,box(
title = "cholesterol by Customer"
,status = "primary"
,solidHeader = TRUE
,collapsible = TRUE
,plotOutput("TestResultsPerCustomer", height = "300px")
)
)
)
)
)
, tabPanel(actionButton("Logout", "Logout") )
)
),
uiOutput("page")
)
# Absolute path to the SQLite database backing the app.
# NOTE(review): hard-coded, machine-specific Windows path — consider a
# relative path (or here::here(), already loaded above) so the app runs on
# other machines.
sqlitePath <- "C:/Users/18127/Documents/Capstonefinaldb/data.sqlite"
# Input-widget IDs harvested from the UI when saving each kind of record.
NewUserRegistration <- c("Password","Name","CustID","Role")
NewTestTypes <- c("TestName")
NewTestOrder <- c("CustID","gender", "RequestDate","TestName","LabLocation", "Test1Std")
server <- function(input, output, session) {
hideTab(inputId = "Navp", target = "Dash board")
hideTab(inputId = "Navp", target = "Analyst")
hideTab(inputId = "Navp", target = "Submitter")
Logged = FALSE
formData <- reactive({
data <- sapply(NewUserRegistration, function(x) input[[x]])
data
})
table <- "USERS"
observeEvent(input$submit, {
saveData(formData())
updateNavlistPanel(session, "Navp", selected = "Login")
})
saveData <- function(data) {
db <- dbConnect(SQLite(), sqlitePath)
query <- sprintf(
"INSERT INTO %s (%s) VALUES ('%s')",
table,
paste(names(data), collapse = ", "),
paste(data, collapse = "', '")
)
# Submit the update query and disconnect
dbGetQuery(db, query)
dbDisconnect(db)
}
newTestFormData <- reactive({
newtestdata <- sapply(NewTestTypes, function(x) input[[x]])
newtestdata
})
table1 <- "TestTypes"
observeEvent(input$save, {
saveTestData(newTestFormData())
})
saveTestData <- function(newtestdata) {
db <- dbConnect(SQLite(), sqlitePath)
query <- sprintf(
"INSERT INTO %s (%s) VALUES ('%s')",
table1,
paste(names(newtestdata), collapse = ", "),
paste(newtestdata, collapse = "', '")
)
dbGetQuery(db, query)
dbDisconnect(db)
}
TestOrderFormData <- reactive({
orderdata <- sapply(NewTestOrder, function(x) input[[x]])
orderdata$RequestDate <- as.character(orderdata$RequestDate)
if (orderdata$TestName == "Glucose") {
orderdata$Test1 <- "Fasting"
orderdata$Test1Std <- "60 - 100 mg/dL"
orderdata$Test2 <- "Post-2Hrs"
orderdata$Test2Std <- "120 - 180 mg/dL"
}
if (orderdata$TestName == "Lipid Profile") {
orderdata$Test1 <- "Cholesterol"
orderdata$Test1Std <- "<200 mg/dL"
orderdata$Test2 <- "Triglycerides"
orderdata$Test2Std <- "<150 mg/dL"
orderdata$Test3 <- "HDL Cholesterol"
orderdata$Test3Std <- ">40 mg/dL"
orderdata$Test4 <- "LDL Calculated"
orderdata$Test4Std <- "<130 mg/dL"
}
orderdata
})
ordertable <- "TestResults"
observeEvent(input$order, {
if (input$CustID =="None") {
output$CustInfo <- renderText({"Please login . Thank you!! "})
return()
}
saveOrderData(TestOrderFormData())
})
saveOrderData <- function(orderdata) {
db <- dbConnect(SQLite(), sqlitePath)
query <- sprintf(
"INSERT INTO %s (%s) VALUES ('%s')",
ordertable,
paste(names(orderdata), collapse = ", "),
paste(orderdata, collapse = "', '")
)
dbGetQuery(db, query)
dbDisconnect(db)
output$CustInfo <- renderText({"You have successfully placed your tests. Thank you!!!!"})
return()
}
observeEvent(input$cancel, {
updateTextInput(session, "Name", value = '')
updateTextInput(session, "CustID", value = '')
updateTextInput(session, "Password", value = '')
})
USER <- reactiveValues(Logged = Logged)
inputdata <- reactive({
validate(need(isolate(input$userName) == "", "Please Enter User name"))
})
observeEvent(input$Login, {
output$dataInfo <- renderText({""})
### Check if user already logged in
if (USER$Logged) {
output$dataInfo <- renderText(stop({"You have already logged in!!!!!!"}))
return()
}
if(input$userName == "" & input$passwd == "") {
output$dataInfo <- renderText({"Please check your credentials"})
return()
}
if(input$userName == "" ) {
output$dataInfo <- renderText({"Please check your User"})
return()
}
if(input$passwd == "") {
output$dataInfo <- renderText({"Please check your password"})
return()
}
if (USER$Logged == FALSE) {
if (!is.null(input$Login)) {
if (input$Login > 0) {
Username <- isolate(input$userName)
Password <- isolate(input$passwd)
query <- sprintf({"
SELECT CustID, Role
FROM USERS
WHERE CustID ='%s' and Password ='%s'"},
Username, Password, serialize=F)
db <- dbConnect(SQLite(), sqlitePath)
userrec <- dbGetQuery(db, query)
dbDisconnect(db)
if (length(userrec$CustID) == 0 ) {
output$dataInfo <- renderText({"If you are a new CUSTOMER please sign up first"})
return()
} else {
if ( userrec$CustID == Username ) {
USER$Logged <- TRUE}
successInfo <- cbind ("You have successfully logged on now as", Username)
output$HomeInfo <- renderText({successInfo})
output$CustInfo <- renderText({successInfo})
output$dataInfo <- renderText({""})
}
}
}
}
if (USER$Logged == TRUE)
{
output$CustID <- renderUI({
selectInput("CustID", "Customer ID", userrec$CustID) })
updateTextInput(session, "userName", value = '')
updateTextInput(session, "passwd", value = '')
if ( userrec$Role == "analyst" ) {
showTab(inputId = "Navp", target = "Dash board")
showTab(inputId = "Navp", target = "Analyst")
showTab(inputId = "Navp", target = "New User")
hideTab(inputId = "Navp", target = "Login")
# hideTab(inputId = "Navp", target = "NewUser")
hideTab(inputId = "Navp", target = "Customer")
updateNavlistPanel(session, "Navp", selected = "Analyst")
}
if ( userrec$Role == "customer" ) {
showTab(inputId = "Navp", target = "Customer")
hideTab(inputId = "Navp", target = "Dash board")
hideTab(inputId = "Navp", target = "Analyst")
hideTab(inputId = "Navp", target = "Login")
hideTab(inputId = "Navp", target = "New User")
updateNavlistPanel(session, "Navp", selected = "Customer")}
}
})
observeEvent(input$Logout, {
USER$Logged <- FALSE
hideTab(inputId = "Navp", target = "Customer")
hideTab(inputId = "Navp", target = "Analyst")
showTab(inputId = "Navp", target = "Login")
hideTab(inputId = "Navp", target = "Dash board")
showTab(inputId = "Navp", target = "New User")
updateTextInput(session, "userName", value = '')
updateTextInput(session, "passwd", value = '')
output$dataInfo <- renderText({""})
output$HomeInfo <- renderText({"You Have successfully Logged out"})
output$CustInfo <- renderText({""})
output$CustID <- renderUI({
selectInput("CustID", "Customer ID", "") })
updateNavlistPanel(session, "Navp", selected = "Home")
})
#####################Loaddata function
loadData <- function(fields, table, sortCol= '' , whereCls = ''){
if (whereCls == "")
query <- sprintf("SELECT %s FROM %s", fields, table)
else
query <- sprintf("SELECT %s FROM %s WHERE %s", fields, table, whereCls)
db <- dbConnect(SQLite(), sqlitePath)
dataDB <- dbGetQuery(db, query)
if(sortCol != "") dataDB[order(dataDB[sortCol]),]
else dataDB
dbDisconnect(db)
print(dataDB)
}
output$CustID <- renderUI({
selectInput("CustID", "Customer ID", "None") })
Listdata <- loadData("TestName", "TestTypes","TestName","")
Testnamelist <- setNames(Listdata$TestName, Listdata$TestName)
output$TestName <- renderUI({
selectInput("TestName", "Test Name: ", Testnamelist)
})
Listdata1 <- loadData("LabLocation", "Location","LabLocation","")
LabLoclist <- setNames(Listdata1$LabLocation, Listdata1$LabLocation)
output$LabLocation <- renderUI({
selectInput("LabLocation", "Lab: ", LabLoclist)
})
observeEvent(input$displayResults, {
db <- dbConnect(SQLite(), sqlitePath)
datatb <- tbl(db, "TestResults")
datatb <- datatb %>% as.data.frame()
TestResults <- datatb
output$Results <- renderDT(TestResults, options =
list(scrollX = TRUE), editable = TRUE)
proxy1 = dataTableProxy('Results')
TestResults_rows <- which(TestResults$TestName != "" | is.na(TestResults$TestName) )
observeEvent(input$Results_cell_edit, {
info = input$Results_cell_edit
str(info)
i = info$row
j = info$col
v = info$value
new_value <- DT::coerceValue(v, TestResults[i, j])
TestResults[i, j] <<- new_value
datatb[TestResults_rows[i], j] <<- new_value
replaceData(proxy1, TestResults, resetPaging = TRUE) # important
})
observeEvent(input$action, {
dbWriteTable(db, "TestResults", data.frame(datatb), overwrite = TRUE)
})
})
#dashboard
db <- dbConnect(SQLite(), sqlitePath)
testresultstabel <- tbl(db, "TestResults")
testresultstabel <- testresultstabel %>% as.data.frame()
vals <- reactiveValues(MaxTestResultsbyType=NULL, TestResultsPerCustomer = NULL)
Dashboarddata <- reactive({
Dashboarddata <- testresultstabel %>%
filter(LabLocation %in% input$LabLoc)
if(is.null(input$Sex))
return()
Dashboarddata
})
output$value1 <- renderValueBox({
valueBox(h4("Total Tests by Hospital:"),
formatC(count(Dashboarddata()), format="d", big.mark=','),
paste('Total Tests by Hospital:',count(Dashboarddata()))
,icon = icon("stats",lib='glyphicon')
,color = "purple")
})
Dashboarddata2 <- reactive({
Dashboarddata2 <- testresultstabel %>%
filter(LabLocation %in% input$LabLoc) %>%
filter(TestName %in% input$Ttype)
if(is.null(input$Ttype))
return()
Dashboarddata2
})
output$value2 <- renderValueBox({
valueBox(h4("Total Tests by Test Type:"),
formatC(count(Dashboarddata2()), format="d", big.mark=','),
paste('Total Tests',count(Dashboarddata2()))
,icon = icon("stats",lib='glyphicon')
,color = "fuchsia")
})
Dashboarddata3 <- reactive({
Dashboarddata3 <- testresultstabel %>%
filter(LabLocation %in% input$LabLoc) %>%
filter(TestName %in% input$Ttype) %>%
filter(gender %in% input$Sex)
if(is.null(input$Sex))
return()
Dashboarddata3
})
output$value3 <- renderValueBox({
valueBox(h4("Total Tests by Gender:"),
formatC(count(Dashboarddata3()), format="d", big.mark=','),
paste('Total Tests by Gender:',count(Dashboarddata3()))
,icon = icon("stats",lib='glyphicon')
,color = "green")
})
#creating the plotOutput
output$MaxTestResultsbyType <- renderPlot({
vals$MaxTestResultsbyType <- ggplot(data =Dashboarddata2(),
aes(x=TestName, y=TestResults, fill=factor(gender))) +
geom_bar(position = "dodge", stat = "identity") + ylab("Test Results") +
xlab("Test Name") + theme(legend.position="bottom"
,plot.title = element_text(size=15, face="bold")) +
labs(fill = "gender")
vals$MaxTestResultsbyType
})
Dashboarddata4 <- reactive({
Dashboarddata4 <- testresultstabel %>%
filter(LabLocation %in% input$LabLoc) %>%
filter(TestName == "Lipid Profile")
if(is.null(input$Ttype))
return()
Dashboarddata4
})
output$TestResultsPerCustomer <- renderPlot({
vals$TestResultsPerCustomer <- ggplot(Dashboarddata4(),
aes(x = CustID, y = TestResults,fill=factor(TestName))) +
geom_point(size = 5, stat = "identity") + ylab("Test Results") +
xlab("Customer") + theme(legend.position="bottom"
,plot.title = element_text(size=15, face="bold")) +
ggtitle("Test Results by Customer") + labs(fill = "Test Name")
vals$TestResultsPerCustomer
})
output$downloadReport <- downloadHandler(
filename = function() {
paste("downloadReport.pdf",sep="")},
content = function(file) {
pdf(file)
grid.arrange(vals$MaxTestResultsbyType, vals$TestResultsPerCustomer)
dev.off()
}
)
}
shinyApp(ui = ui, server = server)
|
4b3a9674c15ab2aa350baa709e6cb222e77f23c3
|
0dab3522081b5089b96945d431720b8dd004b193
|
/carte_reg_dom.R
|
119a6b84c6b52fb19cfe5b095a8c65bbca4c9de1
|
[] |
no_license
|
slevu/carte_listeria
|
0b79a0b2758e8c4796edf32744e7d05a961d7fb1
|
94ffe0929ad2050fa76c457cb883481ea9c4333b
|
refs/heads/master
| 2022-12-09T22:51:45.173561
| 2020-08-27T14:02:18
| 2020-08-27T14:02:18
| 290,785,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,825
|
r
|
carte_reg_dom.R
|
## Map of France (mainland regions + DOM) with up to two coloured value
## labels per region.
##
## FNSHP       - path to the 2016 region shapefile
## dat         - data.frame with columns id (region code), value1, value2
## nozero      - when TRUE, zeros anywhere in dat become NA (no label drawn)
## colvalue1 / colvalue2   - label colours for the two value columns
## justvalue1 / justvalue2 - horizontal justification used when both values
##                           are present for a region
## Returns a ggplot object.
carte <- function(FNSHP = "extdata/Region_France_DOM_simplifie_2016.shp",
                  dat = read.csv2("mockdata.csv"),
                  nozero = TRUE,
                  colvalue1 = "red", colvalue2 = "green4",
                  justvalue1 = "left", justvalue2 = "right",
                  ...){
  ##- libs
  require(rgdal)
  require(broom)
  require(ggplot2)
  ##- shape: read polygons and flatten to a data.frame of path coordinates
  map0 <- rgdal::readOGR(dsn = FNSHP, verbose = FALSE)
  suppressWarnings( positions <- broom::tidy(map0, region="NEW_REG") )
  positions$id <- as.numeric(as.character(positions$id))
  ##- centroid: one label anchor point per region
  x <- sf::st_as_sf(map0)
  suppressWarnings( cent <- sf::st_centroid(x) )
  levels(cent$NEW_REG) <- as.character(as.numeric( levels(cent$NEW_REG) ))
  dd0 <- data.frame(id = as.numeric(as.character(cent$NEW_REG)),
                    matrix(unlist(cent$geometry),
                           ncol = 2,
                           byrow = TRUE,
                           dimnames = list(NULL, c("long", "lat"))) ,
                    stringsAsFactors = FALSE)
  ##- data
  ## don't show zeros: blank them so geom_text draws nothing
  if (nozero) {
    dat[dat == 0] <- NA
  }
  dd <- merge(dd0, dat, by = "id", all.x = TRUE)
  positions.data <- merge(positions, dd[, -(2:3)], by = 'id') #, all.x = TRUE)
  ##- plot
  ## blank map: region outlines only
  m0 <- ggplot() + geom_path(data = positions.data,
                             aes( x = long, y = lat,
                                  group = group),
                             color = "grey", size = .1) +
    coord_fixed() + theme_void()
  ## labels (text size is mapped to the value, rescaled below)
  m1 <- m0 +
    ## value1
    geom_text(data = dd,
              mapping = aes(x = long, y = lat,
                            label = value1,
                            size = value1,
                            # justify only if two values are plotted
                            hjust = ifelse(is.na(value2),
                                           "middle",
                                           justvalue1)),
              colour = colvalue1,
              # for the DOM (ids 1-6), shift labels left
              nudge_x = ifelse(dd$id %in% 1:6, -1e5, 0) ) +
    ## value2
    geom_text(data = dd,
              mapping = aes(x = long, y = lat,
                            label = value2,
                            size = value2,
                            hjust = ifelse(is.na(value1),
                                           "middle",
                                           justvalue2)),
              colour = colvalue2,
              nudge_x = ifelse(dd$id %in% 1:6, -1e5, 0) ) +
    # rescale symbol size after transformation
    scale_radius(range = c(3, 6)) +
    theme(legend.position="none")
  return(m1)
}
|
6c117eab0336443debbf004f89a21b071efa2f0f
|
7b74f00cd80694634e6925067aaeb6572b09aef8
|
/2020/Assignment-2020/Individual/FE8828-Xu Chong/Assignment2/SecretaryProblem.R
|
61103a3649501b7366f7b098068e37e9dc72d557
|
[] |
no_license
|
leafyoung/fe8828
|
64c3c52f1587a8e55ef404e8cedacbb28dd10f3f
|
ccd569c1caed8baae8680731d4ff89699405b0f9
|
refs/heads/master
| 2023-01-13T00:08:13.213027
| 2020-11-08T14:08:10
| 2020-11-08T14:08:10
| 107,782,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,168
|
r
|
SecretaryProblem.R
|
# Secretary-problem cutoff strategy on a random ordering of N candidates
# (values 1..N, higher is better): observe the first `split_number`
# candidates without choosing, then pick the first later candidate that
# beats the observed best. Returns the chosen candidate's value, or 0 when
# no later candidate exceeds the benchmark.
make_choice <- function(N, split_number) {
  ranking <- sample(1:N, N, replace=FALSE)
  benchmark <- max(ranking[1:split_number])
  candidates <- ranking[-(1:split_number)]
  winners <- which(candidates > benchmark)
  if (length(winners) == 0) {
    return(0)
  }
  candidates[winners[1]]
}
# Estimate the success probability of the cutoff strategy: the fraction of
# M simulated orderings in which make_choice() selects the best candidate N.
make_choice_repeat <- function(N, split_number, M) {
  successes <- replicate(M, make_choice(N, split_number) == N)
  sum(successes) / M
}
# Search evaluation-group sizes 1..floor(N/2) for the split that maximises
# the estimated probability of picking the best of N candidates, using M
# Monte-Carlo runs per split.
#
# N - number of candidates
# M - number of simulations per candidate split
# Returns list(optimal split, estimated success probability);
# list(0, 0) when no split is evaluated or every split scored zero.
find_optimal <- function(N, M) {
  optimal <- 0
  optimal_prob <- 0
  # seq_len() instead of 1:floor(N/2): for N < 2 the original produced the
  # bogus sequence c(1, 0); seq_len(0) correctly yields an empty loop.
  for (split_number in seq_len(floor(N / 2))) {
    prob <- make_choice_repeat(N, split_number, M)
    if (prob > optimal_prob) {
      optimal_prob <- prob
      optimal <- split_number
    }
  }
  return(list(optimal, optimal_prob))
}
# Monte-Carlo search for the optimal evaluation-group size at three problem
# sizes; each estimate uses 10,000 simulated candidate orderings.
result = find_optimal(3, 10000)
cat( "Optimal split when N=3 is ", result[[1]], ". Optimal probability is ", result[[2]], '\n' )
result = find_optimal(10, 10000)
cat( "Optimal split when N=10 is ", result[[1]], ". Optimal probability is ", result[[2]], '\n' )
result = find_optimal(100, 10000)
cat( "Optimal split when N=100 is ", result[[1]], ". Optimal probability is ", result[[2]], '\n' )
|
812658da2ad9998bbd966b789205844d996e5d1d
|
ad6dc61037b50bf1e5ab7358264ae950ce75cfa9
|
/Week11/text_classification.R
|
c5bda48c8e1d2e1a7979f724e20f96a12168106d
|
[] |
no_license
|
steviep42/INFO550
|
0cb2a3df74d99727f0487a1ef86cf90aac2392a2
|
a050910cace12e727ea822677285bfc0fa51fa44
|
refs/heads/master
| 2021-01-21T13:11:46.743545
| 2016-04-27T21:10:31
| 2016-04-27T21:10:31
| 52,405,574
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,407
|
r
|
text_classification.R
|
###
# mtcars knn primer
# Predict transmission type (`am`, column 9 of mtcars) from the remaining
# variables using an 80/20 train/test split and k-nearest neighbours, then
# print the confusion table.
# NOTE(review): `knn` comes from the `class` package, which is not loaded in
# this file — confirm it is attached elsewhere before running.
percent <- ceiling(nrow(mtcars)*0.80)
train.idx <- sample(1:nrow(mtcars),percent,F)
train.mtcars <- mtcars[train.idx,]
test.mtcars <- mtcars[-train.idx,]
preds <- knn(train.mtcars[,-9],test.mtcars[,-9],mtcars[train.idx,]$am)
table("Predictions" = preds, Actual= test.mtcars[,"am"])
# Repeatedly split mtcars into train/test sets and classify transmission
# type (`am`, column 9) with k-nearest neighbours.
#
# fraction   - share of rows used for training (default 80%)
# iterations - number of random train/test splits to evaluate
# Returns a list with one element per iteration, each holding the split
# fraction, the train/test frames, the knn predictions, the confusion table
# and the resulting accuracy. Accuracy is reported as 0 whenever the
# confusion table is not 2x2 (i.e. a class is absent from predictions or
# the test set).
# NOTE(review): `knn` is from the `class` package, not loaded in this file —
# confirm it is attached before calling.
mytrainer <- function(fraction=0.80, iterations=10) {
  retlist <- list()
  for (ii in 1:iterations) {
    percent <- ceiling(nrow(mtcars)*fraction)
    train.idx <- sample(1:nrow(mtcars),percent,F)
    train.mtcars <- mtcars[train.idx,]
    test.mtcars <- mtcars[-train.idx,]
    preds <- knn(train.mtcars[,-9],test.mtcars[,-9],mtcars[train.idx,]$am)
    out <- table("Predictions" = preds, Actual= test.mtcars[,"am"])
    # Guard against degenerate confusion tables (missing class level).
    if (prod(dim(out)) != 4) {
      accuracy <- 0
    } else {
      accuracy <- (out[1,1]+out[2,2])/sum(out)
    }
    retlist[[ii]] <- list(percent=fraction,
                          train=train.mtcars,
                          test=test.mtcars,
                          preds=preds,
                          table=out,
                          accuracy=accuracy)
  }
  return(retlist)
}
mypreds <- mytrainer()
sapply(mypreds, function(x) x$accuracy)
###
library(tm)
library(SnowballC)
library(dplyr)
setwd("~/Downloads")
# Read in the data
tweets <- read.csv("tweets.csv", stringsAsFactors=FALSE)
str(tweets)
tweets %>% filter(Avg == -1) %>% select(Tweet)
# Create dependent variable
tweets$Negative <- as.factor(tweets$Avg <= -1)
table(tweets$Negative)
# ---- Tweet sentiment text-mining pipeline (tm + kNN / CART / random forest) ----
# NOTE(review): this section assumes a data frame 'tweets' (with columns
# Tweet and Negative) and library(tm) were set up earlier in the file --
# confirm upstream.
# Create corpus
tweets$Tweet[1]
corpus <- Corpus(VectorSource(tweets$Tweet))
# Look at corpus
corpus
corpus[[1]]
# Convert to lower-case
corpus <- tm_map(corpus, tolower)
corpus[[1]]$content
# IMPORTANT NOTE: If you are using the latest version of the tm package, you will need to run the following line before continuing (it converts corpus to a Plain Text Document). This is a recent change having to do with the tolower function that occurred after this video was recorded.
corpus <- tm_map(corpus, PlainTextDocument)
# Remove punctuation
corpus <- tm_map(corpus, removePunctuation)
corpus[[1]]$content
# Look at stop words
stopwords("english")[1:10]
# Remove stopwords and apple
corpus <- tm_map(corpus, removeWords, c("apple", stopwords("english")))
corpus[[1]]$content
# Stem document (reduce words to their stems, e.g. "argued" -> "argu")
corpus <- tm_map(corpus, stemDocument)
corpus[[1]]$content
# Create document-term matrix (rows = tweets, columns = terms, cells = counts)
frequencies <- DocumentTermMatrix(corpus)
frequencies
# Look at matrix
inspect(frequencies[1000:1005,505:515])
# Check for sparsity
findFreqTerms(frequencies, lowfreq=20)
# Remove sparse terms (keep terms appearing in at least 0.5% of tweets)
sparse <- removeSparseTerms(frequencies, 0.995)
sparse
inspect(sparse[1000:1005,10:20])
# Convert to a data frame
tweetsSparse <- as.data.frame(as.matrix(sparse))
rownames(tweetsSparse) <- make.names(rownames(tweetsSparse),unique=TRUE)
# Make all variable names R-friendly
colnames(tweetsSparse) <- make.names(colnames(tweetsSparse),unique=TRUE)
# Add dependent variable
tweetsSparse$Negative <- tweets$Negative
# Split the data (70% train / 30% test, stratified on Negative)
library(caTools)
set.seed(123)
split <- sample.split(tweetsSparse$Negative, SplitRatio = 0.7)
trainSparse <- subset(tweetsSparse, split==TRUE)
testSparse <- subset(tweetsSparse, split==FALSE)
# Look at KNN
# NOTE(review): knn() lives in the 'class' package; no library(class) call is
# visible in this section -- confirm it is loaded upstream.
# NOTE(review): column 310 is assumed to be 'Negative'; name-based indexing
# (e.g. setdiff(names(trainSparse), "Negative")) would be safer.
preds <- knn(trainSparse[,-310],testSparse[,-310],trainSparse[,310])
knnout <- table("Predictions" = preds, Actual= testSparse[,310])
(knnacc <- round(sum(diag(knnout))/sum(knnout),2))
# Build a CART model
library(rpart)
library(rpart.plot)
tweetCART <- rpart(Negative ~ ., data=trainSparse, method="class")
prp(tweetCART)
# Evaluate the performance of the model on the held-out test set
predictCART <- predict(tweetCART, newdata=testSparse, type="class")
cartout <- table(actual=testSparse$Negative, predicted=predictCART)
(cartacc <- round(sum(diag(cartout))/sum(cartout),2))
# Compute accuracy
# (294+18)/(294+6+37+18)
# Baseline accuracy (always predict the majority class)
table(actual=testSparse$Negative)
# 300/(300+55)
# Random forest model
library(randomForest)
set.seed(123)
tweetRF <- randomForest(Negative ~ ., data=trainSparse)
# Make predictions:
predictRF <- predict(tweetRF, newdata=testSparse)
RFout <- table(actual=testSparse$Negative, predicted=predictRF)
(RFacc <- round(sum(diag(RFout))/sum(RFout),2))
# Accuracy:
# (293+21)/(293+7+34+21)
|
530dfe354ac376e43c0a99e319ab62bbe65b5edd
|
9dd4f25e7a7c4209ce16729b91a888eb4839260d
|
/read_data.R
|
25cf921f0978b6cb55712b2989b038ecb4b05ccf
|
[] |
no_license
|
tobiasziegler/ExData_Plotting1
|
24185211ba182ae3c85c588f54e10cfe76540020
|
631fb56fdef38581c09209cf5f4a8e33c7fd1e06
|
refs/heads/master
| 2021-01-18T18:07:12.186473
| 2015-04-11T10:07:19
| 2015-04-11T10:07:19
| 33,764,754
| 0
| 0
| null | 2015-04-11T06:02:17
| 2015-04-11T06:02:16
| null |
UTF-8
|
R
| false
| false
| 1,044
|
r
|
read_data.R
|
# Reads the household power consumption data, downloading it if required.
# Returns only the rows for 2007-02-01 and 2007-02-02 (the subset used in
# the plotting exercises), with Date parsed as Date and Time rebuilt as a
# full POSIXlt datetime.
read_data <- function() {
  # The archive extracts "household_power_consumption.txt" (all lower case).
  # The original looked for "Household_power_consumption.txt", which never
  # matches on case-sensitive file systems and therefore forced a fresh
  # download (and a failing read) on every run.
  lclData <- "data/household_power_consumption.txt"
  if (!file.exists(lclData)) {
    download_data()
  }
  hpc_data <- read.table(lclData, header = TRUE, sep = ";", na.strings = "?")
  hpc_data$Date <- as.Date(hpc_data$Date, "%d/%m/%Y")
  # Date-vs-string comparison works here because the strings are ISO-formatted.
  hpc_data <- hpc_data[hpc_data$Date >= "2007-02-01" & hpc_data$Date <= "2007-02-02", ]
  hpc_data$Time <- paste(hpc_data$Date, hpc_data$Time, sep = " ")
  hpc_data$Time <- strptime(hpc_data$Time, "%Y-%m-%d %H:%M:%S")
  hpc_data
}
# Downloads and unzips the data file ready for use.
# Creates the data/ directory if needed, fetches the archive only when it is
# not already present, and extracts it into data/.
download_data <- function() {
  dataUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  lclZip <- "data/exdata-data-household_power_consumption.zip"
  if (!file.exists("data")) {
    dir.create("data")
  }
  if (!file.exists(lclZip)) {
    # mode = "wb" keeps the binary zip intact on Windows; letting
    # download.file() pick its default method is more portable than the
    # original hard-coded method = "curl".
    download.file(dataUrl, lclZip, mode = "wb")
  }
  unzip(lclZip, exdir = "data")
}
|
9c5b6e529e90c0b9825f2be97c504b0bf64afaff
|
1c9dc6b031f967801c894344893285542a7becae
|
/man/load_files.Rd
|
bb00b1a40e232439d751146480fea710b9747b7e
|
[
"MIT"
] |
permissive
|
Mattlk13/aceR
|
0a67f1fbc197781bd3417b4da63b02429b16a797
|
c9c11f9bfd60df6c24ce5fff6a8e2b04aebade5a
|
refs/heads/master
| 2022-06-30T03:13:27.428067
| 2022-06-20T20:38:31
| 2022-06-20T20:38:31
| 147,976,156
| 0
| 0
|
MIT
| 2020-06-30T16:20:36
| 2018-09-08T23:04:05
|
R
|
UTF-8
|
R
| false
| true
| 900
|
rd
|
load_files.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read-generic.R
\name{load_files}
\alias{load_files}
\title{Load all files in a directory as a data frame}
\usage{
load_files(path = ".", verbose = FALSE, ...)
}
\arguments{
\item{path}{a character vector of full path names; the default
corresponds to the working directory, \code{\link[base]{getwd}()}. Tilde
expansion (see \code{\link[base]{path.expand}}) is performed. Missing
values will be ignored. Elements with a marked encoding will
be converted to the native encoding (and if that fails, considered
non-existent).}
\item{verbose}{print details?}
\item{...}{see \code{\link{list.files}}}
}
\value{
all files in a directory as one data frame
}
\description{
Load all files in a directory as one data frame
}
\section{Warning}{
Use \code{\link{load_ace_bulk}} for raw formatted ACE data.
}
|
f8a1b24c8fdb512b2bbce343701f4ade7bbd1bd2
|
7a5931a74f2e8d8342fb3d6daba1a45029f6ed33
|
/r_code/correlationPlot.R
|
0a679ba93793e78d32d5106dc9186f02933c482d
|
[] |
no_license
|
nesl/netcar
|
a9fee0d6130e1f228ddc579cd849227638bbca14
|
144559e6c4ce1417ffb620a2b5fb5f362e3f7348
|
refs/heads/master
| 2021-01-22T04:49:34.546685
| 2007-05-16T17:57:06
| 2007-05-16T17:57:06
| 12,227,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,125
|
r
|
correlationPlot.R
|
correlationPlot = function(validEvents) {
  # Draws a pairwise scatterplot matrix of accel0 readings from three sensor
  # nodes, restricted to events 3 and 4, with points coloured by event type.
  #
  # validEvents: currently unused -- NOTE(review): presumably intended to
  # select which event codes to keep instead of the hard-coded c(3, 4);
  # confirm with callers before wiring it in.
  source("calibrate.R")
  source("binData.R")
  source("markData.R")

  maxTime <- 600
  eventFile <- "event.log"

  d1 <- calibrate("1.log", -1)
  d2 <- calibrate("2.log", -1)
  d3 <- calibrate("3.log", -1)

  # Create the same bins for all the data.
  bd1 <- binData(d1, maxTime)
  bd2 <- binData(d2, maxTime)
  bd3 <- binData(d3, maxTime)

  # Mark the data with events.
  bd1 <- markData(bd1, eventFile)
  bd2 <- markData(bd2, eventFile)
  bd3 <- markData(bd3, eventFile)

  # Keep only rows whose event is 3 OR 4.  The original used
  # `bd1[bd1$event == 3 & bd1$event == 4]`, which (a) can never be TRUE
  # (an event cannot equal 3 and 4 at once) and (b) selected *columns*
  # rather than rows because the subsetting comma was missing.
  keep <- c(3, 4)
  bd1 <- bd1[bd1$event %in% keep, ]
  bd2 <- bd2[bd2$event %in% keep, ]
  bd3 <- bd3[bd3$event %in% keep, ]

  # `main` is the correct title argument; the original passed `title=`,
  # which is not a graphical parameter and was not honoured by pairs().
  pairs(cbind(bd1$accel0, bd2$accel0, bd3$accel0),
        c("node1", "node2", "node3"),
        xlim = range(-1, 1), ylim = range(-1, 1),
        main = "Accel0", col = bd1$event)

  event <- read.table(eventFile, sep = "\t", header = TRUE)
  legend("bottomright", legend = unique(event$event),
         fill = as.numeric(unique(event$event)),
         inset = c(0.15, 0.05), text.width = 20)
}
|
4d732121d4ce2de10653308541631d5135844c66
|
0be7bb63022754d44b5d272309109f63d7862727
|
/p/vus/vubad.r
|
c4f72f92fba7a353e1c3a8c8830efbdea6deee34
|
[] |
no_license
|
rust11/rust
|
dbb3f68d2b8eea6a5de064f644e203fa2cacd8b2
|
8d347f1415da38b679cbafe2eb0ae0332c0ff0ee
|
refs/heads/master
| 2023-03-10T06:35:41.453764
| 2023-03-08T18:26:00
| 2023-03-08T18:26:00
| 32,439,492
| 13
| 2
| null | 2022-08-12T04:10:56
| 2015-03-18T05:18:25
|
R
|
UTF-8
|
R
| false
| false
| 989
|
r
|
vubad.r
|
file vubad - bad blocks
include rid:rider
include vub:vumod
include rid:rtbad
include rid:rtcla
code cm_dir - bad block directory
ipt := cmAspc[3]
proc cm_dir
is spc : * char = ipt.Pnam
fil : * FILE
cla : * rtTcla
bad : * bdTbad
sta : WORD = 0
lim : WORD = 0
nam : int = 0
log : int = cmVopt & cmLOG_
exit vu_inv () if !*spc ; no device specified
fil = fi_opn (spc, "rb", "") ;
pass fail ;
bad = bd_alc (<>, fil) ;
rt_cla (fil, cla)
exit im_rep ("E-Error accessing device %s", spc) if fail
exit im_rep ("E-Can't scan bad blocks on %s", spc) if !cla->Vsiz
bad->Iscn.Vlim = cla->Vsiz
if cmVopt & cmSTA_
.. sta = cmIst1.Vval
if cmVopt & cmLST_
.. lim = cmIlst.Vval + 1
lim = cla->Vsiz if !lim
vu_inv () if sta ge lim
bad->Iscn.Vsta = sta
bad->Iscn.Vlim = lim
if !bd_scn (bad, (cmVopt & cmLOG_))
.. im_rep ("W-Too many bad blocks %s", spc)
if (cla->Vflg & (fcDEV_|fcDIR_|fcCON_))
.. nam = cmVopt & cmFIL_
bd_lst (bad, nam)
bd_rep (bad)
end
|
f4796e3839be212d3a9efbf3890bffa26a99f8c7
|
c0d01a4fbd349421ca2aee4a9091e14092054704
|
/code/TS_analysis_ESM.R
|
8f2f607924b94760917560dcdab2c5cca5cb7db0
|
[] |
no_license
|
lslbastos/BR_SIHSUS_StrokeSeasonality_2009_2018
|
320b70cdb81319bc8cfa167cb5cedc5a3f42a54d
|
715971da9ee2acdc11624adab8b245a646ef8b02
|
refs/heads/main
| 2023-07-04T19:25:09.849203
| 2021-08-26T14:48:07
| 2021-08-26T14:48:07
| 315,141,818
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,238
|
r
|
TS_analysis_ESM.R
|
# Libraries ---------------------------------------------------------------
library(tidyverse)
library(ggfortify)
# library(seastests)
# library(forecast)
# Input data --------------------------------------------------------------
# Monthly stroke hospitalisation series per Brazilian region, 2009-2018.
# Recodes 'regiao' as an ordered factor with English region labels.
# NOTE(review): 'ordered = T' uses the reassignable alias T; prefer TRUE.
df_stroke_series_month <- read_csv("input/files/aih_stroke_2009_2018_month.csv") %>%
map_at("regiao", ~factor(., levels = c("N", "NE", "CO", "SE", "S", "BR"),
labels = c("North", "Northeast", "Center-West", "Southeast", "South", "Brazil"), ordered = T)) %>%
bind_cols()
stl_ggplot <- function(stl_data) {
  # Build a ggplot of an stl() decomposition with panels in a fixed order
  # (Data, trend, seasonal, remainder) and a formatted monthly x axis.
  #
  # stl_data: an object returned by stats::stl().
  # Returns:  a ggplot object with one facet per decomposition component.
  stl_autoplot <- autoplot(stl_data)

  # autoplot() stores the component name in 'plot_group'; turn it into an
  # ordered factor so facet_wrap() lays the panels out top to bottom in
  # decomposition order rather than alphabetically.
  stl_autoplot$data$plot_group <- factor(stl_autoplot$data$plot_group,
                                         levels = c("Data", "trend", "seasonal", "remainder"),
                                         ordered = TRUE)

  # Rebuild the plot from autoplot()'s data so the axis formatting and facet
  # layout can be controlled.  The trailing empty argument the original left
  # in scale_x_date() (a dangling comma) has been removed.
  stl_autoplot$data %>%
    ggplot() +
    geom_line(aes(x = Index, y = value)) +
    facet_wrap(. ~ plot_group, scales = "free_y", ncol = 1) +
    scale_x_date(date_breaks = "6 months",
                 labels = scales::date_format("%b-%Y"),
                 limits = as.Date(c('2008-12-31', '2019-01-01'))) +
    labs(x = "", y = "") +
    theme_bw() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
}
# TS data -----------------------------------------------------------------
# List of TSs objects
# One monthly ts (age_hosp_rate column) per region, starting January 2009.
ls_ts_regions <- df_stroke_series_month %>%
split(.$regiao) %>%
map(~ts(pull(., age_hosp_rate),
start = c(2009, 01),
frequency = 12)
)
# List of STL objects for each TS
# s.window = "periodic" assumes the seasonal pattern is constant across years.
ls_ts_stl_obj <- ls_ts_regions %>%
map(~stl(., s.window = "periodic"))
# List of STL plots, one per region, titled with the region name.
ls_stl_plot <- ls_ts_stl_obj %>%
imap(~stl_ggplot(.x) +
labs(title = .y))
# Export STL plots to output/figures/Month/, one PNG per region.
ls_stl_plot %>%
iwalk(~ggsave(paste0("output/figures/Month/plot_stl", .y,"_month_smooth.png"), .x,
height = 8, width = 7, dpi = 800, units = "in"))
|
542a35893a6a97dc6b589bcfb322eb2d6a8dc35c
|
e0592e443c2f47d1332b37fdd13c063d4aa0359b
|
/tests/testthat.R
|
721d0d2fb588d337239c31d96a0b2806129efe66
|
[] |
no_license
|
abifromr/apihelpers
|
1b308059deaf9978dfc3ec149dd65cb969f019ce
|
34e2156ed0107be3a7da7ef32e58888b46378711
|
refs/heads/master
| 2021-08-19T18:03:10.932107
| 2020-05-20T05:20:12
| 2020-05-20T05:20:12
| 184,191,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat entry point: runs the whole test suite for the
# 'apihelpers' package (tests live under tests/testthat/).
library(testthat)
library(apihelpers)
test_check("apihelpers")
|
170d1e7b792be3490f1e9aca5c09d159a86211ad
|
47c9165cc535c495176c1fc70a60eed66bcdbbaf
|
/forecast/data/gas.R
|
13b828493660b9f72c90ce38ce211040666930e5
|
[] |
no_license
|
cran/forecasting
|
5494260769c6ff530798629d8a4847cbf95bb5b5
|
a8f4142627b791448edb1a4df55bf6747ce8785c
|
refs/heads/master
| 2020-04-26T09:35:36.000252
| 2009-08-30T00:00:00
| 2009-08-30T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,272
|
r
|
gas.R
|
# Monthly time series starting January 1956 (frequency = 12).
# Presumably the 'gas' dataset (Australian monthly gas production) shipped
# with the forecast package -- TODO confirm against the package docs.
gas<-ts(c(1709, 1646, 1794, 1878, 2173, 2321, 2468, 2416, 2184,
2121, 1962, 1825, 1751, 1688, 1920, 1941, 2311, 2279, 2638,
2448, 2279, 2163, 1941, 1878, 1773, 1688, 1783, 1984, 2290,
2511, 2712, 2522, 2342, 2195, 1931, 1910, 1730, 1688, 1899,
1994, 2342, 2553, 2712, 2627, 2363, 2311, 2026, 1910, 1762,
1815, 2005, 2089, 2617, 2828, 2965, 2891, 2532, 2363, 2216,
2026, 1804, 1773, 2015, 2089, 2627, 2712, 3007, 2880, 2490,
2237, 2205, 1984, 1868, 1815, 2047, 2142, 2743, 2775, 3028,
2965, 2501, 2501, 2131, 2015, 1910, 1868, 2121, 2268, 2690,
2933, 3218, 3028, 2659, 2406, 2258, 2057, 1889, 1984, 2110,
2311, 2785, 3039, 3229, 3070, 2659, 2543, 2237, 2142, 1962,
1910, 2216, 2437, 2817, 3123, 3345, 3112, 2659, 2469, 2332,
2110, 1910, 1941, 2216, 2342, 2923, 3229, 3513, 3355, 2849,
2680, 2395, 2205, 1994, 1952, 2290, 2395, 2965, 3239, 3608,
3524, 3018, 2648, 2363, 2247, 1994, 1941, 2258, 2332, 3323,
3608, 3957, 3672, 3155, 2933, 2585, 2384, 2057, 2100, 2458,
2638, 3292, 3724, 4652, 4379, 4231, 3756, 3429, 3461, 3345,
4220, 4874, 5064, 5951, 6774, 7997, 7523, 7438, 6879, 6489,
6288, 5919, 6183, 6594, 6489, 8040, 9715, 9714, 9756, 8595,
7861, 7753, 8154, 7778, 7402, 8903, 9742, 11372, 12741, 13733,
13691, 12239, 12502, 11241, 10829, 11569, 10397, 12493, 11962,
13974, 14945, 16805, 16587, 14225, 14157, 13016, 12253, 11704,
12275, 13695, 14082, 16555, 17339, 17777, 17592, 16194, 15336,
14208, 13116, 12354, 12682, 14141, 14989, 16159, 18276, 19157,
18737, 17109, 17094, 15418, 14312, 13260, 14990, 15975, 16770,
19819, 20983, 22001, 22337, 20750, 19969, 17293, 16498, 15117,
16058, 18137, 18471, 21398, 23854, 26025, 25479, 22804, 19619,
19627, 18488, 17243, 18284, 20226, 20903, 23768, 26323, 28038,
26776, 22886, 22813, 22404, 19795, 18839, 18892, 20823, 22212,
25076, 26884, 30611, 30228, 26762, 25885, 23328, 21930, 21433,
22369, 24503, 25905, 30605, 34984, 37060, 34502, 31793, 29275,
28305, 25248, 27730, 27424, 32684, 31366, 37459, 41060, 43558,
42398, 33827, 34962, 33480, 32445, 30715, 30400, 31451, 31306,
40592, 44133, 47387, 41310, 37913, 34355, 34607, 28729, 26138,
30745, 35018, 34549, 40980, 42869, 45022, 40387, 38180, 38608,
35308, 30234, 28801, 33034, 35294, 33181, 40797, 42355, 46098,
42430, 41851, 39331, 37328, 34514, 32494, 33308, 36805, 34221,
41020, 44350, 46173, 44435, 40943, 39269, 35901, 32142, 31239,
32261, 34951, 38109, 43168, 45547, 49568, 45387, 41805, 41281,
36068, 34879, 32791, 34206, 39128, 40249, 43519, 46137, 56709,
52306, 49397, 45500, 39857, 37958, 35567, 37696, 42319, 39137,
47062, 50610, 54457, 54435, 48516, 43225, 42155, 39995, 37541,
37277, 41778, 41666, 49616, 57793, 61884, 62400, 50820, 51116,
45731, 42528, 40459, 40295, 44147, 42697, 52561, 56572, 56858,
58363, 45627, 45622, 41304, 36016, 35592, 35677, 39864, 41761,
50380, 49129, 55066, 55671, 49058, 44503, 42145, 38698, 38963,
38690, 39792, 42545, 50145, 58164, 59035, 59408, 55988, 47321,
42269, 39606, 37059, 37963, 31043, 41712, 50366, 56977, 56807,
54634, 51367, 48073, 46251, 43736, 39975, 40478, 46895, 46147,
55011, 57799, 62450, 63896, 57784, 53231, 50354, 38410, 41600,
41471, 46287, 49013, 56624, 61739, 66600, 60054),start=1956,frequency=12)
|
fa41bfbc91cc90e88606f808a1284a132bf81422
|
8eea125de70bd5268a39e2f4f767e10f6b6b6007
|
/plot4.R
|
ff486a5cf123169666c11a28ef23c985543b2596
|
[] |
no_license
|
logo403/ExData_Plotting1
|
9ef6918e611d934884de740548359fe67be77f95
|
ee9ad452719510a843b2ad3b211c4bf150199ac3
|
refs/heads/master
| 2021-01-14T09:42:05.078791
| 2015-10-08T02:27:09
| 2015-10-08T02:27:09
| 43,795,649
| 0
| 0
| null | 2015-10-07T04:48:04
| 2015-10-07T04:48:03
| null |
UTF-8
|
R
| false
| false
| 1,599
|
r
|
plot4.R
|
# plot 4 -- 2x2 panel of household power consumption charts for
# 2007-02-01 and 2007-02-02, written to plot4.png.
library(lubridate)
library(dplyr)

# NOTE(review): setwd() makes the script non-portable; kept for
# compatibility with the original author's workflow.
setwd("C:/Users/logo403/Documents/R/work/dataexploratory/project1/")

Url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Download only when the archive is not already present (the original
# re-downloaded on every run); mode = "wb" keeps the zip intact on Windows.
if (!file.exists("power.zip")) {
  download.file(Url, destfile = "power.zip", mode = "wb")
}

# Read the data directly from inside the zip; "?" marks missing values.
class <- c("character", "character", rep("numeric", 7))
power <- read.table(unz("power.zip", "household_power_consumption.txt"),
                    na.strings = "?", quote = "", colClasses = class, header = TRUE, sep = ";")

# Combine Date + Time into one POSIX datetime (stored in Date) and keep only
# the two target days.
power <- power %>% mutate(Date = dmy_hms(paste(power$Date, power$Time))) %>%
  filter(Date >= ymd("2007-02-01") & Date < ymd("2007-02-03")) %>%
  select(-Time)

# Plot 4: four panels on one device.
png(file = "plot4.png")
par(mfrow = c(2, 2))

# 1.1 Global active power over time
with(power, plot(Date, Global_active_power, type = "n",
                 ylab = "Global Active Power", xlab = ""))
lines(power$Date, power$Global_active_power)

# 1.2 Voltage over time
with(power, plot(Date, Voltage, type = "n",
                 ylab = "Voltage", xlab = "datetime"))
lines(power$Date, power$Voltage)

# 2.1 Energy sub-metering, three series overlaid
with(power, plot(Date, power$Sub_metering_1, type = "n",
                 ylab = "Energy sub metering", xlab = ""))
lines(power$Date, power$Sub_metering_1)
lines(power$Date, power$Sub_metering_2, col = "red")
lines(power$Date, power$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n", cex = 0.9, lty = c(1, 1), col = c("black", "red", "blue"))

# 2.2 Global reactive power over time
with(power, plot(Date, Global_reactive_power, type = "n",
                 ylab = "Global_reactive_power", xlab = "datetime"))
lines(power$Date, power$Global_reactive_power)

dev.off()
|
0431932b787d0c9cd368969be0e3ab14768264fb
|
cc30a83a5d911dd895f89e427118f6647cacbe74
|
/plot3.R
|
5c0d0fd4b06d8d529682727a1079139cf7ed4b49
|
[] |
no_license
|
alejoxps/ExData_Plotting1
|
4772ebcfaa0438ffa4ccf122f8a40485fc9f7eb4
|
42ea2616bec8ce7e6aeab8f571bbcd092e86ac88
|
refs/heads/master
| 2020-12-13T12:52:32.886234
| 2014-05-11T22:18:00
| 2014-05-11T22:18:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,084
|
r
|
plot3.R
|
# plot3.R -- energy sub-metering line chart for 2007-02-01/02, written to
# plot3.png with a transparent background.

# Reading data.  Note: "?" missing markers are not declared here, matching
# the original behaviour (affected columns are read as character).
h_p_c <- read.table("household_power_consumption.txt", sep = ";", dec = ".",
                    header = TRUE, stringsAsFactors = FALSE)

# Keep only the two target days.  The date conversion is done once instead
# of twice over the full table as in the original.
obs_date <- as.Date(h_p_c$Date, "%d/%m/%Y")
h_p_c_sub <- h_p_c[obs_date == "2007-02-01" | obs_date == "2007-02-02", ]

# Set graphic device.
png(filename = "plot3.png",
    width = 480, height = 480, units = "px")
# Background.
par(bg = "transparent")

# Convert Date + Time to a POSIXlt datetime ONCE; the original recomputed
# strptime() for every one of the four plot/lines calls below.
dt <- strptime(paste(h_p_c_sub$Date, h_p_c_sub$Time), format = "%d/%m/%Y %H:%M:%S")

# Base plot (empty frame), then one line per sub-metering channel.
plot(dt, as.numeric(h_p_c_sub$Sub_metering_1), type = "n",
     xlab = "", ylab = "Energy sub metering", main = "", col = "black")
lines(dt, as.numeric(h_p_c_sub$Sub_metering_1), type = "l", col = "black")
lines(dt, as.numeric(h_p_c_sub$Sub_metering_2), type = "l", col = "red")
lines(dt, as.numeric(h_p_c_sub$Sub_metering_3), type = "l", col = "blue")

# Apply legend.
legend("topright", lwd = c(1, 1, 1), col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Closing graphic device.
dev.off()
|
58111fdcff8dfcd2d2e945af49566f202cdf7537
|
49274f1e603427e17419a480910df649680a02fc
|
/inst/create_r.R
|
25c031d8a766133101464629172818ef77e81e12
|
[] |
no_license
|
t-arae/prtclmisc
|
da7b21d9122f5b1a75565f9da1faad4b9e219555
|
08203daa321f63524562d33cca81db976febb1b6
|
refs/heads/master
| 2021-07-12T06:57:15.555934
| 2020-08-25T07:28:21
| 2020-08-25T07:28:21
| 195,718,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,176
|
r
|
create_r.R
|
### Generates R files that define helper functions for the units package,
### to make concentration calculations easier.
# install.packages("units")
library(units)
library(tidyverse)

# Fill the template once per (function name, unit name) pair and return the
# generated code as a single character vector.
# NOTE(review): the original used a single str_replace() for the mw, g_conc,
# M and percent families but str_replace_all() for the others; _all is used
# uniformly here in case the template mentions a placeholder more than once
# -- confirm against inst/template.txt.
make_unit_functions <- function(template, fn_names, unit_names = fn_names) {
  map2(fn_names, unit_names, function(fn, un) {
    template %>%
      str_replace_all("unit_is_here", fn) %>%
      str_replace_all("unit_is__here", un)
  }) %>%
    unlist()
}

template <- read_lines("inst/template.txt")

# Mass units.
g_family <- c("kg", "g", "mg", "ug", "ng", "pg")
write_lines(make_unit_functions(template, g_family), path = "R/g.R")

# Amount-of-substance units.
mol_family <- c("mol", "mmol", "umol", "nmol", "pmol")
write_lines(make_unit_functions(template, mol_family), path = "R/mol.R")

# Molecular weight (g/mol).
write_lines(make_unit_functions(template, "mw", "g/mol"), path = "R/mw.R")

# Volume units.
L_family <- c("L", "mL", "uL")
write_lines(make_unit_functions(template, L_family), path = "R/L.R")

# Mass concentrations: function names are the unit strings with "/" removed.
g_conc_family2 <- c("g/L", "mg/L", "ug/L", "g/mL", "mg/mL", "ug/mL", "ng/mL", "ug/uL", "ng/uL", "pg/uL")
g_conc_family1 <- str_remove(g_conc_family2, "/")
write_lines(make_unit_functions(template, g_conc_family1, g_conc_family2), path = "R/g_conc.R")

# Molar concentrations.
M_family1 <- c("M", "mM", "uM", "nM", "pM")
M_family2 <- c("mol/L", "mmol/L", "umol/L", "nmol/L", "pmol/L")
write_lines(make_unit_functions(template, M_family1, M_family2), path = "R/M.R")

# Percent concentrations (weight/weight, weight/volume, volume/volume).
percent_family1 <- c("ww_percent", "wv_percent", "vv_percent")
percent_family2 <- c("g/100g", "g/100mL", "mL/100mL")
write_lines(make_unit_functions(template, percent_family1, percent_family2), path = "R/percent.R")
|
b635e5cb10fe48a16c9e6a04ddfa63accecf9a00
|
85d9ea52729abc548471d35357d9317e2eca8d81
|
/man/am.checky.Rd
|
2bb9469ba698aaaf693b3e1ef1c2f7297d858ccf
|
[] |
no_license
|
ameshkoff/amfeat
|
57666f6bdda30d8e3ca0a639847427b47d71a8cb
|
d9a3d4749468a9b591ff37f00bf2af7e96a4f473
|
refs/heads/master
| 2021-01-19T00:25:39.058343
| 2016-11-25T14:24:14
| 2016-11-25T14:24:14
| 73,109,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,241
|
rd
|
am.checky.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/am.checky.r
\name{am.checky}
\alias{am.checky}
\title{Correlations between dependent variable (Y) and every independent variable}
\usage{
am.checky(ds, ds.list, ds.y, corr.type = "spearman", cl.number = 1,
out.file = "")
}
\arguments{
\item{ds}{data.table: data set}
\item{ds.list}{character vector: names of independent variables (X)}
\item{ds.y}{character: name of the dependent variable (Y)}
\item{corr.type}{character: correlation types; "spearman" (default) and "pearson" available; see Hmisc rcorr for details}
\item{cl.number}{number: the number of parallel workers (cluster size) to use; be very careful with this parameter! do not set it too large}
\item{out.file}{character: absolute or relative path to the output files; due to parallel computation you do NOT see most of the info in the console; by default no output files are used}
}
\value{
Data.table. rn - feature name, cr - correlation value
}
\description{
Define pairwise correlations between the dependent variable (Y) and every independent variable (Xs). Be careful! Parallel computation in use.
}
\seealso{
You can use this data to choose and create new features you prefer with am.calcf function
}
|
30b522495614fcc91249fe9df210e847bd198912
|
e78dc2f685859e31d2e33bd14e46772669c6d678
|
/cachematrix.R
|
b6537f608a73e0f01c0f71679164a7dc2119f9b5
|
[] |
no_license
|
retropc66/ProgrammingAssignment2
|
5e96468e02d28a6fed41462a7f60448ee156fc23
|
0895feeea313cbc4fcbb9c76f05b64df1b4f84b3
|
refs/heads/master
| 2020-12-24T15:58:09.840298
| 2015-08-21T20:06:05
| 2015-08-21T20:06:05
| 40,490,364
| 0
| 0
| null | 2015-08-10T15:32:28
| 2015-08-10T15:32:27
| null |
UTF-8
|
R
| false
| false
| 1,215
|
r
|
cachematrix.R
|
## These functions create an object to store a matrix and to calculate and
## cache its inverse matrix. When the inverse matrix is calculated for the first
## time, it is stored in the object, and can be recalled rather than recalculated
## the next time its value is required.
## makeCacheMatrix: wraps a matrix together with a cache slot for its
## inverse.  Returns a list of four accessors: set/get for the matrix itself
## and setinverse/getinverse for the cached inverse.  Replacing the matrix
## via set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inv <<- NULL
    },
    get = function() x,
    setinverse = function(inv) cached_inv <<- inv,
    getinverse = function() cached_inv
  )
}
## cacheSolve: returns the inverse of the matrix held in an object created
## by makeCacheMatrix.  On the first call the inverse is computed with
## solve() and stored in the object; subsequent calls return the cached
## copy (announced via message()).  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("Getting cached matrix")
  }
  cached
}
|
62d23ca7bd46b2807205d12d1aecf953b0315db4
|
5072cf394a8dd2bcae7f05556344751485651abc
|
/man/scatterplotDL.Rd
|
050eb3abe634915510c2295d2285bd03fd43b741
|
[] |
no_license
|
cran/oaPlots
|
838a858dcf4e606cfcdb3d28e63c34e524dfd0eb
|
46714ee30c71efc8ddbdaf0309212be8b0d43163
|
refs/heads/master
| 2016-08-11T15:21:15.248062
| 2015-11-30T14:51:34
| 2015-11-30T14:51:34
| 48,085,147
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,523
|
rd
|
scatterplotDL.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/densityLegend.R
\name{scatterplotDL}
\alias{scatterplotDL}
\title{Plot a base-graphics scatterplot with accompanying density legend}
\usage{
scatterplotDL(x, y, colorVar, colorPalette, side = "right",
proportion = 0.3, legendTitle = NULL, ...)
}
\arguments{
\item{x}{the x coordinates to be handed to plot()}
\item{y}{the y coordinates of points in the plot()}
\item{colorVar}{the numeric vector of values used to color the points}
\item{colorPalette}{a color palette. If 'colorPalette' contains, for example,
6 colors, then the values of colorVar will be split and assigned to these 6
colors}
\item{side}{the side of the plot to put the density legend on ("left", "right",
"top", or "bottom")}
\item{proportion}{the proportion of the plot (from 0 to 1) to allocate to the
density legend (defaults to 0.3)}
\item{legendTitle}{string for labelling the density legend}
\item{...}{additional parameters to be passed to plot()}
}
\value{
none, plot is added to device
}
\description{
Plot a base-graphics scatterplot with accompanying density legend
}
\examples{
library(ggplot2)
library(RColorBrewer)
colorPalette <- brewer.pal(9, "YlOrRd")[4:9]
scatterplotDL(x = mtcars$mpg, y = mtcars$wt, colorVar = mtcars$hp,
legendTitle = "Horse Power", colorPalette = colorPalette, pch = 19,
xlab = "MPG (miles per gallon)", ylab = "Weight (tonnes)",
main = "MPG by Weight in Cars \\n Colored by Horse Power")
}
\author{
Jason Waddell
}
|
67b4912f2b8c0332c0fef6334a7165a0f12eaf61
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/TAM/R/tam.latreg.R
|
7a0712b94b32d6d69f27917cd635d87aa315d8f0
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,740
|
r
|
tam.latreg.R
|
###################################################################
# latent regression
tam.latreg <- function( like , theta=NULL , Y=NULL , group=NULL ,
formulaY = NULL , dataY = NULL ,
beta.fixed = NULL , beta.inits = NULL ,
variance.fixed = NULL , variance.inits = NULL ,
est.variance = TRUE , pweights = NULL , pid=NULL ,
userfct.variance = NULL , variance.Npars = NULL ,
control = list()
){
s1 <- Sys.time()
CALL <- match.call()
# display
disp <- "....................................................\n"
increment.factor <- progress <- nodes <- snodes <- ridge <- xsi.start0 <- QMC <- NULL
maxiter <- conv <- convD <- min.variance <- max.increment <- Msteps <- convM <- NULL
pweightsM <- R <- NULL
# attach control elements
e1 <- environment()
con <- list( convD = .001 ,conv = .0001 , snodes=0 , convM = .0001 , Msteps = 4 ,
maxiter = 1000 ,min.variance = .001 , progress = TRUE , ridge=0 ,
seed = NULL )
#a0 <- Sys.time()
con[ names(control) ] <- control
Lcon <- length(con)
con1a <- con1 <- con ;
names(con1) <- NULL
for (cc in 1:Lcon ){
assign( names(con)[cc] , con1[[cc]] , envir = e1 )
}
if ( is.null(theta) ){
theta <- attr( like , "theta" )
}
nodes <- theta
ndim <- ncol(theta)
if (progress){
cat(disp)
cat("Processing Data ", paste(Sys.time()) , "\n") ;
utils::flush.console()
}
if ( ! is.null(group) ){
con1a$QMC <- QMC <- FALSE
con1a$snodes <- snodes <- 0
}
if ( !is.null(con$seed)){ set.seed( con$seed ) }
nullY <- is.null(Y)
nstud <- nrow(like) # number of students
if ( is.null( pweights) ){
pweights <- rep(1,nstud) # weights of response pattern
}
if (progress){
cat(" * Response Data:" , nstud , "Persons \n" ) ;
utils::flush.console()
}
#!! check dim of person ID pid
if ( is.null(pid) ){
pid <- seq(1,nstud)
} else {
pid <- unname(c(unlist(pid)))
}
# normalize person weights to sum up to nstud
pweights <- nstud * pweights / sum(pweights)
betaConv <- FALSE #flag of regression coefficient convergence
varConv <- FALSE #flag of variance convergence
# nnodes <- length(nodes)^ndim
nnodes <- nrow(nodes)
if ( snodes > 0 ){ nnodes <- snodes }
#****
# display number of nodes
if (progress ){
l1 <- paste0( " * ")
if (snodes==0){ l1 <- paste0(l1 , "Numerical integration with ")}
else{
if (QMC){
l1 <- paste0(l1 , "Quasi Monte Carlo integration with ")
} else {
l1 <- paste0(l1 , "Monte Carlo integration with ")
}
}
cat( paste0( l1 , nnodes , " nodes\n") )
if (nnodes > 8000){
cat(" @ Are you sure that you want so many nodes?\n")
cat(" @ Maybe you want to use Quasi Monte Carlo integration with fewer nodes.\n")
}
}
#*********
# variance inits
# initialise conditional variance
if ( !is.null( variance.inits ) ){
variance <- variance.inits
} else variance <- diag( ndim )
if ( !is.null(variance.fixed) ){
variance[ variance.fixed[,1:2 ,drop=FALSE] ] <- variance.fixed[,3]
variance[ variance.fixed[,c(2,1) ,drop=FALSE] ] <- variance.fixed[,3]
}
# group indicators for variance matrix
if ( ! is.null(group) ){
groups <- sort(unique(group))
G <- length(groups)
group <- match( group , groups )
var.indices <- rep(1,G)
for (gg in 1:G){
var.indices[gg] <- which( group == gg )[1]
}
} else {
G <- 1
groups <- NULL
}
# beta inits
# (Y'Y)
if ( ! is.null( formulaY ) ){
formulaY <- stats::as.formula( formulaY )
Y <- stats::model.matrix( formulaY , dataY )[,-1] # remove intercept
nullY <- FALSE
}
# if ( ! is.null(Y) ){
if (! nullY){
Y <- as.matrix(Y)
nreg <- ncol(Y)
if ( is.null( colnames(Y) ) ){
colnames(Y) <- paste("Y" , 1:nreg , sep="")
}
if ( ! nullY ){
Y <- cbind(1,Y) #add a "1" column for the Intercept
colnames(Y)[1] <- "Intercept"
}
} else
{
Y <- matrix( 1 , nrow=nstud , ncol=1 )
nreg <- 0
}
if ( G > 1 & nullY ){
Y <- matrix( 0 , nstud , G )
# colnames(Y) <- paste("group" , 1:G , sep="")
colnames(Y) <- paste("group" , groups , sep="")
for (gg in 1:G){ Y[,gg] <- 1*(group==gg) }
nreg <- G - 1
}
# W <- t(Y * pweights) %*% Y
W <- crossprod(Y * pweights , Y )
if (ridge > 0){ diag(W) <- diag(W) + ridge }
YYinv <- solve( W )
#initialise regressors
# if ( is.null(beta.fixed) ){
# beta.fixed <- matrix( c(1,1,0) , nrow= 1)
# if ( ndim > 1){
# for ( dd in 2:ndim){
# beta.fixed <- rbind( beta.fixed , c( 1 , dd , 0 ) )
# }}}
#****
if( ! is.matrix(beta.fixed) ){
if ( ! is.null(beta.fixed) ){
if ( ! beta.fixed ){ beta.fixed <- NULL }
}
}
#****
beta <- matrix(0, nrow = nreg+1 , ncol = ndim)
if ( ! is.null( beta.inits ) ){
beta[ beta.inits[,1:2] ] <- beta.inits[,3]
}
beta.min.deviance <- beta
variance.min.deviance <- variance
# cat("b200"); a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
# nodes
if ( snodes == 0 ){
# theta <- as.matrix( expand.grid( as.data.frame( matrix( rep(nodes, ndim) , ncol = ndim ) ) ) )
#we need this to compute sumsig2 for the variance
# theta2 <- matrix(theta.sq(theta), nrow=nrow(theta),ncol=ncol(theta)^2)
theta2 <- matrix(theta.sq2(theta), nrow=nrow(theta),ncol=ncol(theta)^2)
# grid width for calculating the deviance
thetawidth <- diff(theta[,1] )
thetawidth <- ( ( thetawidth[ thetawidth > 0 ])[1] )^ndim
thetasamp.density <- NULL
} else {
# sampled theta values
if (QMC){
r1 <- sfsmisc::QUnif(n=snodes, min = 0, max = 1, n.min = 1, p=ndim, leap = 409)
theta0.samp <- stats::qnorm( r1 )
} else {
theta0.samp <- matrix( MASS::mvrnorm( snodes , mu = rep(0,ndim) ,
Sigma = diag(1,ndim ) ) ,
nrow= snodes , ncol=ndim )
}
thetawidth <- NULL
}
deviance <- 0
deviance.history <- matrix( 0 , nrow=maxiter , ncol = 2)
colnames(deviance.history) <- c("iter" , "deviance")
deviance.history[,1] <- 1:maxiter
iter <- 0
a02 <- a1 <- 999 # item parameter change
a4 <- 0
YSD <- max( apply( Y , 2 , stats::sd ) )
if (YSD > 10^(-15) ){ YSD <- TRUE } else { YSD <- FALSE }
# define progress bar for M step
# mpr <- round( seq( 1 , np , len = 10 ) )
hwt.min <- 0
deviance.min <- 1E100
itemwt.min <- 0
nomiss <- TRUE
Variance.fixed <- variance.fixed
res.hwt <- list()
##############################################################
#Start EM loop here
while ( ( (!betaConv | !varConv) | ((a1 > conv) | (a4 > conv) | (a02 > convD)) ) & (iter < maxiter) ) {
iter <- iter + 1
if (progress){
cat(disp)
cat("Iteration" , iter , " " , paste( Sys.time() ) )
cat("\nE Step\n") ;
utils::flush.console()
}
# calculate nodes for Monte Carlo integration
if ( snodes > 0){
# theta <- beta[ rep(1,snodes) , ] + t ( t(chol(variance)) %*% t(theta0.samp) )
theta <- beta[ rep(1,snodes) , ] + theta0.samp %*% chol(variance)
# calculate density for all nodes
thetasamp.density <- mvtnorm::dmvnorm( theta , mean = as.vector(beta[1,]) , sigma = variance )
# recalculate theta^2
# theta2 <- matrix( theta.sq(theta) , nrow=nrow(theta) , ncol=ncol(theta)^2 )
theta2 <- matrix( theta.sq2(theta) , nrow=nrow(theta) , ncol=ncol(theta)^2 )
}
olddeviance <- deviance
# a0 <- Sys.time()
#***
# print(AXsi)
# AXsi[ is.na(AXsi) ] <- 0
# print(AXsi)
# cat("calc_prob") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
# calculate student's prior distribution
gwt <- stud_prior.v2(theta=theta , Y=Y , beta=beta , variance=variance , nstud=nstud ,
nnodes=nnodes , ndim=ndim,YSD=YSD, unidim_simplify=FALSE)
# compute posterior
hwt <- like * gwt
res.hwt$rfx <- rowSums(hwt)
hwt <- hwt / rowSums(hwt)
# cat("calc_posterior") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
if (progress){
cat("M Step Intercepts |")
utils::flush.console()
}
oldbeta <- beta
oldvariance <- variance
# cat("before mstep regression") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
# M step: estimation of beta and variance
resr <- latreg.mstep.regression( hwt ,
pweights , pweightsM , Y , theta , theta2 , YYinv , ndim ,
nstud , beta.fixed , variance , Variance.fixed , group , G ,
snodes = snodes , thetasamp.density=thetasamp.density , nomiss=FALSE)
# cat("mstep regression") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
beta <- resr$beta
variance <- resr$variance
if( ndim == 1 ){ # prevent negative variance
variance[ variance < min.variance ] <- min.variance
}
itemwt <- resr$itemwt
# constraint cases (the design matrix A has no constraint on items)
if ( max(abs(beta-oldbeta)) < conv){
betaConv <- TRUE # not include the constant as it is constrained
}
if (G == 1){
diag(variance) <- diag(variance) + 10^(-10)
}
# function for reducing the variance
if ( ! is.null( userfct.variance ) ){
variance <- do.call( userfct.variance , list(variance ) )
}
if (max(abs(variance-oldvariance)) < conv) varConv <- TRUE
# calculate deviance
if ( snodes == 0 ){
deviance <- - 2 * sum( pweights * log( res.hwt$rfx * thetawidth ) )
# deviance <- - 2 * sum( pweights * log( res.hwt$like * thetawidth ) )
} else {
# deviance <- - 2 * sum( pweights * log( res.hwt$rfx ) )
deviance <- - 2 * sum( pweights * log( rowMeans( res.hwt$swt ) ) )
}
deviance.history[iter,2] <- deviance
a01 <- abs( ( deviance - olddeviance ) / deviance )
a02 <- abs( ( deviance - olddeviance ) )
if( deviance > deviance.min ){
beta.min.deviance <- beta.min.deviance
variance.min.deviance <- variance.min.deviance
hwt.min <- hwt.min
deviance.min <- deviance.min
} else {
beta.min.deviance <- beta
variance.min.deviance <- variance
hwt.min <- hwt
deviance.min <- deviance
}
a1 <- 0
a2 <- max( abs( beta - oldbeta ))
a3 <- max( abs( variance - oldvariance ))
if (progress){
cat( paste( "\n Deviance =" , round( deviance , 4 ) ))
devch <- -( deviance - olddeviance )
cat( " | Deviance change:", round( devch , 4 ) )
if ( devch < 0 & iter > 1 ){
cat ("\n!!! Deviance increases! !!!!")
cat ("\n!!! Choose maybe fac.oldxsi > 0 and/or increment.factor > 1 !!!!")
}
cat( "\n Maximum regression parameter change:" , round( a2 , 6 ) )
if ( G == 1 ){
cat( "\n Variance: " , round( variance[ ! lower.tri(variance)] , 4 ) , " | Maximum change:" , round( a3 , 6 ) )
} else {
cat( "\n Variance: " , round( variance[var.indices] , 4 ) ,
" | Maximum change:" , round( a3 , 6 ) )
}
cat( "\n beta ",round(beta,4) )
cat( "\n" )
utils::flush.console()
}
# cat("rest") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
} # end of EM loop
#******************************************************
beta.min.deviance -> beta
variance.min.deviance -> variance
hwt.min -> hwt
deviance.min -> deviance
##*** Information criteria
ic <- latreg_TAM.ic( nstud , deviance ,
beta , beta.fixed , ndim , variance.fixed , G ,
est.variance , variance.Npars=NULL , group )
#####################################################
# post ... posterior distribution
# create a data frame person
person <- data.frame( "pid"=pid , "case" = 1:nstud , "pweight" = pweights )
# person$score <- rowSums( resp * resp.ind )
# use maxKi here; from "design object"
nstudl <- rep(1,nstud)
# person$max <- rowSums( outer( nstudl , apply( resp ,2 , max , na.rm=TRUE) ) * resp.ind )
# calculate EAP
# EAPs are only computed in the unidimensional case for now,
# but can be easily adapted to the multidimensional case
if ( snodes == 0 ){
hwtE <- hwt
} else {
# hwtE <- hwt / snodes
hwtE <- hwt
}
if ( ndim == 1 ){
person$EAP <- rowSums( hwtE * outer( nstudl , theta[,1] ) )
person$SD.EAP <- sqrt( rowSums( hwtE * outer( nstudl , theta[,1]^2 ) ) - person$EAP^2)
#***
# calculate EAP reliability
# EAP variance
EAP.variance <- stats::weighted.mean( person$EAP^2 , pweights ) - ( stats::weighted.mean( person$EAP , pweights ) )^2
EAP.error <- stats::weighted.mean( person$SD.EAP^2 , pweights )
EAP.rel <- EAP.variance / ( EAP.variance + EAP.error )
} else {
EAP.rel <- rep(0,ndim)
names(EAP.rel) <- paste("Dim",1:ndim , sep="")
for ( dd in 1:ndim ){
# dd <- 1 # dimension
person$EAP <- rowSums( hwtE * outer( nstudl , theta[,dd] ) )
person$SD.EAP <- sqrt(rowSums( hwtE * outer( nstudl , theta[,dd]^2 ) ) - person$EAP^2)
#***
# calculate EAP reliability
# EAP variance
EAP.variance <- stats::weighted.mean( person$EAP^2 , pweights ) - ( stats::weighted.mean( person$EAP , pweights ) )^2
EAP.error <- stats::weighted.mean( person$SD.EAP^2 , pweights )
EAP.rel[dd] <- EAP.variance / ( EAP.variance + EAP.error )
colnames(person)[ which( colnames(person) == "EAP" ) ] <- paste("EAP.Dim" , dd , sep="")
colnames(person)[ which( colnames(person) == "SD.EAP" ) ] <- paste("SD.EAP.Dim" , dd , sep="")
}
# person <- data.frame( "pid" = pid , person )
}
#cat("person parameters") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
############################################################
s2 <- Sys.time()
if (progress){
cat(disp)
cat("Regression Coefficients\n")
print( beta , 4 )
cat("\nVariance:\n" ) # , round( varianceM , 4 ))
if (G==1 ){
varianceM <- matrix( variance , nrow=ndim , ncol=ndim )
print( varianceM , 4 )
} else {
print( variance[ var.indices] , 4 ) }
if ( ndim > 1){
cat("\nCorrelation Matrix:\n" ) # , round( varianceM , 4 ))
print( cov2cor(varianceM) , 4 )
}
cat("\n\nEAP Reliability:\n")
print( round (EAP.rel,3) )
cat("\n-----------------------------")
devmin <- which.min( deviance.history[,2] )
if ( devmin < iter ){
cat(paste("\n\nMinimal deviance at iteration " , devmin ,
" with deviance " , round(deviance.history[ devmin , 2 ],3) , sep="") , "\n")
cat("The corresponding estimates are\n")
cat(" xsi.min.deviance\n beta.min.deviance \n variance.min.deviance\n\n")
}
cat( "\nStart: " , paste(s1))
cat( "\nEnd: " , paste(s2),"\n")
print(s2-s1)
cat( "\n" )
}
# Output list
deviance.history <- deviance.history[ 1:iter , ]
res <- list( "beta" = beta , "variance" = variance ,
"person" = person , pid = pid , "EAP.rel" = EAP.rel ,
"post" = hwt , "theta" = theta ,
"Y" = Y , "group" = group ,
"G" = if ( is.null(group)){1} else { length(unique( group ) )} ,
"groups" = if ( is.null(group)){1} else { groups } ,
"formulaY" = formulaY , "dataY" = dataY ,
"pweights" = pweights ,
"time" = c(s1,s2,s2-s1) ,
"nstud" = nstud ,
"hwt" = hwt , "like" = like ,
"ndim" = ndim ,
"beta.fixed" = beta.fixed ,
"variance.fixed" = variance.fixed ,
"nnodes" = nnodes , "deviance" = deviance ,
"ic" = ic ,
"deviance.history" = deviance.history ,
"control" = con1a , "iter" = iter ,
"YSD"=YSD , CALL = CALL
)
class(res) <- "tam.latreg"
return(res)
}
# tam.mml.output <- function(){
# }
|
09e83d63411754929e0afcdd819c4868998a16c1
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Numerical_Methods_In_Finance_And_Economics:_A_Matlab-Based_Introduction_by_Paolo_Brandimarte/CH8/EX8.14/Page_459_AsianMCGeoCV.R
|
3847f459b4d6eea05f684a1e85c98808243ed301
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 2,112
|
r
|
Page_459_AsianMCGeoCV.R
|
require(OptionPricing)
require(fBasics)
require(fOptions)
require(varbvs)
require(lmom)
# Closed-form price of a discretely monitored geometric-average Asian call
# under Black-Scholes dynamics (lognormal geometric-mean formula).
#
# Args:
#   S0       current asset price
#   K        strike
#   r        continuously compounded risk-free rate
#   T        time to maturity (years)
#   sigma    volatility of the underlying
#   delta    continuous dividend yield
#   NSamples number of equally spaced averaging dates
#
# Returns: the option price (numeric scalar).
GeometricAsian <- function(S0, K, r, T, sigma, delta, NSamples) {
  dT <- T / NSamples
  nu <- r - sigma^2 / 2 - delta
  # Mean (a) and variance (b) of the log of the geometric average.
  a <- log(S0) + nu * dT + 0.5 * nu * (T - dT)
  b <- sigma^2 * dT + sigma^2 * (T - dT) * (2 * NSamples - 1) / 6 / NSamples
  x <- (a - log(K) + b) / sqrt(b)
  # stats::pnorm() is the standard-normal CDF; it returns the same value as
  # lmom::cdfnor() with cdfnor's default c(0, 1) parameters, and drops the
  # third-party dependency.
  exp(-r * T) * (exp(a + b / 2) * pnorm(x) - K * pnorm(x - sqrt(b)))
}
# Normal-theory confidence interval for the mean of a sample.
#
# Args:
#   data       numeric sample
#   variance   variance estimate to use (defaults to the sample variance)
#   conf.level two-sided confidence level (default 95%)
#
# Returns: c(lower, upper).
norm.interval <- function(data, variance = var(data), conf.level = 0.95) {
  crit <- qnorm(1 - (1 - conf.level) / 2)
  half_width <- crit * sqrt(variance / length(data))
  mean(data) + c(-1, 1) * half_width
}
# Simulate NRepl price paths of geometric Brownian motion on an equally
# spaced time grid.
#
# Args:
#   S0     initial price (stored in column 1 of every path)
#   mu     drift (use the risk-free rate for risk-neutral pricing)
#   sigma  volatility
#   T      horizon in years
#   NSteps number of time steps
#   NRepl  number of replications (rows)
#
# Returns: an NRepl x (NSteps + 1) matrix of prices.
#
# All NRepl * NSteps Gaussian increments are drawn in a single rnorm() call
# and cumulated column-by-column, replacing the original scalar rnorm(1)
# double loop.  The paths have exactly the same distribution, although a
# given set.seed() no longer reproduces the old element-by-element stream.
AssetPaths <- function(S0, mu, sigma, T, NSteps, NRepl) {
  dt <- T / NSteps
  nudt <- (mu - 0.5 * sigma^2) * dt
  sidt <- sigma * sqrt(dt)
  increments <- matrix(nudt + sidt * rnorm(NRepl * NSteps),
                       nrow = NRepl, ncol = NSteps)
  # Cumulate log-returns along each row (NSteps passes, vectorized over rows).
  if (NSteps > 1) {
    for (j in 2:NSteps) {
      increments[, j] <- increments[, j - 1] + increments[, j]
    }
  }
  SPaths <- matrix(0, NRepl, 1 + NSteps)
  SPaths[, 1] <- S0
  SPaths[, -1] <- S0 * exp(increments)
  SPaths
}
# Monte Carlo pricing of an arithmetic-average Asian call using the
# geometric-average Asian option (known in closed form) as a control variate.
#
# Args:
#   S0, K, r, T, sigma  usual Black-Scholes inputs (no dividends)
#   NSamples            number of averaging dates per path
#   NRepl               replications for the main run
#   NPilot              replications for the pilot run that sets the
#                       control-variate coefficient
#
# Returns: c(fitted normal parameters of the CV estimator via fBasics'
# unexported .normFit, 95% confidence interval from norm.interval()).
AsianMCGeoCV <- function(S0, K, r, T, sigma, NSamples, NRepl, NPilot) {
  discount <- exp(-r * T)
  geo_exact <- GeometricAsian(S0, K, r, T, sigma, 0, NSamples)
  # --- pilot stage: estimate the optimal control-variate coefficient ---
  geo_pilot <- matrix(0, NPilot, 1)
  ari_pilot <- matrix(0, NPilot, 1)
  for (k in seq_len(NPilot)) {
    path <- AssetPaths(S0, r, sigma, T, NSamples, 1)
    fixings <- path[, 2:(NSamples + 1)]
    geo_pilot[k] <- discount * max(0, prod(fixings)^(1 / NSamples) - K)
    ari_pilot[k] <- discount * max(0, mean(fixings) - K)
  }
  cv_coef <- -cov(cbind(geo_pilot, ari_pilot))[1, 2] / var(geo_pilot)
  # --- main run: controlled estimator per replication ---
  control_vars <- matrix(0, NRepl, 1)
  for (k in seq_len(NRepl)) {
    path <- AssetPaths(S0, r, sigma, T, NSamples, 1)
    fixings <- path[2:(NSamples + 1)]
    geo_price <- discount * max(0, prod(fixings)^(1 / NSamples) - K)
    ari_price <- discount * max(0, mean(fixings) - K)
    control_vars[k] <- ari_price + cv_coef * (geo_price - geo_exact)
  }
  c(.normFit(control_vars), norm.interval(control_vars))
}
# Script parameters and entry point: price an arithmetic-average Asian call
# (S0 = 50, K = 55, 40% vol, 1-year maturity, 12 fixings) with a
# 1000-replication pilot run followed by 9000 controlled replications.
set.seed(2372)
S0 = 50
K = 55
r = 0.05
sigma = 0.4
T = 1
NSamples = 12
NRepl = 9000
NPilot = 1000
# Returns the fitted normal parameters of the CV estimator and a 95% CI.
AsianMCGeoCV(S0,K,r,T,sigma,NSamples,NRepl,NPilot)
|
fda7beb7c6236af3fce438d243e181df30d350bc
|
504447c2284e6fc4c959c9b1c3859ffe14d43f7b
|
/forecast_pckg.R
|
fc3e9429fa3e14cd64a99b7aef6245edace8e91c
|
[] |
no_license
|
miladdoostan/Uber-Search-Time-Series-Forecasting
|
c4642f9c0618c9ade70c01da8f8e53ce0acc5a03
|
3216287f727ff6adf93c5eebeb078b172072fc68
|
refs/heads/master
| 2020-04-05T02:09:06.506316
| 2018-11-16T17:26:58
| 2018-11-16T17:26:58
| 156,465,844
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
forecast_pckg.R
|
# Necessary packages that should be installed for conducting the forecast job.
# A single vectorized install.packages() call replaces the original six
# separate calls: the same packages are installed, with one repository lookup.
install.packages(c('fpp2', 'xts', 'data.table', 'ggpubr', 'seasonal', 'repr'))
|
23f7d56930f3fbcff7bbdf9e3e513d511cb4a1e7
|
738ed648f537dd49a0ce27b13230cc0c1cc82d18
|
/service_pmml_openscoring/modeling.R
|
f7b0d18f2774066a392cfee905dd186f0a4702a1
|
[] |
no_license
|
biwa7636/mlaas
|
300d651c13ac6c8e4c0244e50fbb2ba11a2787e8
|
2169cb439c7557835a2e198176eff6c95c92439a
|
refs/heads/master
| 2021-01-19T05:14:23.857902
| 2016-06-30T15:53:53
| 2016-06-30T15:53:53
| 62,154,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
modeling.R
|
# Fit a support-vector classifier to the iris data (e1071) and export the
# trained model as PMML so it can be served by an external scoring engine.
library(e1071)
library(pmml)
data("iris")
# Predict Species from all remaining columns; svm() uses C-classification
# with an RBF kernel by default when the response is a factor.
model <- svm(data=iris, Species ~ .)
p <- pmml(model)
# NOTE(review): saveXML() comes from the XML package, pulled in by pmml --
# confirm it is attached in the target environment.
saveXML(p, "iris.xml")
|
d70564c2e15441dc81d11cc7e20c354570208eb7
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta_interleaved_matrices/AFL_communities_individual_based_sampling_beta_interleaved_matrices/communities_individual_based_sampling_beta_interleaved_matrices_valgrind_files/1615840681-test.R
|
ed932da489e82439229ddd1189d99d67d8c0f9c9
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 217
|
r
|
1615840681-test.R
|
# Auto-generated replay input for the internal CNull routine (originally run
# under valgrind to surface memory errors): a NULL first argument, 14
# repetitions, and a 1x1 matrix holding an extreme denormal-range double.
testlist <- list(m = NULL, repetitions = 14L, in_m = structure(2.99939362779126e-241, .Dim = c(1L, 1L)))
# do.call() forwards the named list entries as arguments to the function.
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result)
|
6f9edcbf3ac038e2fc167c30bc918d2b15e18ff4
|
964e8158ec7aebb48e7fb5d789673a41e607e45b
|
/R/class_clv_pnbd_staticcov.R
|
93851e4c81f6426e12ca0074ece0b3f5dd6a6ccc
|
[] |
no_license
|
lazycrazyowl/CLVTools
|
c8b3b2e08664923326d8155af70942eb0b89cf84
|
2ffe72f8ad751b04a1962ec3edf4c8ee75766717
|
refs/heads/master
| 2022-11-16T01:11:13.550308
| 2020-06-25T17:39:14
| 2020-06-25T17:39:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,113
|
r
|
class_clv_pnbd_staticcov.R
|
#' @templateVar name_model_full Pareto/NBD
#' @templateVar name_class_clvmodel clv.model.pnbd.static.cov
#' @template template_class_clvfittedmodels_staticcov
#'
#' @template template_slot_pnbdcbs
#'
#' @seealso \link{clv.fitted.static.cov-class}, \link{clv.model.pnbd.static.cov-class}, \link{clv.pnbd-class}, \link{clv.pnbd.dynamic.cov-class}
#'
#' @keywords internal
#' @importFrom methods setClass
#' @include class_clv_model_pnbd_staticcov.R class_clv_data_staticcovariates.R class_clv_fitted_staticcov.R
# S4 class for a Pareto/NBD model fitted with static covariates.  It inherits
# all fitted-model machinery from clv.fitted.static.cov and adds one slot,
# `cbs`: a data.table of per-customer sufficient statistics (built by
# pnbd_cbs() in the constructor below).
setClass(Class = "clv.pnbd.static.cov", contains = "clv.fitted.static.cov",
         slots = c(
           cbs = "data.table"),
         # Prototype is labeled not useful anymore, but still recommended by Hadley / Bioc
         prototype = list(
           cbs = data.table()))
#' @importFrom methods new
# Internal constructor: builds the customer-by-sufficient-statistic table,
# wraps the generic static-covariate fitted object, and stores both in a new
# clv.pnbd.static.cov instance.
clv.pnbd.static.cov <- function(cl, clv.data){
  cbs.table <- pnbd_cbs(clv.data = clv.data)
  fitted.base <- clv.fitted.static.cov(cl = cl,
                                       clv.model = clv.model.pnbd.static.cov(),
                                       clv.data = clv.data)
  new("clv.pnbd.static.cov", fitted.base, cbs = cbs.table)
}
|
e9b9b8e86e019d909dbb40da1aa30a5cf62b3c8f
|
5a4cbe8227f52ec315ae7351c8f7793aad70b521
|
/test_source/global.r
|
255b19f22bdf998222a83a53f28868dc21f2eaa1
|
[] |
no_license
|
DrRoad/fdk
|
199584cd3442b3d898efb4722a33dae0a25188d2
|
31db3f727c834b2d0dcc7c03bcaca53ee5775591
|
refs/heads/master
| 2023-05-11T16:18:56.351151
| 2021-05-14T10:11:43
| 2021-05-14T10:11:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,139
|
r
|
global.r
|
# GLOBAL ####
# Start-up configuration and data pull for the forecast-insight app.
# NOTE(review): get_default_hyperpar(), import_data(), get_insight_data(),
# get_graph_stat() and get_tables() are project functions defined elsewhere.
source_conf = list(source = "oc"
                   , date_cycle = "2021-05-01"
                   , db = list("full_sales"
                               , "full_forecast"
                               , "regressor"
                               #, "forecast_item_info"
                   )
                   , countries = c("NO", "SE", "DK", "FI", "NL", "BE", "IN", "LV", "LT", "EE")
                   #, countries = "RU"
                   , filters = list(category = "HistoricalSales"
                                    , cycle_category = "before_cleansing")
                   , gbus = c("GEM")
                   , join_hist_forecast = T)
# Default hyper-parameters and the raw data pull driven by source_conf.
parameter <- get_default_hyperpar()
oc_data <- import_data(source_conf = source_conf)
# Unique, sorted list of forecast items found in the sales data.
forecast_item_list <- oc_data$sales$forecast_item %>%
  unique() %>%
  sort()
# Insight data for one hard-coded forecast item ("FI: 474452").
insight_data <- get_insight_data(oc_data = oc_data
                                 , key = "FI: 474452"
                                 , parameter = get_default_hyperpar())
get_graph_stat(insight_data = insight_data, graph_type = "seas_me")
get_tables(insight_data = insight_data, table_type = "year_agg")
|
13897d743a3532279df90615d3466cdb140cc723
|
806fedb9f7be2fffeb350badb7fa7699ba092dd3
|
/src/prep/prep_acs.R
|
f596a22503f25ba5c422916013e1c37135546a9d
|
[] |
no_license
|
gopalpenny/climateviews
|
5c6455f3398dcbd1dcc541a0a147b9dea6f7ea09
|
7c290705dd6f473bebc6bc264689fd75a1dcda2e
|
refs/heads/master
| 2020-12-21T10:43:14.845610
| 2020-02-01T01:38:14
| 2020-02-01T01:38:14
| 236,407,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
r
|
prep_acs.R
|
# https://cran.r-project.org/web/packages/tidycensus/tidycensus.pdf
# vignette: https://walkerke.github.io/tidycensus/articles/basic-usage.html
# Exploratory lookup of ACS 2018 5-year variable codes via tidycensus.
# NOTE(review): filter() below assumes dplyr is attached by the calling
# environment -- there is no library() call in this file; confirm load order.
v18 <- tidycensus::load_variables(2018, "acs5", cache = TRUE)
# Browse coverage-rate concepts.
v18 %>% filter(grepl("TOTAL POPULATION COVERAGE RATE",concept))
# Education-related variables whose label mentions "female"/"Female".
v18_female <- v18 %>% filter(grepl("[Ff]emale",label),grepl("EDUCATION",concept))
v18_female$label
# tidycensus::get_acs(geography="county",variables = c(total_race="B98013_001"),
|
587443ed991fde6ed2bcdac173afefaffb074a01
|
67dc617e4cf6730f4231c87578e85bbca9dd4d4a
|
/man/nneighb.Rd
|
b29535eeee1e17430fb69135dc39b3948bf9a7f2
|
[] |
no_license
|
cran/kdtools
|
f5ed74d2aac44842fee6ce6c7714ac6f16b72576
|
177d3966d4342c6296a03fd187a527589fe8e59c
|
refs/heads/master
| 2021-11-19T11:06:07.446362
| 2021-10-08T07:30:02
| 2021-10-08T07:30:02
| 128,370,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,059
|
rd
|
nneighb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kdtools.R
\name{kd_nearest_neighbors}
\alias{kd_nearest_neighbors}
\alias{kd_nearest_neighbors.matrix}
\alias{kd_nearest_neighbors.arrayvec}
\alias{kd_nearest_neighbors.data.frame}
\alias{kd_nn_indices}
\alias{kd_nn_indices.arrayvec}
\alias{kd_nn_indices.matrix}
\alias{kd_nn_indices.data.frame}
\alias{kd_nearest_neighbor}
\alias{kd_nearest_neighbor.matrix}
\alias{kd_nearest_neighbor.arrayvec}
\title{Find nearest neighbors}
\usage{
kd_nearest_neighbors(x, v, n, ...)
\method{kd_nearest_neighbors}{matrix}(x, v, n, cols = NULL, alpha = 0, ...)
\method{kd_nearest_neighbors}{arrayvec}(x, v, n, ...)
\method{kd_nearest_neighbors}{data.frame}(x, v, n, cols = NULL, w = NULL, ...)
kd_nn_indices(x, v, n, ...)
\method{kd_nn_indices}{arrayvec}(x, v, n, distances = FALSE, ...)
\method{kd_nn_indices}{matrix}(x, v, n, cols = NULL, distances = FALSE, alpha = 0, ...)
\method{kd_nn_indices}{data.frame}(x, v, n, cols = NULL, w = NULL, distances = FALSE, ...)
kd_nearest_neighbor(x, v)
\method{kd_nearest_neighbor}{matrix}(x, v)
\method{kd_nearest_neighbor}{arrayvec}(x, v)
}
\arguments{
\item{x}{an object sorted by \code{\link{kd_sort}}}
\item{v}{a vector specifying where to look}
\item{n}{the number of neighbors to return}
\item{...}{ignored}
\item{cols}{integer or character vector or formula indicating columns}
\item{alpha}{approximate neighbors within (1 + alpha)}
\item{w}{distance weights}
\item{distances}{return distances as attribute if true}
}
\value{
\tabular{ll}{
\code{kd_nearest_neighbors} \tab one or more rows from the sorted input \cr
\code{kd_nn_indices} \tab a vector of row indices indicating the result \cr
\code{kd_nearest_neighbor} \tab the row index of the neighbor \cr
}
}
\description{
Find nearest neighbors
}
\examples{
if (has_cxx17()) {
x = matrix(runif(200), 100)
y = matrix_to_tuples(x)
kd_sort(y, inplace = TRUE)
y[kd_nearest_neighbor(y, c(1/2, 1/2)),]
kd_nearest_neighbors(y, c(1/2, 1/2), 3)
y[kd_nn_indices(y, c(1/2, 1/2), 5),]
}
}
|
f61285c9a876ececfbab5d1ecedf09ac42642827
|
fff7d4bd3016abe13e448fcf8a62acdf09c66bf7
|
/Agosto-Diciembre 2022/funciones/probar funciones.R
|
d4e635a30897aedc3244798796277f6793a3aa26
|
[] |
no_license
|
rpizarrog/probabilidad-y-estad-stica
|
205193d69ac003fc0855b317b3f6b4d2224143c7
|
c392c49a6eb9f24a21a41f9a88cfd9179c3d738d
|
refs/heads/master
| 2023-05-29T23:09:21.081697
| 2023-05-29T15:50:53
| 2023-05-29T15:50:53
| 100,639,325
| 2
| 3
| null | 2017-08-18T17:05:22
| 2017-08-17T19:43:46
|
HTML
|
UTF-8
|
R
| false
| false
| 833
|
r
|
probar funciones.R
|
# Prints the fixed greeting "Hola saludos" and returns it invisibly
# (print() returns its argument invisibly).
f_saludo1 <- function() {
  saludo_fijo <- "Hola saludos"
  print(saludo_fijo)
}
# Builds and returns (without printing) the greeting "Hola <nombre> como estás".
f_saludo2 <- function(nombre) {
  paste0("Hola ", nombre, " como estás")
}
# Prints the arithmetic mean of a numeric vector and returns it invisibly
# (print() returns its argument).  Uses mean() instead of the hand-rolled
# sum()/length(); for an empty vector mean() yields NaN, matching the
# original's 0/0.
f_promedio <- function(numeros) {
  print(mean(numeros))
}
# Returns a greeting that also mentions the age.  The double space after
# "Tienes" is intentional: the original paste() added its separator after the
# literal "Tienes " -- the output is preserved byte-for-byte here.
f_saludo3 <- function(nombre, edad) {
  paste0("Hola ", nombre, " como estás. Tienes  ", edad, " años")
}
# Prints "Hola" and returns it invisibly (via print()).
saludo1 <- function() {
  texto <- "Hola"
  print(texto)
}
# Writes "Hola <nombre>" to stdout via cat() -- no trailing newline, NULL
# return.  Assumes `nombre` is a length-1 character value, as in the examples.
saludo2 <- function(nombre) {
  mensaje <- paste("Hola", nombre)
  cat(mensaje)
}
# Writes a congratulation line to stdout (no trailing newline; NULL return).
# sep = "" with explicit spaces reproduces the original default-separator
# output exactly, while edad keeps cat()'s own numeric formatting.
saludo3 <- function(nombre, edad) {
  cat("Hola ", nombre, " tienes ", edad, " años. Felicidades", sep = "")
}
# Writes the arithmetic mean of `numeros` to stdout (no newline), as before,
# and additionally returns the mean invisibly so callers can reuse it
# (the original returned cat()'s NULL).  mean() replaces the hand-rolled
# sum()/length(), and the local no longer shadows the function name.
promedio <- function(numeros) {
  m <- mean(numeros)
  cat(m)
  invisible(m)
}
# Builds a data frame with r rows and two columns x and y, each an
# independent random sample from 10:100.
#
# Sampling stays without replacement (as in the original) while r fits in
# the 91-value pool; for larger r the original sample() call errored, so we
# now fall back to sampling with replacement.
crea_datos <- function(r) {
  pool <- 10:100
  con_reemplazo <- r > length(pool)
  data.frame(x = sample(pool, size = r, replace = con_reemplazo),
             y = sample(pool, size = r, replace = con_reemplazo))
}
|
95de37151e51362f317ea5eabe95f4fc1b392d53
|
0d74c6026340636cb7a73da2b53fe9a80cd4d5a5
|
/SupportingDocs/Examples/Version05/ex17/ex17.R
|
914b015735980ac49baea74a95d66ad3def4cb19
|
[] |
no_license
|
simsem/simsem
|
941875bec2bbb898f7e90914dc04b3da146954b9
|
f2038cca482158ec854a248fa2c54043b1320dc7
|
refs/heads/master
| 2023-05-27T07:13:55.754257
| 2023-05-12T11:56:45
| 2023-05-12T11:56:45
| 4,298,998
| 42
| 23
| null | 2015-06-02T03:50:52
| 2012-05-11T16:11:35
|
R
|
UTF-8
|
R
| false
| false
| 3,416
|
r
|
ex17.R
|
# Simulation study with simsem: a 9-factor longitudinal model (3 constructs
# measured at 3 waves, indicators y1-y27).  Data are generated from popModel
# and analysed with (1) the full longitudinal model and (2) a reduced model
# of the last wave only.
library(simsem)
# Population (data-generating) model in lavaan syntax.  Numeric multipliers
# (e.g. 0.6*f1) are population parameter values, con* labels impose equality
# constraints across waves, start() supplies starting values, and `med` is a
# defined mediation effect (product of two cross-lagged paths).
popModel <- "
f1 =~ 1*y1 + 1*y2 + 1*y3 + con1*y2 + con2*y3
f2 =~ 1*y4 + 1*y5 + 1*y6 + con3*y5 + con4*y6
f3 =~ 1*y7 + 1*y8 + 1*y9 + con5*y8 + con6*y9
f4 =~ 1*y10 + 1*y11 + 1*y12 + con1*y11 + con2*y12
f5 =~ 1*y13 + 1*y14 + 1*y15 + con3*y14 + con4*y15
f6 =~ 1*y16 + 1*y17 + 1*y18 + con5*y17 + con6*y18
f7 =~ 1*y19 + 1*y20 + 1*y21 + con1*y20 + con2*y21
f8 =~ 1*y22 + 1*y23 + 1*y24 + con3*y23 + con4*y24
f9 =~ 1*y25 + 1*y26 + 1*y27 + con5*y26 + con6*y27
f4 ~ 0.6*f1 + con7*f1
f5 ~ start(0.3)*f1 + 0.6*f2 + con8*f1 + con9*f2
f6 ~ start(0.3)*f2 + 0.6*f3 + con10*f2 + con11*f3
f7 ~ 0.6*f4 + con7*f4
f8 ~ start(0.3)*f4 + 0.6*f5 + con8*f4 + con9*f5
f9 ~ start(0.3)*f5 + 0.6*f6 + con10*f5 + con11*f6
f1 ~~ 1*f1
f2 ~~ 1*f2
f3 ~~ 1*f3
f4 ~~ 0.6*f4 + con12*f4
f5 ~~ 0.6*f5 + con13*f5
f6 ~~ 0.6*f6 + con14*f6
f7 ~~ 0.6*f7 + con12*f7
f8 ~~ 0.6*f8 + con13*f8
f9 ~~ 0.6*f9 + con14*f9
f1 ~~ 0.4*f2
f1 ~~ 0.4*f3
f2 ~~ 0.4*f3
y1 ~~ 0.5*y1
y2 ~~ 0.5*y2
y3 ~~ 0.5*y3
y4 ~~ 0.5*y4
y5 ~~ 0.5*y5
y6 ~~ 0.5*y6
y7 ~~ 0.5*y7
y8 ~~ 0.5*y8
y9 ~~ 0.5*y9
y10 ~~ 0.5*y10
y11 ~~ 0.5*y11
y12 ~~ 0.5*y12
y13 ~~ 0.5*y13
y14 ~~ 0.5*y14
y15 ~~ 0.5*y15
y16 ~~ 0.5*y16
y17 ~~ 0.5*y17
y18 ~~ 0.5*y18
y19 ~~ 0.5*y19
y20 ~~ 0.5*y20
y21 ~~ 0.5*y21
y22 ~~ 0.5*y22
y23 ~~ 0.5*y23
y24 ~~ 0.5*y24
y25 ~~ 0.5*y25
y26 ~~ 0.5*y26
y27 ~~ 0.5*y27
y1 ~~ 0.2*y10
y2 ~~ 0.2*y11
y3 ~~ 0.2*y12
y4 ~~ 0.2*y13
y5 ~~ 0.2*y14
y6 ~~ 0.2*y15
y7 ~~ 0.2*y16
y8 ~~ 0.2*y17
y9 ~~ 0.2*y18
y10 ~~ 0.2*y19
y11 ~~ 0.2*y20
y12 ~~ 0.2*y21
y13 ~~ 0.2*y22
y14 ~~ 0.2*y23
y15 ~~ 0.2*y24
y16 ~~ 0.2*y25
y17 ~~ 0.2*y26
y18 ~~ 0.2*y27
y1 ~~ 0.04*y19
y2 ~~ 0.04*y20
y3 ~~ 0.04*y21
y4 ~~ 0.04*y22
y5 ~~ 0.04*y23
y6 ~~ 0.04*y24
y7 ~~ 0.04*y25
y8 ~~ 0.04*y26
y9 ~~ 0.04*y27
med := con8 * con10
"
# Analysis model 1: same structure as the population model but with all
# parameters free (labels only impose cross-wave equality constraints).
analyzeModel1 <- "
f1 =~ 1*y1 + con1*y2 + con2*y3
f2 =~ 1*y4 + con3*y5 + con4*y6
f3 =~ 1*y7 + con5*y8 + con6*y9
f4 =~ 1*y10 + con1*y11 + con2*y12
f5 =~ 1*y13 + con3*y14 + con4*y15
f6 =~ 1*y16 + con5*y17 + con6*y18
f7 =~ 1*y19 + con1*y20 + con2*y21
f8 =~ 1*y22 + con3*y23 + con4*y24
f9 =~ 1*y25 + con5*y26 + con6*y27
f4 ~ con7*f1
f5 ~ con8*f1 + con9*f2
f6 ~ con10*f2 + con11*f3
f7 ~ con7*f4
f8 ~ con8*f4 + con9*f5
f9 ~ con10*f5 + con11*f6
f1 ~~ f1
f2 ~~ f2
f3 ~~ f3
f4 ~~ con12*f4
f5 ~~ con13*f5
f6 ~~ con14*f6
f7 ~~ con12*f7
f8 ~~ con13*f8
f9 ~~ con14*f9
f1 ~~ f2
f1 ~~ f3
f2 ~~ f3
y1 ~~ y1
y2 ~~ y2
y3 ~~ y3
y4 ~~ y4
y5 ~~ y5
y6 ~~ y6
y7 ~~ y7
y8 ~~ y8
y9 ~~ y9
y10 ~~ y10
y11 ~~ y11
y12 ~~ y12
y13 ~~ y13
y14 ~~ y14
y15 ~~ y15
y16 ~~ y16
y17 ~~ y17
y18 ~~ y18
y19 ~~ y19
y20 ~~ y20
y21 ~~ y21
y22 ~~ y22
y23 ~~ y23
y24 ~~ y24
y25 ~~ y25
y26 ~~ y26
y27 ~~ y27
y1 ~~ y10
y2 ~~ y11
y3 ~~ y12
y4 ~~ y13
y5 ~~ y14
y6 ~~ y15
y7 ~~ y16
y8 ~~ y17
y9 ~~ y18
y10 ~~ y19
y11 ~~ y20
y12 ~~ y21
y13 ~~ y22
y14 ~~ y23
y15 ~~ y24
y16 ~~ y25
y17 ~~ y26
y18 ~~ y27
y1 ~~ y19
y2 ~~ y20
y3 ~~ y21
y4 ~~ y22
y5 ~~ y23
y6 ~~ y24
y7 ~~ y25
y8 ~~ y26
y9 ~~ y27
med := con8 * con10
"
# 1000 replications of n = 200 each, generated from popModel and fitted with
# the full longitudinal model via lavaan.
Output1 <- sim(1000, n=200, analyzeModel1, generate=popModel, lavaanfun="lavaan")
summary(Output1)
# Analysis model 2: a (misspecified) cross-sectional model using only the
# last wave (f7-f9 / y19-y27), with a simple a*b mediation effect.
analyzeModel2 <- "
f7 =~ 1*y19 + con1*y20 + con2*y21
f8 =~ 1*y22 + con3*y23 + con4*y24
f9 =~ 1*y25 + con5*y26 + con6*y27
f8 ~ a*f7
f9 ~ b*f8
f7 ~~ f7
f8 ~~ f8
f9 ~~ f9
y19 ~~ y19
y20 ~~ y20
y21 ~~ y21
y22 ~~ y22
y23 ~~ y23
y24 ~~ y24
y25 ~~ y25
y26 ~~ y26
y27 ~~ y27
med := a * b
"
Output2 <- sim(1000, n=200, analyzeModel2, generate=popModel, lavaanfun="lavaan")
summary(Output2)
# matchParam = TRUE restricts the parameter summary to parameters that exist
# in both the analysis and generation models.
summaryParam(Output2, matchParam = TRUE)
|
1ddce8b53df6a16b9230ab5a5e7d6a05c8ef261a
|
f37e28e6a95d3e455317341d166d4dc1ce0d7a25
|
/R/print.summary.swim.R
|
ac9f9dd79a3d65c4c4254b7e105b1898a3cc8f65
|
[] |
no_license
|
kimwhoriskey/swim
|
ff41ea8be6c25098700732782f894fbf807fa781
|
c612fdd7404a433ae6271758f0e36d57cdb7907e
|
refs/heads/master
| 2021-07-25T00:03:59.435303
| 2020-03-21T03:23:56
| 2020-03-21T03:23:56
| 248,900,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
print.summary.swim.R
|
#' Print method for summary.swim objects
#'
#' Displays the model-fitting time (element 1) and the confidence intervals
#' (rows 9-18 of element 2, with its second column dropped) of a summarised
#' hidden Markov movement model fit.
#'
#' @param x A summary.swim object: a list whose first element is the fit time
#'   and whose second element is a table with at least 18 rows.
#' @param ... Further arguments, ignored; required so the method matches the
#'   generic \code{print(x, ...)} signature.
#' @return \code{x}, invisibly, as is conventional for print methods.
#' @export
print.summary.swim <- function(x, ...){
  cat("Hidden Markov Movement Model:\n")
  cat("\n Time to fit: \n")
  print(x[[1]])
  cat("\n Confidence Intervals: \n")
  print(round(x[[2]][9:18,-2], 7))
  # cat("\nWald Tests: \n")
  #
  # printCoefmat(x[[2]][9:18,], P.values=TRUE, has.Pvalue=TRUE)
  invisible(x)
}
|
0b8e78e577068febcfed9f343b95e7269b5404cb
|
4e6396b61d67aeb1d26d5a25616e63dc236f4454
|
/MA7_maok.R
|
6aab78c58db86d0c316a249614a7be8fe8dd04ca
|
[] |
no_license
|
ctejeda86/merger-table
|
7aec096647f49962b102850f1e026a93c5b2051b
|
712239e81395ab5b071764568a9eac56e005d5c8
|
refs/heads/master
| 2020-09-12T05:52:06.605354
| 2019-11-18T00:29:05
| 2019-11-18T00:29:05
| 222,331,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,670
|
r
|
MA7_maok.R
|
# Build the daily market table (MA7): merge BMV close data with Economatica
# fundamentals, clean names/sectors, derive top-5 gainer/loser tables, and
# write the result to CSV/TSV.
getwd()
# NOTE(review): hard-coded absolute working directory -- only valid on the
# author's machine.
setwd("C:/Users/HP/Documents/Termo/MA7")
library(dplyr)
# str_replace() is used throughout below but stringr was never attached,
# so the script failed unless stringr happened to be loaded already.
library(stringr)
## Save bmv as csv and rename column 13 to the percent-change name.
bmv<-read.csv("bmv.csv",stringsAsFactors = FALSE)
colnames(bmv)[13]<-"VARPORC"
## Load the Economatica export and drop unused columns.
ma7_econ<-read.csv("ma7_econ.csv",stringsAsFactors = FALSE)
ma7_econ<-select(ma7_econ,-Tipo.de.Activo,-Bolsa...Fuente,-Activo...Cancelado)
# Rename the ma7_econ columns.
var<-c("Nombre","clase","cod","sector","max52sem","min52sem","cierre_prev","cierre_hoy","max_hoy","min_hoy","ret_hoy","ret_sem","ret_mes","ret_año","ret_ytd","vol","upa","vla","pvl","pu","fecha")
names(ma7_econ)=var
## Replace "-" placeholders in the date column with the latest quarter end.
## UPDATE 30/09/2019 to the most recent quarter when rerunning.
ma7_econ$fecha<-str_replace(ma7_econ$fecha,"-","30/09/2019")
# Coerce the numeric columns and zero-fill anything unparseable.
ma7_econ[5:20] <- lapply(ma7_econ[5:20], as.numeric)
ma7_econ[is.na(ma7_econ)] <- 0
## Keep original series/ticker for the gainer/loser tables.
bmv$SERIE2<-bmv$SERIE
bmv$EMISORA2<-bmv$EMISORA
# Drop "*" series markers, then concatenate ticker and series.
bmv$SERIE[bmv$SERIE == "*"]<- ""
bmv$EMISORA<-paste0(bmv$EMISORA,bmv$SERIE)
# Rename the Economatica code column so it matches the BMV key.
colnames(ma7_econ)[ colnames(ma7_econ) == "cod" ] <- "EMISORA"
# Merge the two tables on the ticker code.
ma7<-merge(bmv,ma7_econ,by="EMISORA")
# Normalise sector and company names (a duplicated, no-op
# "Siderur & Metalur" replacement from the original was removed).
ma7$sector<-str_replace(ma7$sector,"Alimentos y Beb","Alim. y Beb")
ma7$sector<-str_replace(ma7$sector,"Finanzas y Seguros","Fin. y Seguros")
ma7$sector<-str_replace(ma7$sector,"Siderur & Metalur","Sider. & Met.")
ma7$sector<-str_replace(ma7$sector,"-","Bancos y Fin.")
ma7$sector<-str_replace(ma7$sector,"Minerales no Met","Min. no Met.")
ma7$Nombre<-str_replace(ma7$Nombre,"America Movil","América Móvil")
ma7$Nombre<-str_replace(ma7$Nombre,"GMexico","GMéxico")
ma7$Nombre<-str_replace(ma7$Nombre,"Wal Mart de Mexico","Wal Mart de México")
# Top-5 gainers.
ma2_mas<-ma7 %>% arrange(-VARPORC)
ma2_mas<-ma2_mas[1:5,] %>% select(EMISORA2,SERIE2,ÚLTIMO,VARPORC,ret_mes,ret_ytd)
# Top-5 losers.
ma2_men<-ma7 %>% arrange(VARPORC)
ma2_men<-ma2_men[1:5,] %>% select(EMISORA2,SERIE2,ÚLTIMO,VARPORC,ret_mes,ret_ytd)
# Final column selection and export.
ma7<-ma7 %>% select (Nombre,EMISORA,sector,max52sem,min52sem,cierre_prev,PPP,max_hoy,min_hoy,VARPORC,ret_sem,ret_mes,ret_año,ret_ytd,vol,upa,vla,pvl,pu,fecha)
write.csv(ma7,file="ma7.csv")
write.table(ma7,file="ma7.txt",sep="\t",quote=FALSE,col.names=FALSE,row.names = FALSE)
|
f4a2f7694101a751a4e195189f60f0f84d807a11
|
d6d7024184d97504457b03cb622906a15305e0ad
|
/src/RCodeBase/7. IncrementalSpends.R
|
62aa61cdc6e9074774c4906cdce48b023f6b2d79
|
[] |
no_license
|
ssandipansamanta/marketing_campaign_effectiveness
|
b137e607cf52e90072ecd5eaa7657414ba5ee25e
|
0bf9ddb7a77467407f6cdb338890b95b3fbe7b8e
|
refs/heads/master
| 2020-04-17T23:57:26.388909
| 2019-01-27T18:24:07
| 2019-01-27T18:24:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,412
|
r
|
7. IncrementalSpends.R
|
# Summarise spend by treatment group and compute each group's incremental
# spend relative to the Control group.
#
# Args:
#   InputData     a data.table with at least a `Spend` column plus the
#                 grouping columns (must include `Group` with a "Control" level)
#   groupbyLevel1 character vector of columns for the per-customer roll-up
#   groupbyLevel2 character vector of columns for the group-level summary
#
# Returns: a data.table grouped by groupbyLevel2 with ActualSpend, AvgSpend,
# NoofCust, Incremental_Sales_dollar and Incremental_Sales_percent
# (modified by reference via :=, and also returned).
IncrementalSpend <- function(InputData,groupbyLevel1,groupbyLevel2){
  IncTotalSpend <- InputData[,.(TotalSpend = sum(Spend,na.rm = TRUE)), by = groupbyLevel1]
  IncAvgSpend <- IncTotalSpend[,.(ActualSpend = sum(TotalSpend,na.rm = TRUE),
                                  AvgSpend = mean(TotalSpend,na.rm = TRUE),
                                  NoofCust = .N), by = groupbyLevel2]
  # Standard data.table row subsetting replaces the fragile call to the
  # unexported data.table:::subset.data.table (identical result).
  ControlSpend <- IncAvgSpend[Group == "Control"]$AvgSpend
  # NOTE(review): when groupbyLevel2 has several non-Group columns,
  # ControlSpend is a vector and the arithmetic below relies on row order /
  # recycling, exactly as in the original -- confirm alignment with callers.
  IncAvgSpend[,c('Incremental_Sales_dollar','Incremental_Sales_percent') :=
                list((ActualSpend - (ControlSpend * NoofCust)),
                     (ActualSpend / (ControlSpend * NoofCust) - 1)*100)]
  return(IncAvgSpend)
}
# Q3 incremental-spend summary: restrict ADS to the analysis window with the
# project helper SenstivityAnalysis() (name kept as defined elsewhere), roll
# up by group/customer type, and write the result out via WriteOutput().
IncrementalSpendSummary <- IncrementalSpend(InputData = SenstivityAnalysis(InputData = ADS,
                                                                           StDate = StartDate_IncSales,
                                                                           EnDate = EndDate_IncSales,
                                                                           groupby = c('UserID','Group','CustomerType'),
                                                                           Skewnessthreshold = Skewnessthreshold),
                                            groupbyLevel1=c('Group','CustomerType','UserID'),
                                            groupbyLevel2 = c('Group','CustomerType'))
WriteOutput(OutputDataFrame = IncrementalSpendSummary,
            NameofFile = 'Q3_IncrementalSpendSummary',RemoveFile = TRUE)
|
62298139d9c078efb659e9b346c2ea0aefe74292
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CHNOSZ/tests/test-util.list.R
|
39a525efe0c7aae8c5473be55eb455786b09c2c4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
test-util.list.R
|
# testthat unit tests for which.pmax() from util.list.
context("util.list")
test_that("which.pmax() properly applies attributes, and also works for lists of length 1", {
  # Two parallel matrices: which.pmax() should return, elementwise, the index
  # of the list element holding the maximum, carrying over the attributes
  # (here, dim) of the first element.
  testlist <- list(a=matrix(c(1,2,3,4)), b=matrix(c(4,3,2,1)))
  testattr <- attributes(testlist[[1]])
  expect_equal(attributes(which.pmax(testlist)), testattr)
  # With a single-element list, every position trivially picks element 1.
  expect_equal(as.numeric(which.pmax(testlist[1])), c(1, 1, 1, 1))
})
|
cf8e26ae34069e95ed90cc42dea1537eff5da136
|
316516337da2ca6d86b7da32e14149728177f1e4
|
/R/insample_sim_hh1.R
|
88219cccf74f96c26983205b132b4dab4e67e31d
|
[] |
no_license
|
swihart/wfpca
|
7f208bb895dfb9b9dfd57723fac7fb92031135e3
|
4814cdf4648a9d9631df1e705858512f2e84d143
|
refs/heads/master
| 2020-05-19T08:11:38.442105
| 2015-07-08T16:21:45
| 2015-07-08T16:21:45
| 23,965,545
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,514
|
r
|
insample_sim_hh1.R
|
#' Hopkins Hybrid 1: Run a simulation and then perform prediction. Based on insample_sim(). Censoring is
#' fabricated a la the SES of the Growth data. We will make a _hh2 to do a more realistic censoring.
#'
#' This function comes after many months of running readme_sim and talks with Bryan Lau on
#' what we need to demonstrate for the method.
#'
#' @param sim_seed passed to set.seed()
#' @param sample_size (defaults to 1000) and must be a multiple of 100
#' @param sim_slope see slope in calculate_ses()
#' @param sim_intercept see intercept in calculate_ses()
#' @param sim_ses_coef see ses_coef in apply_censoring()
#' @param sim_age_coef see age_coef in apply_censoring()
#' @param hh_rds the path in single quotes to the local copy of hopkins_hybrid.RDS
#' @export
#' @return results a data frame with rmse for each approach for that simulated dataset
#' @examples
#' ---
insample_sim_hh1 <- function(sim_seed=101, sample_size=1000, sim_slope=100,
                             sim_intercept=12, sim_ses_coef=.01, sim_age_coef=.01,
                             hh_rds='./data_local/hopkins_hybrid.RDS'){
  ## Runs one in-sample simulation: oversample the Hopkins-hybrid growth data,
  ## attach SES, apply informative censoring, then fit and compare six
  ## approaches — (weighted/remeaned) FPCA and (weighted/remeaned) LMM —
  ## writing the combined predictions to an RDS file and the per-approach
  ## timings / number of PCs to a CSV file in the working directory.
  ##
  ## quick start with the default values as variables for testing. Loads packages.
  #test_prep_script()
  ## get the data prepped, that is simulated and censored and calculate missing.
  #simulate_censor_summarize()
  ## NOTE(review): the debug scaffolding below previously ran on EVERY call,
  ## re-installing/re-documenting the package and clobbering every function
  ## argument with hard-coded values.  It is now commented out so that the
  ## arguments actually take effect.
  # library(devtools); library(roxygen2); install(); document();
  # hh_rds='./data_local/hopkins_hybrid.RDS'; sim_seed=101; sample_size=1000; sim_slope=100; sim_intercept=12; sim_ses_coef=.01; sim_age_coef=.01;

  ## d will be a 39 x 32 matrix based on the males from the Berkeley Growth Study in `fda` package
  ## we oversample d based on the fpc as well extract out the ages of measurement
  d <- readRDS(hh_rds)
  age_vec <- c(as.numeric(colnames(d[,-1,with=FALSE])))
  over_samp_mat <- sample_data_fpc(as.matrix(d), sample_size, seed=sim_seed, timepoints=age_vec)
  ## we calculate ses on the oversampled dataset and turn the matrix into a long dataset.
  with_ses <- calculate_ses(over_samp_mat, slope=sim_slope, intercept=sim_intercept)
  long <- make_long(with_ses)
  ## In this chunk we apply censoring.
  censored <- apply_censoring(long, ses_coef=sim_ses_coef, age_coef=sim_age_coef, protected=1:4)
  ## Measurement error: within 1/8 inch. Can comment out or change.
  censored$inches <- censored$inches + runif(length(censored$inches), -1/8, 1/8)
  ## observed_with_stipw has the standardized inverse probability weights (stipw)
  ## wtd_trajectories has same info as observed_with_stipw but has inches_wtd_hadamard as well
  ## we calculate all_with_stipw with NAs
  all_with_stipw <- calculate_stipw(censored,"keep")
  observed_with_stipw <- calculate_stipw(censored,"omit")
  wtd_trajectories <- calculate_wtd_trajectories(observed_with_stipw)
  ## use data.table where possible speeds up future rbinds post simulation
  setDT(all_with_stipw)
  setkey(all_with_stipw, newid, age, inches, ses)
  setDT(wtd_trajectories)
  setkey(wtd_trajectories, newid, age, inches, ses)
  interim <- wtd_trajectories[all_with_stipw]
  ## BEGIN: new step...DEAN:
  ## NOTE(review): `remaining` is tested twice below; the second term was
  ## possibly intended to be !is.na(denom) — confirm with the original author.
  key.interim <- unique(interim[,c("age","remaining","denom"), with=FALSE])[!is.na(remaining) & !is.na(remaining)]
  setkey(key.interim, age)
  setkey(interim, age)
  holder <- key.interim[interim]
  ## fill in missing normalized weights from the remaining-at-age counts
  holder[is.na(stipw01.n), stipw01.n := remaining*(stipw2/denom) ]
  ## END: new step...DEAN:
  ## Dean step: keep only the columns we need going forward
  all_with_stipw <- subset(holder,
                           select=c(names(all_with_stipw),
                                    "inches_wtd_hadamard",
                                    "remaining",
                                    "stipw",
                                    "stipw01",
                                    "stipw01.n"))
  ## DEAN step: reset keys;
  setkey(all_with_stipw, newid, age, inches, ses)
  setkey(wtd_trajectories, newid, age, inches, ses)
  ## inches_ltfu: the observed (post-censoring) outcome; NA once lost to follow-up
  all_with_stipw[ , inches_ltfu:=inches ]
  all_with_stipw[is.na(stipw), inches_ltfu:=NA ]
  ## calculate "remean" data prep: shift observed values so their age-specific
  ## mean matches the weighted (Hadamard) mean
  all_with_stipw[,
                 inches_wtd_remean:= inches_ltfu -
                   mean(inches_ltfu, na.rm=TRUE) +
                   mean(inches_wtd_hadamard , na.rm=TRUE),
                 by=age]
  ## standardize the names selected across all approaches and their resultant data sets
  selected <- c("newid","age", "ses",
                "inches", "inches_wtd_hadamard","inches_wtd_remean", "inches_ltfu", "inches_predicted",
                "remaining",
                "stipw",
                "stipw01",
                "stipw01.n",
                "approach")
  ## note: we're simplifying to (w)fpca and (w)lme.
  ## (a) naive and (b) weighted non-parametric age-specific means were removed
  ## from this variant; see git history of insample_sim() if needed.

  ## ---- (c) naive FPC ----
  unwtd_fncs <- dcast(data=wtd_trajectories, formula= newid~age, value.var="inches")
  naive_fpc_proc_minutes <- system.time(
    fpca_unwtd_fncs <- fpca.face(Y=as.matrix(unwtd_fncs)[,-1], argvals=age_vec, knots=7)
  )[3]/60
  naive_fpc <- data.frame(age=age_vec, V1=fpca_unwtd_fncs$mu, approach="naive_fpc")
  ## combine with long for the prediction:
  ## a little different than the removed approaches; now we do have individual level curves
  ## need to extract them (Yhat) and rename them and data.table them
  naive_fpc_indiv <- as.data.frame(cbind(1:nrow(fpca_unwtd_fncs$Yhat),fpca_unwtd_fncs$Yhat))
  colnames(naive_fpc_indiv) <- colnames(unwtd_fncs)
  setDT(naive_fpc_indiv)
  naive_fpc <- melt(naive_fpc_indiv,
                    id.vars=c("newid"),
                    variable.name = "age",
                    variable.factor=FALSE,
                    value.name="inches_predicted")
  naive_fpc[,approach:="naive_fpc",]
  naive_fpc[,age:=as.numeric(age)]
  setDT(naive_fpc)
  setkey(naive_fpc, newid, age)
  naive_fpc <- naive_fpc[all_with_stipw]
  setkey(naive_fpc, newid, age)
  naive_fpc <- subset(naive_fpc, select=selected)
  ## add these on post dean: minutes and number of principle components (npc)
  naive_fpc[, minutes:=naive_fpc_proc_minutes]
  naive_fpc[, number_pc:=fpca_unwtd_fncs$npc]
  ## visual checks (debug only)
  # ggplot(data=naive_fpc[newid %in% c(1,2,5)],
  #        aes(x=age, y=inches_predicted, color=factor(newid)))+
  #   geom_path()+
  #   geom_point()+
  #   geom_line(aes(y=inches_ltfu, id=factor(newid)), color='black' )+
  #   geom_point(aes(y=inches_ltfu, id=factor(newid), shape=factor(newid)), color='black')

  ## ---- (e1) remean weighted FPC ----
  wtd_remean_fncs <- dcast(data=subset(all_with_stipw,
                                       select=c("newid","age","inches_wtd_remean")),
                           formula= newid~age, value.var="inches_wtd_remean")
  weighted_remean_fpc_proc_minutes <- system.time(
    fpca_wtd_remean_fncs <- fpca.face(Y=as.matrix(wtd_remean_fncs)[,-1], argvals=age_vec, knots=7)
  )[3]/60
  ## extract individual-level fitted curves (Yhat) and reshape long
  weighted_remean_fpc_indiv <- as.data.frame(cbind(1:nrow(fpca_wtd_remean_fncs$Yhat),
                                                   fpca_wtd_remean_fncs$Yhat))
  colnames(weighted_remean_fpc_indiv) <- colnames(wtd_remean_fncs)
  setDT(weighted_remean_fpc_indiv)
  weighted_remean_fpc <- melt(weighted_remean_fpc_indiv,
                              id.vars=c("newid"),
                              variable.name = "age",
                              variable.factor=FALSE,
                              value.name="inches_predicted")
  weighted_remean_fpc[,approach:="wtd_remean_fpc",]
  weighted_remean_fpc[,age:=as.numeric(age)]
  setDT(weighted_remean_fpc)
  setkey(weighted_remean_fpc, newid, age)
  weighted_remean_fpc <- weighted_remean_fpc[all_with_stipw]
  setkey(weighted_remean_fpc, newid, age)
  weighted_remean_fpc <- subset(weighted_remean_fpc, select=selected)
  ## add these on post dean: minutes and number of principle components (npc)
  weighted_remean_fpc[, minutes:=weighted_remean_fpc_proc_minutes]
  weighted_remean_fpc[, number_pc:=fpca_wtd_remean_fncs$npc]
  ## visual checks (debug only; a bare ggplot object inside a function is
  ## never printed, so the uncommented call was silently discarded work)
  # ggplot(data=weighted_remean_fpc[newid %in% c(1,2,5)],
  #        aes(x=age, y=inches_predicted, color=factor(newid)))+
  #   geom_path()+
  #   geom_point()+
  #   geom_line(aes(y=inches_ltfu, id=factor(newid)), color='black' )+
  #   geom_point(aes(y=inches_ltfu, id=factor(newid), shape=factor(newid)), color='black')

  ## ---- (e2) Hadamard-weighted FPC ----
  wtd_fncs <- dcast(data=wtd_trajectories, formula= newid~age, value.var="inches_wtd_hadamard")
  weighted_fpc_proc_minutes <- system.time(
    fpca_wtd_fncs <- fpca.face(Y=as.matrix(wtd_fncs)[,-1], argvals=age_vec, knots=7)
  )[3]/60
  ## extract individual-level fitted curves (Yhat) and reshape long
  weighted_fpc_indiv <- as.data.frame(cbind(1:nrow(fpca_wtd_fncs$Yhat),fpca_wtd_fncs$Yhat))
  colnames(weighted_fpc_indiv) <- colnames(wtd_fncs)
  setDT(weighted_fpc_indiv)
  weighted_fpc <- melt(weighted_fpc_indiv,
                       id.vars=c("newid"),
                       variable.name = "age",
                       variable.factor=FALSE,
                       value.name="inches_predicted")
  weighted_fpc[,approach:="wtd_hadamard_fpc",]
  weighted_fpc[,age:=as.numeric(age)]
  setDT(weighted_fpc)
  setkey(weighted_fpc, newid, age)
  weighted_fpc <- weighted_fpc[all_with_stipw]
  ## begin extra steps: (weighted inputs average out for mean, but need to be de-weighted for individual)
  setDT(wtd_trajectories)
  ## post-DEAN: take the normalized weights from all_with_stipw
  wts <- subset(all_with_stipw, select=c("newid","age","stipw01.n"))
  setkey(wts, newid, age)
  weighted_fpc.test <- wts[weighted_fpc]
  ## debug: inspect how many ~0 weights
  # weighted_fpc.test[,table(round(stipw01.n,2))]
  ## change 0's to NA so the de-weighting division below is not by zero
  weighted_fpc.test[stipw01.n==0, stipw01.n:=NA]
  weighted_fpc.test[, inches_predicted_weighted:= inches_predicted]
  weighted_fpc.test[!is.na(stipw01.n), inches_predicted_deweighted:= inches_predicted/stipw01.n]
  ## end extra steps:
  setkey(weighted_fpc.test, newid, age)
  setnames(weighted_fpc.test, "inches_predicted", "inches_predicted_old")
  setnames(weighted_fpc.test, "inches_predicted_deweighted", "inches_predicted")
  weighted_fpc <- subset(weighted_fpc.test, select=selected)
  ## add these on post dean: minutes and number of principle components (npc)
  weighted_fpc[, minutes:=weighted_fpc_proc_minutes]
  weighted_fpc[, number_pc:=fpca_wtd_fncs$npc]
  ## visual checks (debug only)
  # ggplot(data=weighted_fpc[newid %in% c(1,2,5, 500, 999,1000)],
  #        aes(x=age, y=inches_predicted, color=factor(newid)))+
  #   geom_path()+
  #   geom_point()+
  #   geom_line(aes(y=inches_ltfu, id=factor(newid)), color='black' )+
  #   geom_point(aes(y=inches_ltfu, id=factor(newid), shape=factor(newid)), color='black')

  ## ---- (f1) naive LME ----
  ## NOTE(review): when lmer() warns/errors inside system.time(), the
  ## assignment to *_proc_minutes never completes, so the handlers previously
  ## crashed on an undefined variable; handlers now record minutes = NA_real_.
  naive_lme <- tryCatch(
    {
      ## for 7*12 timepts and 1000 subjects, df=7 gives warning. Stay at df=5.
      ## for 7*12 timepts and 5000 subjects, df=5 gives FATAL ERROR for ses_coef=0.05
      ## for 7*12 timepts and 5000 subjects, df=5 is OK for ses_coef=0.01
      naive_lme_proc_minutes <- system.time(
        naive_lme_model <- lmer(inches ~ bs(age, df=5) + (bs(age, df=5)|newid), data=observed_with_stipw)
      )[3]/60
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = predict(naive_lme_model,
                                              newdata=all_with_stipw),
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "naive_lme",
                 minutes             = naive_lme_proc_minutes,
                 number_pc           = NA)
    },
    warning = function(cond){
      write.csv(observed_with_stipw, paste0("data_that_failed_nlme_lme_fit_",abs(rnorm(1,100,100)),".csv"), row.names=FALSE)
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = NA,
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "naive_lme",
                 minutes             = NA_real_,
                 number_pc           = NA)
    },
    error = function(cond){
      write.csv(observed_with_stipw, paste0("data_that_failed_nlme_lme_fit_",abs(rnorm(1,100,100)),".csv"), row.names=FALSE)
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = NA,
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "naive_lme",
                 minutes             = NA_real_,
                 number_pc           = NA)
    })
  setkey(naive_lme, newid, age)
  ## visual checks (debug only)
  # ggplot(data=naive_lme[newid %in% c(1,2,5, 500, 999,1000)],
  #        aes(x=age, y=inches_predicted, color=factor(newid)))+
  #   geom_path()+
  #   geom_point()+
  #   geom_line(aes(y=inches_ltfu, id=factor(newid)), color='black' )+
  #   geom_point(aes(y=inches_ltfu, id=factor(newid), shape=factor(newid)), color='black')

  ## ---- (f2) weighted-remean LME ----
  wtd_remean_lme <- tryCatch(
    {
      ## for 7*12 timepts and 1000 subjects, df=7 gives warning. Stay at df=5.
      wtd_remean_proc_minutes <- system.time(
        wtd_remean_lme_model <- lmer(inches_wtd_remean ~ bs(age, df=5) + (bs(age, df=5)|newid), data=all_with_stipw,
                                     na.action=na.omit)
      )[3]/60
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = predict(wtd_remean_lme_model,
                                              newdata=all_with_stipw),
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "wtd_remean_lme",
                 minutes             = wtd_remean_proc_minutes,
                 number_pc           = NA)
    },
    warning = function(cond){
      write.csv(observed_with_stipw, paste0("data_that_failed_nlme_lme_fit_",abs(rnorm(1,100,100)),".csv"), row.names=FALSE)
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = NA,
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "wtd_remean_lme",
                 minutes             = NA_real_,
                 number_pc           = NA)
    },
    error = function(cond){
      write.csv(observed_with_stipw, paste0("data_that_failed_nlme_lme_fit_",abs(rnorm(1,100,100)),".csv"), row.names=FALSE)
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = NA,
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "wtd_remean_lme",
                 minutes             = NA_real_,
                 number_pc           = NA)
    })
  setkey(wtd_remean_lme, newid, age)
  ## visual checks (debug only)
  # ggplot(data=wtd_remean_lme[newid %in% c(1,2,5, 500, 999,1000)],
  #        aes(x=age, y=inches_predicted, color=factor(newid)))+
  #   geom_path()+
  #   geom_point()+
  #   geom_line(aes(y=inches_ltfu, id=factor(newid)), color='black' )+
  #   geom_point(aes(y=inches_ltfu, id=factor(newid), shape=factor(newid)), color='black')

  ## ---- (f3) Hadamard-weighted LME ----
  ## (an alternative nlme::lme formulation of this chunk was removed; see git history)
  ## note: re.form=~0 in lme4::predict is equivalent to level=0 in nlme::lme
  wtd_lme <- tryCatch(
    {
      wtd_lme_proc_minutes <- system.time(
        wtd_lme_model <- lmer(inches_wtd_hadamard ~ bs(age, df=5) + (bs(age, df=5)|newid), data=wtd_trajectories,
                              na.action=na.omit)
      )[3]/60
      wtd_lme1 <- data.table(newid               = all_with_stipw$newid,
                             age                 = all_with_stipw$age,
                             ses                 = all_with_stipw$ses,
                             inches              = all_with_stipw$inches,
                             inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                             inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                             inches_ltfu         = all_with_stipw$inches_ltfu,
                             inches_predicted    = predict(wtd_lme_model,
                                                           newdata=all_with_stipw),
                             remaining           = all_with_stipw$remaining,
                             stipw               = all_with_stipw$stipw,
                             stipw01             = all_with_stipw$stipw01,
                             stipw01.n           = all_with_stipw$stipw01.n,
                             approach            = "wtd_hadamard_lme")
      wtd_lme.test <- wts[wtd_lme1]
      ## debug: inspect how many ~0 weights
      # wtd_lme.test[,table(round(stipw01.n,2))]
      ## change 0's to NA so the de-weighting division below is not by zero
      wtd_lme.test[stipw01.n==0, stipw01.n:=NA]
      wtd_lme.test[, inches_predicted_weighted:= inches_predicted]
      wtd_lme.test[!is.na(stipw01.n), inches_predicted_deweighted:= inches_predicted/stipw01.n]
      ## end extra steps:
      setkey(wtd_lme.test, newid, age)
      setnames(wtd_lme.test, "inches_predicted", "inches_predicted_old")
      setnames(wtd_lme.test, "inches_predicted_deweighted", "inches_predicted")
      wtd_lme <- subset(wtd_lme.test, select=selected)
      wtd_lme[,minutes:=wtd_lme_proc_minutes]
      wtd_lme[,number_pc:=NA]
    },
    warning = function(cond){
      write.csv(wtd_trajectories, paste0("data_that_failed_lme4_lmer_fit_",abs(rnorm(1,100,100)),".csv"), row.names=FALSE)
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = NA,
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "wtd_hadamard_lme",
                 minutes             = NA_real_,
                 number_pc           = NA)
    },
    error = function(cond){
      write.csv(wtd_trajectories, paste0("data_that_failed_lme4_lmer_fit_",abs(rnorm(1,100,100)),".csv"), row.names=FALSE)
      data.table(newid               = all_with_stipw$newid,
                 age                 = all_with_stipw$age,
                 ses                 = all_with_stipw$ses,
                 inches              = all_with_stipw$inches,
                 inches_wtd_hadamard = all_with_stipw$inches_wtd_hadamard,
                 inches_wtd_remean   = all_with_stipw$inches_wtd_remean,
                 inches_ltfu         = all_with_stipw$inches_ltfu,
                 inches_predicted    = NA,
                 remaining           = all_with_stipw$remaining,
                 stipw               = all_with_stipw$stipw,
                 stipw01             = all_with_stipw$stipw01,
                 stipw01.n           = all_with_stipw$stipw01.n,
                 approach            = "wtd_hadamard_lme",
                 minutes             = NA_real_,
                 number_pc           = NA)
    })
  setkey(wtd_lme, newid, age)
  ## visual checks (debug only)
  # ggplot(data=wtd_lme[newid %in% c(1,2,5, 500, 999,1000)],
  #        aes(x=age, y=inches_predicted, color=factor(newid)))+
  #   geom_path()+
  #   geom_point()+
  #   geom_line(aes(y=inches_ltfu, id=factor(newid)), color='black' )+
  #   geom_point(aes(y=inches_ltfu, id=factor(newid), shape=factor(newid)), color='black')

  ## ---- combine approaches ----
  ## rbind the per-approach long tables, then widen: one column per approach
  means <- rbind(naive_fpc, weighted_fpc, weighted_remean_fpc, naive_lme, wtd_lme, wtd_remean_lme)
  means[, newid := as.integer(newid)]
  means[, remaining := as.integer(remaining)]
  setkey(means, "newid","age")
  didya <- dcast(means, newid+age ~ approach, value.var="inches_predicted")
  ## base columns (everything shared across approaches) joined to the wide predictions
  base.select <-
    c("newid","age", "ses",
      "inches", "inches_wtd_hadamard","inches_wtd_remean", "inches_ltfu",
      "remaining",
      "stipw",
      "stipw01",
      "stipw01.n")
  base <- subset(naive_fpc, select=base.select)
  base_means <- base[didya]

  ## ---- missingness summaries ----
  ## percent missing at each age, overall:
  dcast.wtd.trajectories <- dcast(calculate_wtd_trajectories(calculate_stipw(censored,"keep")), newid~age, value.var="stipw")
  percent.missing.at.age.18 <- sum(is.na(dcast.wtd.trajectories["18"]))/length(unlist(dcast.wtd.trajectories["18"]))
  percent.missing <- colSums(is.na(dcast.wtd.trajectories[,-1]))/length(unlist(dcast.wtd.trajectories["18"]))
  ## below/above median SES:
  dcast.wtd.trajectories <- dcast(calculate_wtd_trajectories(calculate_stipw(censored,"keep")), newid+ses~age, value.var="stipw")
  medianSES <- median(dcast.wtd.trajectories$ses, na.rm=TRUE)
  subbie <- subset(dcast.wtd.trajectories, ses <= medianSES,"18")
  percent.missing.at.age.18.below.median <- sum(is.na(subbie))/nrow(subbie)
  subbie <- subset(dcast.wtd.trajectories, ses <= medianSES, select=c(-1,-2))
  percent.missing.below.median <- colSums(is.na(subbie))/nrow(subbie)
  subbie <- subset(dcast.wtd.trajectories, ses > medianSES,"18")
  percent.missing.at.age.18.above.median <- sum(is.na(subbie))/nrow(subbie)
  subbie <- subset(dcast.wtd.trajectories, ses > medianSES, select=c(-1,-2))
  percent.missing.above.median <- colSums(is.na(subbie))/nrow(subbie)

  ## note: this cbind() works because every age is present for each dataset in `means`
  ## and the only non-scalars are vectors that are same length as number of age-levels
  results <- cbind(
    sim_seed = as.integer(sim_seed),
    sample_size = as.integer(sample_size),
    sim_slope = as.integer(sim_slope),
    sim_intercept = as.integer(sim_intercept),
    sim_ses_coef = sim_ses_coef,
    sim_age_coef = sim_age_coef,
    perc_ltfu_18 = percent.missing.at.age.18,
    percent_missing = percent.missing,
    percent_missing_below_median = percent.missing.below.median,
    percent_missing_above_median = percent.missing.above.median,
    base_means)
  ## (out-of-sample prediction experiments were moved to `outsample_sim.R`)

  ## ---- persist outputs ----
  ## random id so concurrent runs don't overwrite each other's files
  idnum1 <- abs(round(10000000*rnorm(1)))
  midletter <- sample(letters)[1]
  idnum2 <- abs(round(10000000*rnorm(1)))
  saveRDS(results,paste0("results_",idnum1, midletter,idnum2, ".RDS"))
  setkey(means, approach)
  proc_minutes_number_pcs <- subset(unique(means),
                                    select=c("approach","minutes","number_pc"))
  write.csv(proc_minutes_number_pcs,
            paste0("proc_minutes_number_pcs_",idnum1, midletter,idnum2, ".csv"),
            row.names=FALSE)
}
|
eb50c80d7a75f384f12de238eae7da836248bbca
|
ca9d8611fd0612b75df5420c28e9a6d3a21eb630
|
/man/plot_ndfa.Rd
|
6a82623231f9faae66a635b46a65a28301d593d9
|
[] |
no_license
|
cran/pooling
|
f949c18d80469acf5a4e93444ec2a5f2dc8d1394
|
7f00174fbc6b2a994c4b2fcf9cb13c15852395b1
|
refs/heads/master
| 2020-03-13T13:35:14.926732
| 2020-02-13T05:10:03
| 2020-02-13T05:10:03
| 131,141,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,054
|
rd
|
plot_ndfa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_ndfa.R
\name{plot_ndfa}
\alias{plot_ndfa}
\title{Plot Log-OR vs. X for Normal Discriminant Function Approach}
\usage{
plot_ndfa(estimates, varcov = NULL, xrange, xname = "X",
cvals = NULL, set_labels = NULL, set_panels = TRUE)
}
\arguments{
\item{estimates}{Numeric vector of point estimates for
\code{(gamma_0, gamma_y, gamma_c^T, sigsq)}.}
\item{varcov}{Numeric matrix containing the variance-covariance matrix of
\code{estimates}. If \code{NULL}, 95\% confidence bands are omitted.}
\item{xrange}{Numeric vector specifying range of X values to plot.}
\item{xname}{Character vector specifying name of X variable, for
plot title and x-axis label.}
\item{cvals}{Numeric vector or list of numeric vectors specifying covariate
values to use in log-odds ratio calculations.}
\item{set_labels}{Character vector of labels for the sets of covariate
values. Only used if \code{cvals} is a list.}
\item{set_panels}{Logical value for whether to use separate panels for each
set of covariate values, as opposed to using different colors on a single
plot.}
}
\value{
Plot of log-OR vs. X generated by \code{\link[ggplot2]{ggplot}}.
}
\description{
When \code{\link{p_ndfa}} is fit with \code{constant_or = FALSE}, the
log-OR for X depends on the value of X (and covariates, if any). This
function plots the log-OR vs. X for one or several sets of covariate values.
}
\examples{
# Fit discriminant function model for poolwise X vs. (Y, C), without assuming
# a constant log-OR. Note that data were generated with a constant log-OR of
# 0.5.
data(dat_p_ndfa)
dat <- dat_p_ndfa$dat
fit <- p_ndfa(
g = dat$g,
y = dat$numcases,
xtilde = dat$x,
c = dat$c,
errors = "neither",
constant_or = FALSE
)
# Plot estimated log-OR vs. X, holding C fixed at the sample mean.
p <- plot_ndfa(
estimates = fit$estimates,
varcov = fit$theta.var,
xrange = range(dat$x[dat$g == 1]),
cvals = mean(dat$c / dat$g)
)
p
}
|
b1b7721f0162807a84fa313f199b67e84241d721
|
e3c9185a5485ad64adeb93bf6b1a4144b78fcddd
|
/man/test_ghost_csv.Rd
|
c2a3fa37be346938694bc65715a24c0ebc6ad92b
|
[] |
no_license
|
cran/Ghost
|
fba034fb25bed2d524d2d8127872cc5fbbe24c6c
|
f5d953c07c768fd9778d5e8554f0dd21771f436f
|
refs/heads/master
| 2022-04-22T04:09:01.554367
| 2020-03-25T15:50:05
| 2020-03-25T15:50:05
| 250,132,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
rd
|
test_ghost_csv.Rd
|
\name{test_ghost_csv}
\alias{test_ghost_csv}
\docType{data}
\title{
A simple .csv file to use in the reconstruct function.
}
\description{A simple .csv file to use in the reconstruct function. }
\usage{data("test_ghost_csv")}
\keyword{internal}
|
d92532b589651aacf0b6bb1c967fd4462ebfa0fb
|
f02e02d6d797a7da2879b04022d088024798187d
|
/Advanced R Programming/lab05/lab05_result/tests/testthat.R
|
57ddfacc6ebcef0e563b4b68483fb660e0cc91af
|
[
"MIT"
] |
permissive
|
lennartsc/MSc-Statistics-and-Machine-Learning
|
aa0d920955f12daf79c01d3233fc6381d5923672
|
57905f689db794ca9bfe4775859106942d80456a
|
refs/heads/master
| 2023-05-26T23:48:08.461875
| 2020-07-07T10:16:57
| 2020-07-07T10:16:57
| 214,446,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point for the rLab5 package: load the test
# framework and the package under test, then run every test file found
# under tests/testthat/.
library(testthat)
library(rLab5)
test_check("rLab5")
|
a5d5b478de33b01f5bc58423e62368c862ae946d
|
29ac020305d01a8caea77929e5859a5aaf707ac3
|
/lab11-shiny-apps/app3.R
|
08777453a9f6432037c3cba0fcb1e1b0208dd672
|
[] |
no_license
|
yhed10/stat133-fall-2018
|
8fa769239acaeec06853ed76865daf41b3835cb9
|
03443922c409a59fd2ec2ec191fd65c04c2d82dd
|
refs/heads/master
| 2020-03-31T02:12:33.129387
| 2018-12-05T22:05:07
| 2018-12-05T22:05:07
| 151,683,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,385
|
r
|
app3.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(ggplot2)  # required: the server builds the plot with ggplot()

# UI: sliders for the number of repetitions and for the threshold that
# decides which box a ball is drawn from, plus a numeric RNG-seed input.
ui <- fluidPage(

   # Application title
   titlePanel("Drawing Balls Experiment"),

   # Sidebar with the simulation controls
   sidebarLayout(
      sidebarPanel(
         sliderInput("repetitions",
                     "Number of repetitions:",
                     min = 1,
                     max = 5000,
                     value = 100),
         sliderInput("pb",
                     "Threshold for choosing boxes:",
                     min = 0,
                     max = 1,
                     value = 0.5),
         numericInput("sd",
                      label = "Choose a random seed",
                      value = 12345)
      ),

      # Show the running relative-frequency plot
      mainPanel(
         plotOutput("distPlot")
      )
   )
)

# Server: simulates drawing 4 balls per repetition (each ball taken from
# box1 or box2 depending on a uniform draw against the threshold), counts
# the blue balls per repetition, and plots the running relative frequency
# of each possible blue-ball count.
server <- function(input, output) {

   output$distPlot <- renderPlot({
      box1 <- c('blue', 'blue', 'red')
      box2 <- c('blue', 'blue', rep('red', 3), 'white')

      # One row per repetition, one column per drawn ball. For every cell
      # a uniform draw against the threshold decides which box is used.
      drawn_balls <- matrix(NA_character_, nrow = input$repetitions, ncol = 4)
      set.seed(input$sd)
      for (j in 1:4) {
         for (i in seq_len(input$repetitions)) {
            if (runif(1) > input$pb) {
               drawn_balls[i, j] <- sample(box1, size = 4, replace = TRUE)[j]
            } else {
               drawn_balls[i, j] <- sample(box2, size = 4, replace = TRUE)[j]
            }
         }
      }

      # Number of blue balls drawn in each repetition (0..4).
      numbers <- rowSums(drawn_balls == 'blue')

      dat <- data.frame(numbers = numbers,
                        reps = seq_len(input$repetitions))
      # Running relative frequency: for repetition i, the share of the
      # first i repetitions whose blue-ball count equals numbers[i].
      # ave(..., FUN = seq_along) yields the running occurrence index of
      # each count value, in order of appearance.
      dat$freqs <- ave(dat$reps, dat$numbers, FUN = seq_along) / dat$reps
      dat$number <- as.factor(dat$numbers)

      ggplot(dat, aes(x = reps, y = freqs, group = number, color = number)) +
         geom_line() +
         scale_color_manual(values = c("red", "yellow", "green", "blue", "purple")) +
         ggtitle("Relative frequencies of number of blue balls")
   })
}

shinyApp(ui = ui, server = server)
|
5739c56b4b061058dcb53b60ad711186a4d676c0
|
1add06860b78f44735db2994e4f3900b7918dd44
|
/man/hammingDist.Rd
|
2b03e55ccdcb1ebb821955492ec66be4559798b4
|
[
"MIT"
] |
permissive
|
ABohynDOE/mldoeR
|
34906ba6b226f653df32aba78859e98b43258e15
|
44116e985001a858bf39b0cde530e3513999b619
|
refs/heads/master
| 2023-08-21T13:49:53.661924
| 2021-10-05T09:53:04
| 2021-10-05T09:53:04
| 395,269,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 527
|
rd
|
hammingDist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{hammingDist}
\alias{hammingDist}
\title{Hamming distance between two vectors}
\usage{
hammingDist(a, b)
}
\arguments{
\item{a}{first vector}
\item{b}{second vector}
}
\value{
Hamming distance between \code{a} and \code{b}
}
\description{
\code{hammingDist(a,b)} finds the Hamming distance between two vectors \code{a} and \code{b}.
}
\details{
The Hamming distance is the number of positions at which the two
vectors differ.
}
|
667d99c8501efb5a8aaf617ab2b998b7bfcc7fd7
|
f0cf9bdb92ba3df4a780f143daee5603f4c4dd0f
|
/Second/CienciaDeDatos/ProyectoFinal/gray.R
|
8a0a7cb01a941774044ec4bc8ad791fadf8b8844
|
[
"MIT"
] |
permissive
|
fou-foo/MCE
|
b321abaa53479e428989d2f847bdc9c5dc7c225e
|
a279ed86fa31f89b0233257313ff3f72da9aab92
|
refs/heads/master
| 2021-06-05T21:33:27.121270
| 2020-11-15T02:27:04
| 2020-11-15T02:27:04
| 98,847,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,442
|
r
|
gray.R
|
# Image segmentation via normalized spectral clustering (normalized-cuts
# style): build a sparse pixel-affinity matrix with a C++ kernel, form the
# normalized graph Laplacian, extract its smallest eigenvectors with a
# Lanczos solver, and k-means-cluster the embedding into segments.
# NOTE(review): depends on the local files 'W1.cpp' and '001.tif' and on a
# hard-coded Windows working directory, so it only runs on the author's machine.
setwd('C:\\Users\\fou-f\\Desktop\\MCE\\Second\\AnalisisNumericoYOptimizacion\\ProyectoFinal')
library(Rcpp) # interface for C++ code
library(RcppEigen) # Eigen support for the C++ code
library(RSpectra) # Lanczos eigensolver (eigs_sym)
library(imager) # image reading/manipulation
library(rtiff)
library(abind)
sourceCpp('W1.cpp') # compile the C++ program (defines Kernel_float)
t1 <- Sys.time() # start measuring execution time
dir()
#imagen <- image_read(path = '001.tif' )
imagen <- readTiff('001.tif', page = 0, reduce = 0, pixmap = FALSE)
# Transpose each channel matrix, stack them into a 3-D array, and wrap the
# result as a cimg object.
image <- lapply(imagen, FUN = function(x) return(t(x)))
image <- abind(image, along = 3)
image <- as.cimg(image)
#plot(image)
#display(image)
t1 <- t1 -Sys.time() # NOTE(review): this yields a negative difftime; see the matching line near the end
dim(image)
############# preprocessing: 160*120 works fine; the image is downscaled so it fits in memory
gray.imagen <- grayscale(image) # convert to gray scale
gray.imagen <- resize(im = gray.imagen, size_x=1500 , size_y = 2239/2, size_z = 1, size_c = 1 )
plot(gray.imagen)
dim(gray.imagen) # check the image size
remove(imagen) # drop the original image from the environment to save memory
gc()
# normalize the gray levels to [0, 1]
M <- as.matrix(gray.imagen)
M <- (M -min(M))/(max(M)-min(M))
sig <- 1
siz <- 2*round(3*sig) + 1 # NOTE(review): sig/siz are computed but never used below
(h <- dim(M)[2])
(w <- dim(M)[1] )
vecinos <- .001 # fraction of pixel pairs kept as graph edges
edges <- ((h*w)*(h*w-1))*vecinos # according to the paper up to 90% of the edges can be removed; arguably this should be ((h*w)*(h*w+1)/2)*.1
edges.sugerido <- edges/(h*w) # average number of edges per node
cuadrado <- edges.sugerido**.5 # side of the square pixel neighbourhood; we fix this quantity
cuadrado <- round(cuadrado) +1
sigJ <- 0.05 # intensity-similarity bandwidth (see paper)
sigd <- 10 # spatial-distance bandwidth (see paper)
r2 <- cuadrado**2 # squared neighbourhood radius passed to the kernel
dim(M)
plot(as.cimg(M))
t.w <- Sys.time()
# Build the pixel-affinity matrix with the compiled C++ kernel.
# NOTE(review): Kernel_float is defined in W1.cpp (not visible here);
# presumably it returns an (h*w) x (h*w) affinity matrix — confirm there.
W <- Kernel_float( M, h, w, r2, sigJ, sigd)
print('tick1')
t.w <- Sys.time()- t.w
remove(M) # drop the matrix to save memory
gc()
W <- as(W, "sparseMatrix") # cast to the 'sparseMatrix' class
gc()
hist(as.matrix(W))
d <- Matrix::colSums(W) # column sums = node degrees
D_medio <- Matrix::.sparseDiagonal(n = h*w, x = d**(-.5) ) # D^{-1/2} for the generalized eigenvalue problem
# Normalized Laplacian: D^{-1/2} (D - W) D^{-1/2}
W <- D_medio%*%(Matrix::.sparseDiagonal(n = h*w, x = d ) -W)%*%D_medio
print('tick2')
Z <- eigs_sym(W, k=3, which='LM', sigma = 0) # Lanczos in shift-invert mode around 0 -> eigenvalues closest to zero
Z$values # inspect the three smallest eigenvalues
remove(W) # free RAM
remove(d)
gc()
print('tick3')
#####################
# Map the eigenvectors of the normalized problem back through D^{-1/2};
# these become the embedding coordinates used for segmentation.
Y1 <- D_medio%*%(Z$vectors[,1]) # (original author's note: this was the key question — how to use the computed eigenvectors to segment)
hist(as.matrix(Y1)) # can be skipped for large matrices
Y2 <- D_medio%*%Z$vectors[,2]
hist(as.matrix(Y2))
remove(D_medio) # save memory
gc()
# Two-segment result: k-means on the second eigenvector alone.
set.seed(0)
kmeans <- kmeans(unlist(Y2), centers = 2, nstart = 50)
print(table(kmeans$cluster))
mascara <- matrix(kmeans$cluster-1, ncol = h, byrow = TRUE)
segmentacion <- mascara
table(segmentacion)
imagen.segmentacion <- as.cimg(round(segmentacion,1))
plot(imagen.segmentacion) # looks great
# Three-segment result: k-means on both eigenvectors; cluster labels are
# rescaled to {0, 0.5, 1} so they display as gray levels.
set.seed(0)
data.a.segmentar <- cbind(Y1,Y2 )
kmeans2 <- kmeans(data.a.segmentar, centers = 3, nstart = 50)
kmeans2$cluster <- kmeans2$cluster-1
kmeans2$cluster <- kmeans2$cluster/2
print(table(kmeans2$cluster))
mascara <- matrix(kmeans2$cluster, ncol = h, byrow = TRUE)
segmentacion <- mascara
imagen.segmentacion <- as.cimg((round(segmentacion,1)))
plot(imagen.segmentacion) # looks great
t1 <- Sys.time() -t1 # NOTE(review): t1 is already a (negative) difftime here, so this produces a timestamp, not an elapsed duration — likely a timing bug
print(t1)
gc()
|
56f85e775e46b86c0b7e41488bbc9f1b92d47c1b
|
8dd4d96e9a642727540bab3c9a277f220153f33b
|
/week4/rankhospital.R
|
3f24066ab71f846600dc0ef9aba88b342c89596f
|
[] |
no_license
|
MarkSinke/datasciencecoursera
|
4af42380d5a80431d986c95ecdbf0bff43e825f6
|
ea2569e0c5ee6ec19cb9abfe25ed46b92e6937f6
|
refs/heads/master
| 2021-01-15T21:14:49.433771
| 2014-07-25T21:25:38
| 2014-07-25T21:25:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
rankhospital.R
|
# Return the name of the hospital in `state` holding rank `num` for the
# given `outcome` (30-day mortality), read from outcome-of-care-measures.csv
# in the working directory.
#
# state   - two-letter state abbreviation, e.g. "TX"
# outcome - one of "heart attack", "heart failure", "pneumonia"
# num     - rank to return: "best", "worst", or an integer rank; a rank
#           beyond the number of ranked hospitals yields NA
rankhospital <- function(state, outcome, num = "best") {
  # Map the outcome name onto its mortality-rate column in the CSV;
  # switch() returns NULL when the outcome is not recognized.
  colNo <- switch(outcome,
                  "heart attack"  = 11,
                  "heart failure" = 17,
                  "pneumonia"     = 23)
  if (is.null(colNo)) {
    stop("invalid outcome")
  }

  fullData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # "Not Available" entries become NA under numeric coercion; the warnings
  # this raises are expected, so suppress them.
  fullData[, colNo] <- suppressWarnings(as.numeric(fullData[, colNo]))

  # Column 7 is the state; keep only rows with a usable (non-NA) rate.
  # which() drops any NA produced by the comparison.
  keep <- fullData[, 7] == state & !is.na(fullData[, colNo])
  stateData <- fullData[which(keep), ]
  if (nrow(stateData) == 0) {
    stop("invalid state")
  }

  # Rank by mortality rate, breaking ties alphabetically by hospital
  # name (column 2 = Hospital.Name).
  ordered <- stateData[order(stateData[, colNo], stateData[, 2]), ]
  index <- if (num == "best") {
    1
  } else if (num == "worst") {
    nrow(ordered)
  } else {
    as.integer(num)
  }
  as.character(ordered[index, 2])
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.