blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f8b82be4a695151ca03e222c5bc7be433b445083 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /decido/inst/testfiles/earcut_cpp/libFuzzer_earcut_cpp/earcut_cpp_valgrind_files/1609874256-test.R | 8048e90b4c6966210867077eda2e1150d8aa9204 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 687 | r | 1609874256-test.R | testlist <- list(holes = integer(0), numholes = integer(0), x = numeric(0), y = c(0, 1.78005908680576e-307, 0, 9.99544897376282e-310, 1.36361579985924e-309, 1.44282677639714e-284, 2.71623588394182e-312, 5.99897363812972e-310, 1.11998925342406e-299, 5.220109682454e-312, -3.90416067888732e+284, 0, 9.99544897376282e-310, 1.36361579985924e-309, 3.95252516672997e-323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(decido:::earcut_cpp,testlist)
str(result) |
bc6ad79a5c270ae02912726b99475582521b4577 | 84ee8997f3926f143c6ad66e317c0c941c1abdba | /scripts/gphocs/gphocs_plot_fun_msp3.R | f331b50660a25e5e69f0d4119ee3adf4fb80e78f | [] | no_license | jelmerp/lemurs_msp3 | 50c527bde88619636d458e30a8a5520d007e22d0 | 81ad98e1c51ef4ff208927b150138e2c659beb04 | refs/heads/master | 2020-06-29T09:44:16.790603 | 2020-06-23T12:31:44 | 2020-06-23T12:31:44 | 200,502,209 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,198 | r | gphocs_plot_fun_msp3.R | ## This scripts contains G-PhoCS plotting wrappers specifically for
## the msp3 project.
#### PREP DF FOR DEMOGRAPHY PLOT: 3-SPECIES MODEL ------------------------------
ttprep_3sp <- function(
Log = NULL, tt = NULL, poplist, popcols = NULL,
summary.provided = FALSE,
pops.fixed = NULL, pops.fixed.size = 1,
pop.spacing = 1, popnames = NULL,
x.even = FALSE
) {
childpops <- names(poplist)
parentpops <- as.character(poplist)
allpops <- union(childpops, parentpops)
currentpops <- setdiff(allpops, parentpops)
if(summary.provided == FALSE) {
tt <- Log %>%
filter(var %in% c('theta', 'tau')) %>%
group_by(pop, var) %>%
summarise(cval.mean = mean(cval / 1000)) %>%
pivot_wider(names_from = var, values_from = cval.mean,
values_fill = list(cval.mean = 0))
}
tt$NeToScale <- 1
if(!is.null(pops.fixed)) tt$theta[match(pops.fixed, tt$pop)] <- pops.fixed.size
## Functions:
getpos <- function(pop) match(pop, tt$pop)
get.xmin <- function(pop) tt$x.min[match(pop, tt$pop)]
get.th <- function(pop) tt$theta[match(pop, tt$pop)]
get.ta <- function(pop) tt$tau[match(pop, tt$pop)]
get.xmax <- function(pop) round(get.xmin(pop) + get.th(pop), 2)
## x start positions:
tt$x.min <- NA
tt$x.min[getpos('mac')] <- pop.spacing
tt$x.min[getpos('anc.A3')] <- get.xmax('mac')
tt$x.min[getpos('anc.sp3')] <- get.xmax('anc.A3')
tt$x.min[getpos('sp3W')] <- get.xmin('anc.sp3') - get.th('sp3W')
tt$x.min[getpos('sp3E')] <- get.xmax('anc.sp3')
tt$x.min[getpos('anc.root')] <- get.xmax('anc.A3')
tt$x.min[getpos('leh')] <- get.xmax('anc.root')
## x end positions:
tt$x.max <- round(tt$x.min + tt$theta, 2)
## y positions:
tt$y.min <- round(ifelse(tt$pop %in% currentpops, 0, tt$tau), 2)
tt$y.max <- round(ifelse(tt$pop == 'anc.root', tt$y.min + 50,
get.ta(as.character(poplist[tt$pop]))), 2)
if(!is.null(popcols)) tt$popcol <- popcols$col[match(tt$pop, popcols$pop)]
if(!is.null(popnames)) tt$pop <- popnames
return(tt)
}
#### DEMOGRAPHY PLOT WRAPPER FOR 3-SPECIES RUNS --------------------------------
dplotwrap_3sp <- function(
runID.focal, poplist, popcols,
plot.save = TRUE) {
## Prepare df underlying plot:
tt <- filter(Log, runID == runID.focal) %>%
filter(var %in% c('theta', 'tau')) %>%
group_by(pop, var) %>%
summarise(cval.mean = mean(cval / 1000)) %>%
pivot_wider(names_from = var, values_from = cval.mean, values_fill = list(cval.mean = 0))
ttp <- ttprep_3sp(
tt = tt, poplist = poplist, popcols = popcols,
x.even = FALSE, pop.spacing = 25, summary.provided = TRUE
) %>%
mutate(pop = factor(pop, levels = allpops)) %>%
arrange(pop) %>%
mutate(popcol = factor(popcol, levels = fct_inorder(popcol)))
## Main plot:
p <- dplot(
tt = ttp, plot.title = paste0(setID, ': ', runID.focal),
x.min = 0, yticks.by = 50, x.extra = 25,
popnames.adj.horz = rep(0, nrow(ttp)), popnames.adj.vert = 15,
popnames.size = 5,
saveplot = FALSE, ...
)
## Print and save:
print(p)
if(plot.save == TRUE) {
plotfile <- paste0(plotdir, '/demo/', setID, '.', runID.focal, '.demoplot.png')
ggsave(filename = plotfile, plot = p, width = 8, height = 7)
system(paste("xdg-open", plotfile))
plotfile.pdf <- paste0(plotdir, '/demo/', setID, '.', runID.focal, '.demoplot.pdf')
ggsave(filename = plotfile.pdf, plot = p, width = 8, height = 7)
}
}
#### PREP DF FOR DEMOGRAPHY PLOT: 6-SPECIES MODEL ------------------------------
ttprep_6sp <- function(
Log = NULL, tt = NULL, poplist, popcols = NULL,
summary.provided = FALSE,
pops.fixed = NULL, pops.fixed.size = 1,
pop.spacing = 1, popnames = NULL,
x.even = FALSE
) {
# Log = filter(Log, runID == 'multmig1')
# summary.provided = FALSE; pops.fixed = NULL; x.even = FALSE; popnames = NULL; pop.spacing = 25
childpops <- names(poplist)
parentpops <- as.character(poplist)
allpops <- union(childpops, parentpops)
currentpops <- setdiff(allpops, parentpops)
if(summary.provided == FALSE) {
tt <- Log %>%
subset(var %in% c('theta', 'tau')) %>%
group_by(pop, var) %>%
summarise(cval.mean = mean(cval / 1000)) %>%
pivot_wider(names_from = var, values_from = cval.mean,
values_fill = list(cval.mean = 0))
tt$pop <- as.character(tt$pop)
}
tt$NeToScale <- 1
if(!is.null(pops.fixed)) tt$theta[match(pops.fixed, tt$pop)] <- pops.fixed.size
## Functions:
getpos <- function(pop) match(pop, tt$pop)
get.xmin <- function(pop) tt$x.min[match(pop, tt$pop)]
get.th <- function(pop) tt$theta[match(pop, tt$pop)]
get.ta <- function(pop) tt$tau[match(pop, tt$pop)]
get.xmax <- function(pop) round(get.xmin(pop) + get.th(pop), 2)
## x start positions:
tt$x.min <- NA
tt$x.min[getpos('mac')] <- pop.spacing
tt$x.min[getpos('anc.A3')] <- get.xmax('mac')
tt$x.min[getpos('sp3')] <- get.xmax('anc.A3')
tt$x.min[getpos('leh')] <- get.xmax('sp3') + pop.spacing
tt$x.min[getpos('anc.LI')] <- get.xmax('leh')
tt$x.min[getpos('mit')] <- ifelse(get.xmax('anc.LI') > get.xmax('leh') + pop.spacing,
get.xmax('anc.LI'), get.xmax('leh') + pop.spacing)
tt$x.min[getpos('anc.LIS')] <- get.xmin('mit')
th.largest <- sort(c(get.th('mit'), get.th('anc.LIS')))[2]
tt$x.min[getpos('sim')] <- round(get.xmin('mit') + th.largest + pop.spacing, 2)
tt$x.min[getpos('mur')] <- get.xmax('sim') + pop.spacing
Diff1 <- ((get.xmin('anc.LIS') - get.xmax('anc.A3')) / 2) - (get.th('anc.LISA3') / 2)
tt$x.min[getpos('anc.LISA3')] <- get.xmax('anc.A3') + Diff1
Diff2 <- ((get.xmin('mur') - get.xmax('anc.LISA3')) / 2) - (get.th('anc.root') / 2)
tt$x.min[getpos('anc.root')] <- get.xmax('anc.LISA3') + Diff2
## x end positions:
tt$x.max <- round(tt$x.min + tt$theta, 2)
## y positions:
getparent <- function(child) as.character(poplist[child])
tt$y.min <- round(ifelse(tt$pop %in% currentpops, 0, tt$tau), 2)
tt$y.max <- round(ifelse(tt$pop == 'anc.root', tt$y.min + 600,
get.ta(getparent(tt$pop))), 2)
if(!is.null(popcols)) tt$popcol <- popcols$col[match(tt$pop, popcols$pop)]
if(!is.null(popnames)) tt$pop <- popnames
return(tt)
}
#### DEMOGRAPHY PLOT WRAPPER FOR 6-SPECIES RUNS --------------------------------
dplotwrap_6sp <- function(
runID.focal, poplist, popcols,
y.max = NULL, rm.y.ann = FALSE,
ylab = 'time (ka ago)', xlab = expression(N[e] ~ "(1 tick mark = 25k)"),
plot.title = NULL, plot.save = FALSE, ...
) {
if(is.null(plot.title)) plot.title <- paste0(setID, ': ', runID.focal)
## Dataframe for plotting:
tt <- filter(Log, runID == runID.focal) %>%
filter(var %in% c('theta', 'tau')) %>%
group_by(pop, var) %>%
summarise(cval.mean = mean(cval / 1000)) %>%
pivot_wider(names_from = var, values_from = cval.mean, values_fill = list(cval.mean = 0))
ttp <- ttprep_6sp(
tt = tt, poplist = poplist, popcols = popcols,
x.even = FALSE, pop.spacing = 25, summary.provided = TRUE
) %>%
ungroup() %>%
mutate(pop = factor(pop, levels = allpops)) %>%
arrange(pop) %>%
mutate(popcol = factor(popcol, levels = fct_inorder(popcol)))
## Main plot:
p <- dplot(
tt = ttp, y.max = y.max, rm.y.ann = rm.y.ann, ylab = ylab, xlab = xlab,
x.min = 0, yticks.by = 100,
popnames.adj.horz = rep(0, nrow(ttp)), popnames.adj.vert = 15,
popnames.size = 5, x.extra = 25,
saveplot = FALSE, plot.title = plot.title, ...
)
p <- p + theme(panel.grid.minor.x = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
axis.title.x = element_text(margin = margin(t = 30, r = 0, b = 0, l = 0)))
## Connecting lines:
p <- p + geom_segment(aes(y = ttp$y.max[ttp$pop == 'mur'],
yend = ttp$y.max[ttp$pop == 'mur'],
x = ttp$x.max[ttp$pop == 'anc.LISA3'],
xend = ttp$x.min[ttp$pop == 'mur']),
colour = 'grey50')
p <- p + geom_segment(aes(y = ttp$y.max[ttp$pop == 'anc.LIS'],
yend = ttp$y.max[ttp$pop == 'anc.LIS'],
x = ttp$x.max[ttp$pop == 'anc.A3'],
xend = ttp$x.min[ttp$pop == 'anc.LIS']),
colour = 'grey50')
p <- p + geom_segment(aes(y = ttp$y.max[ttp$pop == 'sim'],
yend = ttp$y.max[ttp$pop == 'sim'],
x = ttp$x.max[ttp$pop == 'anc.LI'],
xend = ttp$x.min[ttp$pop == 'sim']),
colour = 'grey50')
## Save plot:
if(plot.save == TRUE) {
plotfile <- paste0(plotdir, '/demo/', setID, '.', runID.focal, '.demoplot.png')
ggsave(filename = plotfile, plot = p, width = 8, height = 7)
system(paste("xdg-open", plotfile))
plotfile.pdf <- paste0(plotdir, '/demo/', setID, '.', runID.focal, '.demoplot.pdf')
ggsave(filename = plotfile.pdf, plot = p, width = 8, height = 7)
}
print(p)
return(p)
}
#### PREP GPHOCS RESULTS FOR MSMC COMPARISON PLOT ------------------------------
gphocs_Ne_prep <- function(Log, setID, poplist) {
#Log = filter(Log_3sp, runID == 'multmig3'); poplist = poplist_3sp
gphocsNe <- ttprep_3sp(Log = Log, poplist = poplist) %>%
select(pop, tau, theta) %>%
rename(t.min = tau, Ne = theta) %>%
filter(pop %in% c('anc.root', 'sp3', 'anc.A3', 'sp3E')) %>%
mutate(t.max = NA,
Ne = Ne * 1000,
t.min = t.min * 1000,
ID = setID)
gphocsNe$t.min[which(is.na(gphocsNe$t.min))] <- 0
gphocsNe$t.max[gphocsNe$pop == 'anc.root'] <- 10e7
gphocsNe$t.max[gphocsNe$pop == 'sp3E'] <- gphocsNe$t.min[gphocsNe$pop == 'sp3']
gphocsNe$t.max[gphocsNe$pop == 'sp3'] <- gphocsNe$t.min[gphocsNe$pop == 'anc.A3']
gphocsNe$t.max[gphocsNe$pop == 'anc.A3'] <- gphocsNe$t.min[gphocsNe$pop == 'anc.root']
gphocsNe <- gather(gphocsNe, 'aap', 'time', c('t.min', 't.max')) %>%
select(-aap) %>%
arrange(time, Ne) %>%
select(time, Ne, pop, ID)
return(gphocsNe)
}
|
84d0f5a33f41a5cc33a0a3356443f228ebf7fb41 | 0c3fa446b7aa8e5f48201e139fd7e19c351ae54d | /src/prepare_ecdc_data/R/corrections_12July.R | f06e0a9d5083a27019ee6d28b59551710b3ee3cb | [] | no_license | mrc-ide/covid19-forecasts-orderly | 477b621129ebc93dbd4e75ae4d54fbda0ad1669c | 2210ea70dc7b38d91fc1678ab0c2bb9b83c5ddcb | refs/heads/main | 2023-02-06T17:58:18.937033 | 2022-08-02T16:39:50 | 2022-08-02T16:39:50 | 254,094,782 | 33 | 12 | null | 2022-08-02T16:39:51 | 2020-04-08T13:25:08 | R | UTF-8 | R | false | false | 1,207 | r | corrections_12July.R | #####################################################################
######################################################################
######################################################################
######################################################################
########### Corrections 12th July ####################################
######################################################################
######################################################################
######################################################################
raw_data$Deaths[raw_data$DateRep == "2020-07-12" & raw_data$`Countries.and.territories` == "Algeria"] <- 8
raw_data$Deaths[raw_data$DateRep == "2020-07-11" & raw_data$`Countries.and.territories` == "Algeria"] <- 8
raw_data$Deaths[raw_data$DateRep == "2020-07-11" & raw_data$`Countries.and.territories` == "Haiti"] <- 7
raw_data$Deaths[raw_data$DateRep == "2020-07-12" & raw_data$`Countries.and.territories` == "Haiti"] <- 5
raw_data$Deaths[raw_data$DateRep == "2020-07-11" & raw_data$`Countries.and.territories` == "Ukraine"] <- 27
raw_data$Deaths[raw_data$DateRep == "2020-07-12" & raw_data$`Countries.and.territories` == "Ukraine"] <- 11
|
595ed8140e56a4249070949d8ef07c16d3641427 | bbce43dd1f7436205933f617d5c9eb7113ed4dd7 | /R/tests/testdir_jira/runit_NOPASS_hex_1799_glm_nfold_parameters.R | 593298eb2592aac19559b142c90ed0b4bb280234 | [
"Apache-2.0"
] | permissive | qicst23/h2o | 16506b05c9624b2db85c683b3abea2da27e65d35 | ddb770b9e4402abda4d2c8a1786fd5429f7f1a8c | refs/heads/master | 2021-01-15T21:03:12.458544 | 2014-07-24T08:18:07 | 2014-07-24T08:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,414 | r | runit_NOPASS_hex_1799_glm_nfold_parameters.R | ######################################################################################
# Test for HEX-1799
# h2o.glm with nfolds >= 2 should have model parameters that match the main glm model.
######################################################################################
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
options(echo=TRUE)
source('../findNSourceUtils.R')
heading("BEGIN TEST")
hex_1799_test <-
function(conn) {
path <- locate("smalldata/logreg/prostate.csv")
prostate.hex <- h2o.uploadFile(conn, path, key="prostate.hex")
main_model <- h2o.glm(x = 3:8, y = 2, data = prostate.hex, nfold = 2, standardize = FALSE, family = "binomial")
first_xval <- doNotCallThisMethod...Unsupported(conn, main_model@xval[[1]]@key)
Log.info("Expect that the xval model has a family binomial, just like the main model...")
expect_that(first_xval$glm_model$parameters$family, equals("binomial"))
expect_that(first_xval$glm_model$parameters$family, equals(main_model@model$params$family$family))
Log.info("Expect that the xval model has standardize set to FALSE as it is in the main model.")
expect_that(first_xval$glm_model$parameters$standardize, equals("true"))
expect_that(as.logical(first_xval$glm_model$parameters$standardize), equals(main_model@model$params$standardize))
testEnd()
}
doTest("Perform the test for hex 1799", hex_1799_test)
|
d71396b4fc1977e81a9422cf3e43cf226d5733e1 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.security.identity/man/cognitoidentityprovider_admin_reset_user_password.Rd | bd30f99a24d8815f7dc49f41627d1bba09403b17 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 2,413 | rd | cognitoidentityprovider_admin_reset_user_password.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_admin_reset_user_password}
\alias{cognitoidentityprovider_admin_reset_user_password}
\title{Resets the specified user's password in a user pool as an administrator}
\usage{
cognitoidentityprovider_admin_reset_user_password(
UserPoolId,
Username,
ClientMetadata = NULL
)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID for the user pool where you want to reset the user's
password.}
\item{Username}{[required] The user name of the user whose password you want to reset.}
\item{ClientMetadata}{A map of custom key-value pairs that you can provide as input for any
custom workflows that this action triggers.
You create custom workflows by assigning Lambda functions to user pool
triggers. When you use the AdminResetUserPassword API action, Amazon
Cognito invokes the function that is assigned to the \emph{custom message}
trigger. When Amazon Cognito invokes this function, it passes a JSON
payload, which the function receives as input. This payload contains a
\code{clientMetadata} attribute, which provides the data that you assigned to
the ClientMetadata parameter in your AdminResetUserPassword request. In
your function code in Lambda, you can process the \code{clientMetadata} value
to enhance your workflow for your specific needs.
For more information, see \href{https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html}{Customizing user pool Workflows with Lambda Triggers}
in the \emph{Amazon Cognito Developer Guide}.
When you use the ClientMetadata parameter, remember that Amazon Cognito
won't do the following:
\itemize{
\item Store the ClientMetadata value. This data is available only to
Lambda triggers that are assigned to a user pool to support custom
workflows. If your user pool configuration doesn't include triggers,
the ClientMetadata parameter serves no purpose.
\item Validate the ClientMetadata value.
\item Encrypt the ClientMetadata value. Don't use Amazon Cognito to
provide sensitive information.
}}
}
\description{
Resets the specified user's password in a user pool as an administrator. Works on any user.
See \url{https://www.paws-r-sdk.com/docs/cognitoidentityprovider_admin_reset_user_password/} for full documentation.
}
\keyword{internal}
|
42a0b50e48404ad22d76d07fdd123e6f71b40f0b | 7b4dd5d358e703357184b6d9e332dc19edbf773d | /plot4.R | 375bea95838ad83854d613f06335642f3c3960fa | [] | no_license | rajatparmar/ExData_Plotting1 | 0471e58b6ef2009e45b81f0dc00f51f48cc1990a | 6c842feb82778569147d060ba93a24abf74194b5 | refs/heads/master | 2021-01-18T08:47:33.096381 | 2014-07-13T11:54:52 | 2014-07-13T11:54:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,090 | r | plot4.R | plot4 <- function() {
myFile<-read.csv("./household_power_consumption.txt",sep=";",header=T,na.strings="?")
extract<-myFile[myFile$Date=="1/2/2007" | myFile$Date=="2/2/2007",]
newtime <-(strptime(paste(extract$Date, extract$Time), "%d/%m/20%y %H:%M:%S"))
extract<-cbind(extract,newtime)
par(mfrow = c(2, 2), mar = c(4, 4, 0, 0))
with(extract, plot(newtime ,Global_active_power, type="l",xlab="",ylab="Global active power"))
with(extract, plot(newtime ,Voltage, type="l",xlab="datetime",ylab="Voltage"),breaks=100)
with(extract, plot(newtime ,Sub_metering_1,type="l",xlab="",ylab="Energy Sub Metering"))
with(extract, lines(newtime ,Sub_metering_2,col="red", type="l",xlab="",ylab="Energy Sub Metering"))
with(extract, lines(newtime ,Sub_metering_3,col="blue", type="l",xlab="",ylab="Energy Sub Metering"))
legend("topright",lty=1,col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
with(extract, plot(newtime ,Global_reactive_power, type="l",xlab="datetime",ylab="Global_reactive_power"),breaks=10000)
dev.copy(png,file="plot4.png")
dev.off()
} |
70ed2de7ef8503605f34b23ee9790e7c67a1114d | f88238988b31fb01d30b3fca17d2d274d3ef7802 | /top_n6.R | b38b403b800dd05813e370598d2dfcee8f1da441 | [] | no_license | ulbstic/eurovoc_topicmodeling | a7ee832875532fa41486851f6993acfbad3e1fde | fa3eef01302ed4573e2cd63cc7284ebdfdfdecd8 | refs/heads/master | 2020-03-09T18:01:31.038361 | 2018-06-13T22:07:16 | 2018-06-13T22:07:16 | 128,839,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 448 | r | top_n6.R | library(readr)
library(tidyverse)
English_250_composition_with_topic_label <- read_csv("C:/Users/ettor/Desktop/English_250_composition_with_topic_label.csv")
View(English_250_composition_with_topic_label)
df <-
English_250_composition_with_topic_label %>%
group_by(document) %>%
top_n(n=6, topic) %>%
arrange(document, desc(topic))
View(df)
write_csv(df, "C:/Users/ettor/Desktop/English_250_composition_with_topic_label_sorted.csv") |
3a8e1df192b1e54ea72acf7f534d1bcc5e83b526 | a390b74357ffc5730dc3d017219c97b30b03f84a | /win-library/3.1/GenomeInfoDb/doc/GenomeInfoDb.R | 5311716a3ca78e51da81c90e36470e9de04977a2 | [] | no_license | dnyanadap/R | 2faabee9ad3bee4848f71920d04cea1a1aae7584 | e0482444e763b0a0d62c4db3fb48b37c2d985ff1 | refs/heads/master | 2021-03-22T04:18:15.148145 | 2017-10-27T23:23:51 | 2017-10-27T23:23:51 | 108,560,508 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,179 | r | GenomeInfoDb.R | ## ----style, eval=TRUE, echo=FALSE, results="asis"---------------------------------------
BiocStyle::latex()
## ----preliminaries, echo=FALSE, message=FALSE-------------------------------------------
library(GenomeInfoDb)
library(TxDb.Dmelanogaster.UCSC.dm3.ensGene)
## ----genomeStyles1----------------------------------------------------------------------
seqmap <- genomeStyles()
head(seqmap,n=2)
## ----name-------------------------------------------------------------------------------
names(genomeStyles())
## ----genomeStyles2----------------------------------------------------------------------
head(genomeStyles("Homo_sapiens"),5)
## ----style-present----------------------------------------------------------------------
"UCSC" %in% names(genomeStyles("Homo_sapiens"))
## ----extractSeqlevels-------------------------------------------------------------------
extractSeqlevels(species="Arabidopsis_thaliana", style="NCBI")
## ----extractSeqlevelsgroup--------------------------------------------------------------
extractSeqlevelsByGroup(species="Arabidopsis_thaliana", style="NCBI",
group="auto")
## ----seqlevelsStyle---------------------------------------------------------------------
seqlevelsStyle(paste0("chr",c(1:30)))
seqlevelsStyle(c("2L","2R","X","Xhet"))
## ----keepChr-txdb-----------------------------------------------------------------------
newchr <- paste0("chr",c(1:22,"X","Y","M","1_gl000192_random","4_ctg9_hap1"))
seqlevelsInGroup(newchr, group="sex")
seqlevelsInGroup(newchr, group="auto")
seqlevelsInGroup(newchr, group="circular")
seqlevelsInGroup(newchr, group="sex","Homo_sapiens","UCSC")
## ----check2-----------------------------------------------------------------------------
seqnames <- c("chr1", "chr9", "chr2", "chr3", "chr10")
all(seqnames %in% extractSeqlevels("Homo_sapiens", "UCSC"))
## ----orderSeqlevels---------------------------------------------------------------------
seqnames <- c("chr1","chr9", "chr2", "chr3", "chr10")
orderSeqlevels(seqnames)
seqnames[orderSeqlevels(seqnames)]
## ----rankSeqlevels----------------------------------------------------------------------
seqnames <- c("chr1","chr9", "chr2", "chr3", "chr10")
rankSeqlevels(seqnames)
## ----find-------------------------------------------------------------------------------
mapSeqlevels(c("chrII", "chrIII", "chrM"), "NCBI")
## ----basic-gr---------------------------------------------------------------------------
gr <- GRanges(paste0("ch",1:35), IRanges(1:35, width=5))
gr
## ----renameseqlevels--------------------------------------------------------------------
newnames <- paste0("chr",1:35)
names(newnames) <- paste0("ch",1:35)
head(newnames)
gr <- renameSeqlevels(gr,newnames)
gr
## ----dropseqlevels----------------------------------------------------------------------
dropSeqlevels(gr,paste0("chr",23:35))
## ----keepseqlevels----------------------------------------------------------------------
keepSeqlevels(gr, paste0("chr",1:22))
## ----keepstdchr-------------------------------------------------------------------------
keepStandardChromosomes(gr)
## ----keepstdchr-2-----------------------------------------------------------------------
plantgr <- GRanges(c(1:5,"MT","Pltd"), IRanges(1:7,width=5))
keepStandardChromosomes(plantgr, species="Arabidopsis thaliana")
## ----genome-description-class, message=FALSE--------------------------------------------
library(BSgenome.Celegans.UCSC.ce2)
class(Celegans)
is(Celegans, "GenomeDescription")
provider(Celegans)
seqinfo(Celegans)
gendesc <- as(Celegans, "GenomeDescription")
class(gendesc)
gendesc
provider(gendesc)
seqinfo(gendesc)
bsgenomeName(gendesc)
## ----Seqinfo-egs------------------------------------------------------------------------
## Note that all the arguments (except 'genome') must have the
## same length. 'genome' can be of length 1, whatever the lengths
## of the other arguments are.
x <- Seqinfo(seqnames=c("chr1", "chr2", "chr3", "chrM"),
seqlengths=c(100, 200, NA, 15),
isCircular=c(NA, FALSE, FALSE, TRUE),
genome="toy")
length(x)
seqnames(x)
names(x)
seqlevels(x)
seqlengths(x)
isCircular(x)
genome(x)
x[c("chrY", "chr3", "chr1")] # subset by names
## Rename, drop, add and/or reorder the sequence levels:
xx <- x
seqlevels(xx) <- sub("chr", "ch", seqlevels(xx)) # rename
xx
seqlevels(xx) <- rev(seqlevels(xx)) # reorder
xx
seqlevels(xx) <- c("ch1", "ch2", "chY") # drop/add/reorder
xx
seqlevels(xx) <- c(chY="Y", ch1="1", "22") # rename/reorder/drop/add
xx
y <- Seqinfo(seqnames=c("chr3", "chr4", "chrM"),
seqlengths=c(300, NA, 15))
y
merge(x, y) # rows for chr3 and chrM are merged
suppressWarnings(merge(x, y))
## Note that, strictly speaking, merging 2 Seqinfo objects is not
## a commutative operation, i.e., in general 'z1 <- merge(x, y)'
## is not identical to 'z2 <- merge(y, x)'. However 'z1' and 'z2'
## are guaranteed to contain the same information (i.e. the same
## rows, but typically not in the same order):
suppressWarnings(merge(y, x))
## This contradicts what 'x' says about circularity of chr3 and chrM:
isCircular(y)[c("chr3", "chrM")] <- c(TRUE, FALSE)
y
if (interactive()) {
merge(x, y) # raises an error
}
## ----quick-style------------------------------------------------------------------------
txdb <- TxDb.Dmelanogaster.UCSC.dm3.ensGene
seqlevels(txdb)
genomeStyles("Drosophila melanogaster")
mapSeqlevels(seqlevels(txdb), "NCBI")
## ----sequence, eval=FALSE---------------------------------------------------------------
# sequence <- seqlevels(x)
#
# ## sequence is in UCSC format and we want NCBI style
# newStyle <- mapSeqlevels(sequence,"NCBI")
# newStyle <- newStyle[complete.cases(newStyle)] # removing NA cases.
#
# ## rename the seqlevels
# x <- renameSeqlevels(x,newStyle)
#
# ## keep only the seqlevels you want (say autosomes)
# auto <- extractSeqlevelsByGroup(species="Homo sapiens", style="NCBI",
# group="auto")
# x <- keepSeqlevels(x,auto)
## ----sessionInfo, results='asis', print=TRUE, eval=TRUE---------------------------------
toLatex(sessionInfo())
|
8eaf26821b589f17d1c3ba7de5bbfb6494685767 | 1d0e055220bb0a5d500eaea5b0eca8446ed8c874 | /tests/testthat/test-global-option.R | e6855338dffee9aef04af6b7ec69f7ed833591e0 | [] | no_license | schoonees/yardstick | 614e97ce246408e9f71cba7abee8348f7fd3eb63 | dc07dbe58e1688b51a5eaaf65cfce6e93a59a9f0 | refs/heads/master | 2020-04-04T18:56:35.617796 | 2018-11-01T20:21:00 | 2018-11-01T20:21:00 | 156,185,946 | 0 | 0 | null | 2018-11-05T08:41:31 | 2018-11-05T08:41:31 | null | UTF-8 | R | false | false | 1,371 | r | test-global-option.R | context("Global option: yardstick.event_first")
lst <- data_altman()
pathology <- lst$pathology
path_tbl <- lst$path_tbl
###################################################################
test_that('starts true', {
expect_true("yardstick.event_first" %in% names(options()))
expect_true(getOption("yardstick.event_first"))
})
test_that('Can flip global option', {
options(yardstick.event_first = FALSE)
on.exit(options(yardstick.event_first = TRUE))
expect_false(getOption("yardstick.event_first"))
})
###################################################################
test_that('switch event definition', {
options(yardstick.event_first = FALSE)
on.exit(options(yardstick.event_first = TRUE))
expect_equal(
sens(pathology, truth = "pathology", estimate = "scan")[[".estimate"]],
54/86
)
expect_equal(
sens(path_tbl)[[".estimate"]],
54/86
)
expect_equal(
spec(pathology, truth = "pathology", estimate = "scan")[[".estimate"]],
231/258
)
expect_equal(
spec(path_tbl)[[".estimate"]],
231/258
)
expect_equal(
j_index(pathology, truth = "pathology", estimate = "scan")[[".estimate"]],
(231/258) + (54/86) - 1
)
expect_equal(
mcc(pathology, truth = "pathology", estimate = "scan")[[".estimate"]],
((231 * 54) - (32 * 27)) / sqrt((231 + 32)*(231 + 27) * (54 + 32) * (54 + 27))
)
})
|
95c8e227b45c668b7b8e261e13e045b11b80ba6f | 09e90273e1005027ddba5a8fd95a9ff65a95febd | /Bond_sim.R | db270fed96defa804a4df69f0fc44f24094445d3 | [] | no_license | sakuvirtanen/poma2020E | f9d7fde4a011ff6a2a7c1bbd377570873588378a | 4b45d08b5c5f0999c6149ae852fb77f7cc2114e5 | refs/heads/main | 2023-01-22T06:27:18.114057 | 2020-11-30T12:53:43 | 2020-11-30T12:53:43 | 316,382,051 | 0 | 3 | null | 2020-11-29T20:53:24 | 2020-11-27T02:34:19 | R | UTF-8 | R | false | false | 2,190 | r | Bond_sim.R |
Bond_sim <- function(ISIN, N, Begin_Date, End_Date, Steps) {
# Date conversion for matching data ranges:
Begin_Date_ = as.yearmon(Begin_Date, "%Y-%m")
Begin_Date_ = as.Date(Begin_Date_)
Begin_Date_ = format(Begin_Date_,"%Y-%m")
class(Begin_Date_)
End_Date_ = End_Date
Sim_First_Date_ = End_Date_
# Extract bond data:
Bond_history = Bonds[Bonds$ISIN==ISIN,]
# Format dates to make matching
Bond_history$Date = format(as.Date(Bond_history$Date), "%Y-%m")
# Store maturity day:
Maturity = as.Date(Bond_history$Maturity[1])
# Store coupon frequency:
Coupon_frequency = Bond_history$`Coupon Freq`[1]
# Store coupon amount:
Coupon = Bond_history$Coupon[1]
# Store yield history:
Yields = Bond_history$`Mid Yield`[c(which(Bond_history$Date == Begin_Date_):which(Bond_history$Date == End_Date_))]
# Vasicek model calibration:
# Estimate the regression:
Yield_reg = lm(Yields[c(2:length(Yields))] ~ Yields[c(1:length(Yields)-1)])
# Estimate lambda i.e. reversion speed:
lambda = as.numeric(1 - Yield_reg$coefficients[2])*12
# Estimate myy i.e. mean yield:
myy = as.numeric(Yield_reg$coefficients[1]/(1-Yield_reg$coefficients[2]))
# Estimate sigma i.e. volatility:
sigma = sqrt(var(Yield_reg$residuals)*12)
# Initialize random variables:
randoms = matrix(rnorm(Steps*N, mean = 0, sd = 1), nrow = N, ncol = Steps)
# Initialize matrix for simualted yields:
yields_sim = matrix(0, nrow = N, ncol = Steps + 1)
# Loop N simulation runs:
for (i in 1:N) {
# Choose normal random variables for yield simulation:
randoms_ = randoms[i,]
# Set initial yield from beginning of simulation:
yields_sim[i,1] = Bond_history$`Mid Yield`[which(Bond_history$Date == Sim_First_Date_)]
# Loop through simulated yields:
for (j in 2:ncol(yields_sim)) {
# Set yield:
yields_sim[i,j] = yields_sim[i,j-1] + (myy-yields_sim[i,j-1])*1/12 + sigma*randoms_[j-1]*sqrt(1/12)
}
}
# Initialize matrix for simulated prices at the end:
Simulations = Bond_price_matrix(Sim_First_Date_, Maturity, Steps, yields_sim/100, Coupon_frequency, Coupon/100)
# return results:
return(Simulations)
}
|
09bd1641a325da053c4d33227a3a9473a12789eb | 50b3141e6964a7e523858f6ed51cb1faa45def71 | /Book of Alexey - page 375 to 387.R | 1553ff11b9a9a313e0020f8ca2bc0b148162574a | [] | no_license | Henryfrss/Studying-R | 03fb64c67fa6461a644ad32b761e307e6fa6f0d7 | d5287bc888c57ea1ee45eb59e8594a9d95ddf5d9 | refs/heads/main | 2023-05-29T22:26:24.140977 | 2021-05-30T22:12:35 | 2021-05-30T22:12:35 | 366,582,256 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,529 | r | Book of Alexey - page 375 to 387.R | #Book of Alexey Shipunov - page 375 to 387
# Worked tutorial script (Shipunov, pp. 375-387): data import, subsetting,
# descriptive statistics, base graphics, and a paired t-test on a "bugs"
# morphometric data set with columns SEX, COLOR, LENGTH, WEIGHT.
#Read data from bugs.txt
data <- read.table("data/bugs.txt", header = TRUE)
#Find the males with the Length higher than 10 mm
data.m.big <- data[data$SEX == 1 & data$LENGTH > 10 ,]
#Add new column WEIGHT.R (ratio between weight and length)
data$WEIGHT.R <- data$WEIGHT/data$LENGTH
#Save the new column back to data.txt
#NOTE(review): this overwrites the original input file in place.
write.table(data, file = "data/bugs.txt", quote = FALSE)
#change SEX and COLOR structure, Numerical to Categorical
data$SEX <- factor(data$SEX, labels = c("female", "male"))
data$COLOR <- factor(data$COLOR, labels = c("red", "blue", "green"))
#If the data have missing values (NA), the commands above will not work;
#deliberately introduce one NA to demonstrate na.rm handling below.
data[3,3] <- NA
#To calculate mean without noticing missing data
mean(data$WEIGHT, na.rm = TRUE)
#Another way is to remove rows with NA from the data
data2.o <- na.omit(data)
#Important for categorical data:
#Look how many times every value appears in the data file
table(data$SEX)
table(data$COLOR)
#Transform frequencies into percents (100% is the total number of bugs)
100*prop.table(table(data$SEX))
#standard deviation
sd(data$WEIGHT)
#sd for each numerical column (cols 3, 4)
sapply(data[ , 3:4], sd)
#with a missing value (NA)
sapply(data[ , 3:4], sd, na.rm = TRUE)
#Coefficient of variation (CV), in percent
100*sd(data$WEIGHT)/mean(data$WEIGHT)
#We can calculate any characteristic separately per group;
#For example, means for insect weights by sex
tapply(data$WEIGHT, data$SEX, mean)
#How many individuals of each color are among males and females?
table(data$COLOR, data$SEX)
#Now the same in percents
100*prop.table(table(data$COLOR, data$SEX))
#Mean values of weight separately for every combination of color and sex
#(i.e., for red males, red females, green males.. and so on)
tapply(data$WEIGHT, list(data$SEX, data$COLOR), mean)
#Plotting
hist(data$WEIGHT, breaks=3)
#(to see more detail, increase the number of breaks)
#if you want to split data in a specific way
#(i.e., by 20 units, starting from 0 and ending in 100)
hist(data$WEIGHT, breaks=seq(0, 100, 20))
#Boxplot:
#shows outliers, maximum, minimum, quartile range and median
#for any measurement variable
boxplot(data$LENGTH)
#for males and females separately
boxplot(data$LENGTH ~ data$SEX)
#There are two commands which together help to check normality of the character
qqnorm(data$WEIGHT); qqline(data$WEIGHT)
#Scatterplot where all bugs are represented with small circles
plot(data$LENGTH, data$WEIGHT, type="p")
#(type="p" is the default for plot())
#to change the size of dots, use the cex parameter
plot(data$LENGTH, data$WEIGHT, type="p", cex=0.5)
plot(data$LENGTH, data$WEIGHT, type="p", cex=2)
#to change the circles into triangles
plot(data$LENGTH, data$WEIGHT, type="p", pch=2)
#to replace the circle with the SEX text code (0/1)
plot(data$LENGTH, data$WEIGHT, type="n")
text(data$LENGTH, data$WEIGHT, labels=data$SEX)
#the same plot, but only with one-letter labels
plot(data$LENGTH, data$WEIGHT, pch=as.character(data$SEX))
#if you want different point types, try pch values 0-25
plot(data$LENGTH, data$WEIGHT, pch=15)
#It's possible to specify different background and frame colors
plot(data$LENGTH, data$WEIGHT, type="n")
text(data$LENGTH, data$WEIGHT, labels=data$SEX, col=10)
###NOT WORKING###
#You can use symbols from Hershey fonts (Mars/Venus glyphs via vfont)
plot(data$LENGTH^3, data$WEIGHT, type="n",xlab=expression("Volume (cm"^3*")"), ylab="Weight")
text(data$LENGTH^3, data$WEIGHT, labels=ifelse(data$SEX, "\\MA", "\\VE"), vfont=c("serif","plain"), cex=1.5)
#we can paint symbols with different colors
plot(data$LENGTH, data$WEIGHT, type="n", main = "Bugs Weight*Length", xlab = "LENGTH", ylab = "WEIGHT")
points(data$LENGTH, data$WEIGHT, pch=c(0,3), col=1:2)
#and add a legend
legend("bottomright", legend=c("male", "female"),pch=c(0, 3), col=1:2)
#SAVE THE PLOT AS PDF FILE (copies the current screen device)
dev.copy(pdf, "graph.pdf")
dev.off()
#There is also a better way to save plots,
#because it does not duplicate to screen and
#therefore works better in R scripts
pdf("graph.pdf")
plot(data$LENGTH, data$WEIGHT, type="n", main = "Bugs Weight*Length", xlab = "LENGTH", ylab = "WEIGHT")
points(data$LENGTH, data$WEIGHT, pch=c(0,3), col=1:2)
legend("bottomright", legend=c("male", "female"),pch=c(0, 3), col=1:2)
dev.off()
###BE CAREFUL !!!###
#note here that R issues no warning if a
#file with the same name already exists on the disk;
#it simply erases it and saves the new one
#Testing:
#The significance of difference between means for paired parametric data
t.test(data$WEIGHT, data$LENGTH, paired=TRUE)
993806e5bd6bf7c59073962b1d1a9f0ce3b39d63 | 9ae10d78d198956cb648642554ba7512e21a5b82 | /analysis/Ch2.SI.final.OLD.R | 77184d1ecc53c355331d50fc952227b7d7a4ebb0 | [] | no_license | LaurenWild/UAF_SpermWhale_Diet | 30b7d85a9f0b64713cfdbafc390dc72ebca650c7 | 4deb4e43302c2a9fd275099fdb7878c1b6e617d9 | refs/heads/master | 2020-05-24T13:45:52.781771 | 2019-05-31T01:04:23 | 2019-05-31T01:04:23 | 187,296,462 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 57,861 | r | Ch2.SI.final.OLD.R | # This code was developed to analyze variability in stable isotope ratios of sperm whales, and their groundfish/squid prey.
# This data is part of Chapter 2 of my Ph.D. dissertation.
# Species include sperm whales, sablefish, grenadier, shortraker rockfish, spiny dogfish, skates, robust clubhook squid, magister armhook squid, glass squid, and neocalanus copepods.
# Author: Lauren Wild, lawild@alaska.edu
# March 2019
############################################################
# Load necessary libraries:
library(ggplot2)  # plotting (loaded once; the duplicate call was removed)
library(viridis)  # color scheme for plots that is easy to read (from simmr)
library(Hmisc)
library(psych)
library(devtools)
library(plotrix)  # std.error(); mean_se() gives the same answer
library(MASS)
library(car)
library(stats)
library(ggpubr)
library(plyr)
library(here)     # project-relative file paths for read.table()
# Bug fix: the packages below are required by code further down but were
# never loaded, so lme()/lmer()/dredge() calls would fail in a fresh session.
library(nlme)     # lme() linear mixed-effects models
library(lme4)     # lmer() mixed-effects models
library(MuMIn)    # dredge() all-subsets model selection
#### Begin with all layers, Line 32; includes all isotope data for sperm whales with innner layer available
#### Isolate inner layer, begins line 87
#### Prey starts at line 663
#### TL calculations at 1500; will change..
#################################################################################
#### -----------------------------------------------------------------###########
###### -------------------- SPERM WHALE LAYER DATA: -------------------- #######
###### ----------- Contains Inner Layer for isotope analysis ----------- ########
#################################################################################
# Read the whale skin-layer isotope data.
# NOTE(review): this absolute-path read is immediately overwritten by the
# here::here() read of (presumably) the same file below; only the second
# call matters on other machines.
Pm2<- read.table('/Users/laurenwild/Desktop/UAF/Thesis/StableIsotopes/Data/PmIsotopes2_forR.csv',sep=",",header=TRUE)
View(Pm2)
Pm2<-read.table(here::here("PmIsotopes2_forR.csv"),sep=",",header=TRUE)
str(Pm2) #Get the structure of variables (factor, numeric, etc.)
#Need to make Layer a numeric variable:
Pm2["Layer2"] <- NA #Creates new column
# NOTE(review): as.numeric() on a factor returns the (alphabetical) level
# codes, so Layer2 = 5 - code only orders layers correctly if the level
# ordering matches the intended inner-to-outer ordering -- the levels()
# call below is the check.
Pm2$Layer2<- 5-as.numeric(Pm2$Layer) # Creates a new column making layer numeric
levels(Pm2$Layer)
str(Pm2$Layer2)
levels(Pm2$Month)
#Explore how all the data looks: d13C vs d15N biplot, one color per whale,
#each point labeled with its skin layer.
ggplot(Pm2,aes(d13C,d15N,label=Layer, color=as.factor(Whale.ID))) +
geom_point(size=4) + #scale_color_manual(breaks = Pm2$Whale,values=ccodes) +
geom_text(hjust=0.5,vjust = -2, size = 3)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(legend.position="none")
#Test for normality (Shapiro-Wilk; a non-significant p means no evidence
#against normality).
hist(Pm2$d13C)
shapiro.test(Pm2$d13C) #p=0.09494, not sig so it IS normal
hist(Pm2$d15N)
shapiro.test(Pm2$d15N) # p=0.1935, not sig so it IS normal
# Test layers with respect to d13C, and d15N:
# Exclude the "full" sample, just compare layers.
# Mixed-effects fits: Layer is the fixed effect, whale sample is a random
# intercept (repeated layers per whale). lme() is from nlme; lmer() from lme4.
fit1.C<- lme(d13C~Layer, data=Pm2[Pm2$Layer!='Full',], random=~1|Sample.Number)
summary(fit1.C) #All layers significantly different
summary(aov(fit1.C))
plot(fit1.C)
# Same model refit with lme4 as a cross-check.
fit2.C<-lmer(d13C~Layer + (1 | Sample.Number), data=Pm2[Pm2$Layer!='Full',])
summary(fit2.C)
fit1.N<- lme(d15N~Layer, data=Pm2[Pm2$Layer!='Full',], random=~1|Sample.Number)
summary(fit1.N) #All layers significantly different
summary(aov(fit1.N)) #Now "layer" is not significant??????
plot(fit1.N)
# Per-sample scatter of d15N colored by layer (visual check of the model).
ggplot(Pm2,aes(as.factor(Sample.Number),d15N, color=as.factor(Layer))) +
geom_point()+
xlab("Sample Number")+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
labs(color="Layer")
# Same per-sample scatter for d13C.
ggplot(Pm2,aes(as.factor(Sample.Number), d13C, color=as.factor(Layer))) +
geom_point()+
xlab("Sample Number")+
ylab(expression(paste(delta^13, "C (\u2030)",sep="")))+
labs(color="Layer")
######################################################################
######################################################################
### Isolate Inner Layer ### SAMPLE SIZE = 33
######################################################################
# Keep only the innermost skin layer (most recent diet signal).
Pm2Inner<- Pm2[Pm2$Layer=="Inner",]
dim(Pm2Inner) #length of data set is 33 samples
# Set up month as a numeric value (1-12) by matching abbreviations against
# month.abb.  (Two discarded exploratory match/grep expressions removed.)
Pm2Inner$Month2<- match(Pm2Inner$Month, month.abb)
#Convert date to julian date and add column to data frame.
library(date) # retained from the original script; may be used later in the file
class(Pm2Inner$Date) #It's a factor variable
# Store the parsed date as class Date rather than POSIXlt: POSIXlt is a
# list and is fragile as a data-frame column; strftime("%j") below is
# unaffected by the change.
Pm2Inner$Date2 <- as.Date(as.character(Pm2Inner$Date), format='%m/%d/%y')
class(Pm2Inner$Date2) #Now class is Date
View(Pm2Inner) #Should have dates in Date2 Column!
# Day of year (Julian day, 1-366) as a numeric seasonal covariate.
Pm2Inner$doy <- as.numeric(strftime(Pm2Inner$Date2, format = "%j"))
doy <- Pm2Inner$doy # keep the standalone vector the original also created
View(Pm2Inner) # Check everything looks good!
#Plot out all sperm whale inner layers (n=33):
ggplot(Pm2Inner,aes(d13C,d15N)) +
geom_point(size=4) +
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 20, base_family = "Helvetica")
# Explore how d13C and d15N values of inner layers change by month:
# NOTE(review): setwd() changes global state for the rest of the session;
# the tiff()/dev.off() pair writes the next plot to disk on the Desktop.
setwd('/Users/laurenwild/Desktop')
tiff(filename="InnerLayerByMonth.tiff", height = 12, width = 17, units = 'cm',
     compression = "lzw", res = 300)
ggplot(Pm2Inner,aes(d13C,d15N,label=Month, color=as.factor(Month))) +
geom_point(size=4) + #scale_color_manual(breaks = Pm2$Whale,values=ccodes) +
geom_text(hjust=0.5,vjust = -2, size = 2)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 20, base_family = "Helvetica") +
labs(color="Month") #takes off as.factor from legend!
dev.off()
# Axis labels for the sampling months (May-September).
MonthLabs<-c("May", "Jun", "Jul", "Aug", "Sep")
# One-way ANOVA of d13C by month (Month is a factor here).
I.fit1.1 <- aov(d13C~Month, data=Pm2Inner)
summary(I.fit1.1) #Significant @ 5% level p=0.018
ggplot(Pm2Inner, aes(as.factor(Month2), d13C))+
geom_boxplot()+
scale_x_discrete(labels=c('5'='May', '6'='Jun', '7'='Jul', '8'='Aug', '9'='Sep'))+
xlab("Month")+
ylab(expression(paste(delta^13, "C (\u2030)",sep="")))+
theme_bw()
# Publication-styled version of the same boxplot.
ggplot(Pm2Inner, aes(as.factor(Month2), d13C)) + geom_boxplot() +
xlab("Month")+
ylab(expression(paste(delta^13, "C (\u2030)",sep=""))) +
scale_x_discrete(labels=MonthLabs)+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# One-way ANOVA of d15N by month.
I.fit1.2 <- aov(d15N~Month, data=Pm2Inner)
summary(I.fit1.2) #Not significant p=0.051
ggplot(Pm2Inner, aes(as.factor(Month2), d15N))+
geom_boxplot()+
scale_x_discrete(labels=c('5'='May', '6'='Jun', '7'='Jul', '8'='Aug', '9'='Sep'))+
xlab("Month")+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw()
ggplot(Pm2Inner, aes(as.factor(Month2), d15N)) + geom_boxplot() +
xlab("Month")+
ylab(expression(paste(delta^15, "N (\u2030)",sep=""))) +
scale_x_discrete(labels=MonthLabs)+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
##Pairwise t-test to test if different months have sig diff in d13C & d15N values;
## NEED TO LOOK UP p.adj TREATMENTS (p.adj="none" applies no multiple-comparison
## correction, so these p-values are anti-conservative).
tapply(Pm2Inner$d13C,INDEX=list(Pm2Inner$Month), FUN=mean)
pairwise.t.test(Pm2Inner$d13C, Pm2Inner$Month, p.adj="none")
tapply(Pm2Inner$d15N,INDEX=list(Pm2Inner$Month), FUN=mean)
pairwise.t.test(Pm2Inner$d15N, Pm2Inner$Month, p.adj="none")
###Plot ISOTOPE RATIOS by YEAR to see if any potential baseline shifts over the sampling period
ggplot(Pm2Inner, aes(as.factor(Year), d13C)) + geom_boxplot() +
xlab("Year")+
ylab(expression(paste(delta^13, "C (\u2030)",sep=""))) +
#scale_x_discrete(labels=MonthLabs)+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# NOTE(review): Year is numeric here, so aov() fits a linear year trend,
# not a between-year ANOVA; use as.factor(Year) if group means are intended.
I.fit1.3 <- aov(d13C~Year, data=Pm2Inner)
summary(I.fit1.3) #Significant at 5% level p=0.0396
# Base-graphics boxplot of d13C by year, annotated with per-year sample sizes.
# Fix: use boxplot() explicitly (guarantees the stats/n list is returned) and
# place the labels at group positions 1..k -- the original indexed by the
# data frame's columns (seq_along(Pm2Inner)), which misplaced the labels,
# and drew the plot twice.
p <- boxplot(d13C ~ Year, data = Pm2Inner, main="Inner Layer d13C by Year", xlab="Year", ylab=expression(paste(delta^13, "C (\u2030)",sep="")))
text(seq_along(p$n), p$stats[3, ], labels = p$n)
#Probably not enough data points per year ....
ggplot(Pm2Inner, aes(as.factor(Year), d15N)) + geom_boxplot() +
xlab("Year")+
ylab(expression(paste(delta^15, "N (\u2030)",sep=""))) +
#scale_x_discrete(labels=MonthLabs)+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
I.fit1.4 <- aov(d15N~Year, data=Pm2Inner)
summary(I.fit1.4) #Not significant
# Same annotated base-graphics boxplot for d15N.
q <- boxplot(d15N ~ Year, data = Pm2Inner, main="Inner Layer d15N by Year", xlab="Year", ylab=expression(paste(delta^15, "N (\u2030)",sep="")))
text(seq_along(q$n), q$stats[3, ], labels = q$n)
#SET UP AVG YEARLY ISOTOPE RATIOS FOR INNER LAYER SAMPLES BIPLOT
# Per-year means and SDs of both isotopes.  aggregate() returns one row per
# year, sorted by Year, so the four results line up row-for-row.
d15N.In.Mean <- aggregate(d15N~Year, data=Pm2Inner, mean)
d13C.In.Mean <- aggregate(d13C~Year, data=Pm2Inner, mean)
d15N.In.SD <- aggregate(d15N~Year, data=Pm2Inner, sd)
d13C.In.SD <- aggregate(d13C~Year, data=Pm2Inner, sd)
# Build the summary table directly.  (The old cbind() approach duplicated the
# Year column four times and then deleted Year.1..Year.3; it also created an
# unused PmInnerAvg data frame, removed here.)
PmInnerAvgYr <- data.frame(
  Year = d15N.In.Mean$Year,
  d15N = d15N.In.Mean$d15N,
  d13C = d13C.In.Mean$d13C,
  SD.N = d15N.In.SD$d15N,
  SD.C = d13C.In.SD$d13C
)
View(PmInnerAvgYr)
setwd('/Users/laurenwild/Desktop')
tiff(filename="PmInner_By_Year.tiff", height = 12, width = 17, units = 'cm',
     compression = "lzw", res = 200)
# Biplot of yearly means with +/- 1 SD error bars on both axes.  Columns are
# referenced bare inside aes() (equivalent to the old PmInnerAvgYr$... form
# and the recommended ggplot style).
ggplot(PmInnerAvgYr,aes(d13C,d15N, label=Year)) + geom_point(size=4) +
geom_errorbarh(aes(xmax=d13C+SD.C, xmin=d13C-SD.C, height = 0.01)) +
geom_errorbar(aes(ymax=d15N+SD.N, ymin=d15N-SD.N, width = 0.01))+
geom_text(hjust=0.5,vjust = -2, size = 3)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
labs(color="Year") #takes off as.factor from legend!
dev.off()
# Biplot of the raw inner-layer samples colored and labeled by year.
ggplot(Pm2Inner,aes(d13C,d15N, color=as.factor(Year), label=Year)) + geom_point(size=4) +
geom_text(hjust=0.5,vjust = -2, size = 3)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
labs(color="Year") #takes off as.factor from legend!
# Biplot colored by season (Early/Mid/Late).
ggplot(Pm2Inner,aes(d13C,d15N,color=Season)) +
geom_point(size=4) + #scale_color_manual(breaks = Pm2$Whale,values=ccodes) +
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_viridis_d(breaks=c("Early","Mid","Late"))+
theme_bw(base_size = 20, base_family = "Helvetica") +
labs(color="Season") #takes off as.factor from legend!
#Plot Frequent versus non-frequent depredators
ggplot(Pm2Inner,aes(d13C,d15N, color=as.factor(Frequent))) + geom_point(size=4) +
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_manual(values=c("Frequent"="purple", "Non-Frequent"="blue", "Unk"="grey"))+
theme_bw(base_size = 24, base_family = "Helvetica")+
labs(color="Frequent") + theme(legend.position = c(0.15, 0.8))+
theme(legend.title=element_blank(),legend.text=element_text(size=8)) #legend.direction = "horizontal"
# Whales with a known depredation status (drop "Unk").
# NOTE(review): Pm2InnerSerial is redefined with a different meaning
# (Frequent only) later in the script; this version is temporary.
Pm2InnerSerial<-subset(Pm2Inner, !Frequent=="Unk")
ggplot(Pm2InnerSerial,aes(d13C,d15N, color=as.factor(Frequent))) + geom_point(size=4) +
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_manual(values=c("Frequent"="purple", "Non-Frequent"="blue"))+
theme_bw(base_size = 24, base_family = "Helvetica")+
labs(color="Frequent") + theme(legend.position = c(0.15, 0.8))+
theme(legend.title=element_blank(),legend.text=element_text(size=8)) #legend.direction = "horizontal"
# Old (<2010) vs recent (>2010) samples.
ggplot(Pm2Inner,aes(d13C,d15N, color=as.factor(Recent))) + geom_point(size=4) +
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
labs(color="Recent") + theme(legend.position = c(0.15, 0.8))+
theme(legend.title=element_blank(),legend.text=element_text(size=8))+ #legend.direction = "horizontal"
scale_color_discrete(breaks=c("Old", "Recent"),
                     labels=c("Old (<2010)", "Recent (>2010)"))
Pm2InnerRecent<-subset(Pm2Inner, Recent=="Recent")
ggplot(Pm2InnerRecent,aes(d13C,d15N, color=as.factor(Frequent))) + geom_point(size=4) +
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_manual(values=c("Frequent"="purple", "Non-Frequent"="blue"))+
theme_bw(base_size = 24, base_family = "Helvetica")+
labs(color="Frequent") + theme(legend.position = c(0.85, 0.2))+
theme(legend.title=element_blank(),legend.text=element_text(size=8)) #legend.direction = "horizontal"
# NOTE(review): the four fits below are assigned but never passed through
# summary()/anova() here; the significance claims in the trailing comments
# cannot be reproduced from this code alone -- confirm before citing.
InnerOldRecentC<-aov(d13C~Recent, data=Pm2Inner) #sig @ 5% level recent vs. old for d13C
InnerOldRecentN<-aov(d15N~Recent, data=Pm2Inner) #not sig recent vs. old for d15N
fitdoyC<-lm(d13C~doy, data=Pm2Inner) #doy not significant
fitdoyN<-lm(d15N~doy, data=Pm2Inner) #doy not significant
# Export the inner-layer data (written to the current working directory,
# which setwd() above pointed at the Desktop).
write.table(Pm2Inner, file="PmSIMMData.csv", sep=",")
#################################################################
### Mixed Effects models for d15N: DAY OF YEAR, REGION, YEAR  ###
#################################################################
# Candidate models: seasonal (doy, optionally quadratic) and spatial (Region)
# fixed effects, with either Year or individual whale (Sample.Number) as a
# random intercept; mod5 is the fixed-effects-only baseline.
# lme() is from nlme; dredge() below is from MuMIn.
mod1<-lme(d15N ~ doy + I(doy^2) + as.factor(Region), data=Pm2Inner, random = ~ 1 |as.factor(Year))
mod2<-lme(d15N ~ doy + as.factor(Region), data=Pm2Inner, random = ~ 1|as.factor(Year))
mod3<-lme(d15N ~ doy + I(doy^2) + as.factor(Region) + Year, data=Pm2Inner, random = ~ 1 |as.factor(Sample.Number))
mod4<-lme(d15N ~ doy + as.factor(Region) + Year, data=Pm2Inner, random= ~ 1|as.factor(Sample.Number))
mod5<-lm(d15N ~ doy + as.factor(Region) + Year, data=Pm2Inner)
anova(mod1) #doy^2 term is significant (p=0.0297), which means there is slightly significant seasonal variation.
anova(mod2) #neither variable significant
anova(mod3) #doy^2 significant p=0.0085; Year almost p=0.0659
anova(mod4) #no variables significant
anova(mod5) #no variables significant
# All-subsets AICc model selection (MuMIn::dredge).
dredge(mod1) #best model is intercept-only model; AICc=86.3, next lowest 92.5(w/ region)
dredge(mod2) #best model is intercept-only model; AICc=86.3, next lowest 92.5(w/ region)
dredge(mod3) #best model is intercept-only model; AICc=87.5, next lowest 94.5(w/ region)
dredge(mod4) #best model is intercept-only model; AICc=87.5, next lowest 94.5(w/ region)
# Save a quadratic-in-doy visualization of the d15N seasonal pattern.
setwd('/Users/laurenwild/Desktop')
tiff(filename="DOY_d15N_Plot.tiff", height = 12, width = 17, units = 'cm',
     compression = "lzw", res = 300)
ggplot(aes(doy,d15N), data=Pm2Inner)+
geom_point() +
#geom_line(aes(x=Pm2Inner$doy, y), data=Pm2Inner) +
#geom_line(aes(doy,fitted), data=Pm2Inner)
stat_smooth(method="lm", formula = y~ x + I(x^2), size = 0.5)+
xlab("Julian Day")+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw()
dev.off()
# Residual diagnostics for the two d15N random-intercept models.
plot(mod1)
plot(mod2)
plot(Pm2Inner$doy, residuals(mod1)) #look @ residuals over time to make sure they look random
plot(Pm2Inner$doy, residuals(mod2))
#################################################################
### Mixed Effects models for d13C: DAY OF YEAR, REGION, YEAR  ###
#################################################################
# Same candidate set as above, fit to d13C.
mod6<-lme(d13C ~ doy + I(doy^2) + as.factor(Region), data=Pm2Inner, random = ~ 1 |as.factor(Year))
mod7<-lme(d13C ~ doy + as.factor(Region), data=Pm2Inner, random = ~ 1|as.factor(Year))
mod8<-lme(d13C ~ doy + I(doy^2) + as.factor(Region) + Year, data=Pm2Inner, random = ~ 1 |as.factor(Sample.Number))
mod9<-lme(d13C ~ doy + as.factor(Region) + Year, data=Pm2Inner, random= ~ 1|as.factor(Sample.Number))
anova(mod6) #no variables significant (doy is close, p=0.086).
anova(mod7) #neither variable significant (doy is close, p=0.0998)
anova(mod8) #doy is significant (p=0.0239) and region (p=0.0463) @ 5% level
anova(mod9) #no variables significant
dredge(mod6) #best model is intercept-only model; AICc = 65.0
dredge(mod7) #best model is intercept-only model; AICc=65.0
dredge(mod8) #best model is intercept-only model; AICc=70.0
dredge(mod9) #best model is intercept-only model; AICc=70.0
# Quadratic-in-doy visualization for d13C (screen only; not saved).
ggplot(aes(doy,d13C), data=Pm2Inner)+
geom_point() +
#geom_line(aes(x=Pm2Inner$doy, y), data=Pm2Inner) +
#geom_line(aes(doy,fitted), data=Pm2Inner)
stat_smooth(method="lm", formula = y~ x + I(x^2), size = 0.5)+
xlab("Julian Day")+
ylab(expression(paste(delta^13, "C (\u2030)",sep="")))+
theme_bw()
#########################################################################
##### BUILD SEPARATE Pm DATAFRAMES FOR SERIAL, NONSERIAL, RECENT, & OLD
##### To input later for use with Mixing Models #######
#########################################################################
#keep<- c("GOA.091", "GOA-091", "GOA-064", "GOA-010", "GOA-085", "GOA-026")
#Pm2InnerSerial<-Pm2Inner[Pm2Inner$Whale.ID %in% keep,]
# Frequent (serial) depredators only; overwrites the earlier, broader
# Pm2InnerSerial definition.  Each subset is written out for the mixing-model
# step (files land in the current working directory).
Pm2InnerSerial<-subset(Pm2Inner, Frequent=="Frequent")
View(Pm2InnerSerial)
write.table(Pm2InnerSerial, file="PmSISerial.csv", sep=",")
#Pm2InnerNonSerial<- Pm2Inner[!Pm2Inner$Whale.ID %in% keep,]
Pm2InnerNonSerial<- subset(Pm2Inner, Frequent=="Non-Frequent")
View(Pm2InnerNonSerial)
write.table(Pm2InnerNonSerial, file="PmSINonSerial.csv", sep=",")
# Recent = sampled 2010 or later; Old = before 2010.
Pm2InnerRecent<-Pm2Inner[Pm2Inner$Year>2009,]
View(Pm2InnerRecent)
write.table(Pm2InnerRecent, file="PmSIRecent.csv", sep=",")
Pm2InnerOld<-Pm2Inner[Pm2Inner$Year<2010,]
View(Pm2InnerOld)
write.table(Pm2InnerOld, file="PmSIOld.csv", sep=",")
###############################################################
## -------------------- PREY ------------------------------- ##
##### --------------------------------------------------- #####
# Read the prey (groundfish/squid) isotope data: both bulk and
# lipid-extracted (LE) runs per specimen where available.
Prey2<-read.csv('/Users/laurenwild/Desktop/UAF/Thesis/StableIsotopes/Data/Prey.LE.final.Isotopes.forR4.csv',header=TRUE, sep=",")
View(Prey2)
#Subset each species (nrow() prints the sample size for each).
Or<-subset(Prey2, Species=='Clubhook Squid')
nrow(Or)
Ia<-subset(Prey2, Species=='Ragfish')
nrow(Ia)
Af<-subset(Prey2, Species=="Sablefish")
nrow(Af)
Cy<-subset(Prey2, Species=='Grenadier')
nrow(Cy)
Ap<-subset(Prey2, Sub.Species=='Giant Grenadier')
nrow(Ap)
# NOTE(review): Cy2 here is the grenadier sub-species subset, but Cy2 is
# later reassigned to mean "grenadier with bulk d15N" -- avoid reusing the
# name if both are needed.
Cy2<-subset(Prey2, Sub.Species=='Grenadier')
nrow(Cy2)
Sb<-subset(Prey2, Species=="Shortraker Rockfish")
nrow(Sb)
Sa<-subset(Prey2, Species=="Spiny Dogfish")
nrow(Sa)
Rb<-subset(Prey2, Species=="Skate")
nrow(Rb)
Rr<-subset(Prey2, Sub.Species=="Longnose Skate")
nrow(Rr)
Bm<-subset(Prey2, Species=='Magister Squid')
nrow(Bm)
Gp<-subset(Prey2, Species=='Glass Squid')
nrow(Gp)
# Per-specimen bulk-minus-lipid-extracted (LE) offsets for each isotope.
# A nonzero mean D.* for a species indicates that lipid extraction shifts
# that isotope, which determines below whether bulk or LE values are used.
Or <- transform(Or, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Cy <- transform(Cy, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Af <- transform(Af, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Sb <- transform(Sb, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Bm <- transform(Bm, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Sa <- transform(Sa, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Rb <- transform(Rb, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Gp <- transform(Gp, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
Ia <- transform(Ia, D.d15N = d15N.bulk - d15N.LE, D.d13C = d13C.bulk - d13C.LE)
####------------------------------------------
#### PLOTS to see effect of LE on d15N values:
#### For each species: bulk-vs-extracted scatter with a fitted line against
#### the 1:1 line, plus a one-sample t-test of the paired bulk-LE offsets
#### (H0: mean offset = 0, i.e. lipid extraction has no effect).
####------------------------------------------
#Onykia plots to look at LE vs NLE effect on d15N, d13C, and C:N values:
ggplot(Or, aes(d15N.bulk, d15N.LE)) + geom_point()+
geom_smooth(method='lm')+
xlab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Or.N <- lm(d15N.bulk~d15N.LE, data=Or)
summary(Or.N)
t.test(Or$D.d15N) #p=0.049 -- NOTE(review): 0.049 < 0.05, so this is (marginally) significant at the 5% level; the original note claimed the opposite. Re-check before treating bulk and LE d15N as interchangeable.
ggplot(Or, aes(d13C.bulk, d13C.LE)) + geom_point()+
geom_smooth(method='lm')+
xlab(expression(paste(delta^13, "C (\u2030)", " bulk")))+
ylab(expression(paste(delta^13, "C (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Or.C <- lm(d13C.bulk~d13C.LE, data=Or)
summary(Or.C)
t.test(Or$D.d13C) #p=0.004 Confirms big difference in LE vs NLE for carbon, justifies lipid-extracting
#Grenadier Plots to look at LE vs NLE of d15N, d13C, and C:N values
ggplot(Cy, aes(d15N.bulk, d15N.LE)) + geom_point()+
geom_smooth(method='lm')+
xlab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Cy.N<-lm(d15N.bulk ~ d15N.LE, data=Cy)
summary(Cy.N) #p=<0.0001, so d15N is sig different between LE & NLE
t.test(Cy$D.d15N) #p=0.0004 - so should probably LE 1/2 and NLE other half
#Sablefish Plots to look at LE vs NLE of d15N values
ggplot(Af, aes(d15N.bulk, d15N.LE)) + geom_point()+
geom_smooth(method='lm')+
#scale_x_continuous(name=expression(paste(delta^15, "N (\u2030)", " bulk")), breaks=seq(12.5,18,1), limits=c(12.5,18))+
#scale_y_continuous(name=expression(paste(delta^15, "N (\u2030)", " extracted")), breaks=seq(12.5,18.5,1), limits=c(12.5,18.5))+
xlab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Af.N <- lm(d15N.bulk~d15N.LE, data=Af)
summary(Af.N)
t.test(Af$D.d15N) #p<0.0001 - so should LE for C and NLE for N
ggplot(Af, aes(d13C.bulk, d13C.LE)) + geom_point()+
geom_smooth(method='lm')+
scale_x_continuous(name=expression(paste(delta^13, "C (\u2030)", " bulk")), breaks=seq(-22,-16,1), limits=c(-22,-16))+
scale_y_continuous(name=expression(paste(delta^13, "C (\u2030)", " extracted")), breaks=seq(-22,-16,1), limits=c(-22,-16))+
xlab(expression(paste(delta^13, "C (\u2030)", " bulk")))+
ylab(expression(paste(delta^13, "C (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Af.C <- lm(d13C.bulk~d13C.LE, data=Af)
summary(Af.C)
t.test(Af$D.d13C) #p=7.98e-09, sig diff btwn LE & NLE, so need to LE
#Rockfish Plots to look at LE vs NLE of d15N values
ggplot(Sb, aes(d15N.bulk, d15N.LE)) + geom_point()+
geom_smooth(method='lm')+
xlab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Sb.N<-lm(d15N.bulk ~ d15N.LE, data=Sb)
summary(Sb.N)
t.test(Sb$D.d15N) #p<0.0001 - so should LE for C and NLE for N
## Berryteuthis Plots to look at LE vs NLE of d15N
ggplot(Bm, aes(d15N.bulk, d15N.LE)) + geom_point()+
geom_smooth(method='lm')+
xlab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Bm.N <- lm(d15N.bulk~d15N.LE, data=Bm)
summary(Bm.N)
t.test(Bm$D.d15N) #p<0.0001, so need to use bulk d15N
# Biplot of the values actually used downstream (LE carbon, bulk nitrogen).
ggplot(Bm, aes(d13C.LE, d15N.bulk)) + geom_point() +
xlab(expression(paste(delta^13, "C (\u2030)"))) +
ylab(expression(paste(delta^15, "N (\u2030)"))) +
theme_bw(base_size = 24, base_family = "Helvetica")
## Spiny Dogfish Plots to look at LE vs NLE of d15N
ggplot(Sa, aes(d15N.bulk, d15N.LE)) + geom_point()+
geom_smooth(method='lm')+
xlab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0) #Plot really looks off - like we need to use bulk d15N
Sa.N <- lm(d15N.bulk~d15N.LE, data=Sa)
summary(Sa.N)
t.test(Sa$D.d15N) #p=0.226, so don't need to use bulk d15N
## Skate Plots to look at LE vs NLE of d15N
ggplot(Rb, aes(d15N.bulk, d15N.LE)) + geom_point()+
geom_smooth(method='lm')+
xlab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " extracted")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
geom_abline(intercept=0)
Rb.N <- lm(d15N.bulk~d15N.LE, data=Rb)
summary(Rb.N)
t.test(Rb$D.d15N) #p=0.988, so don't need to use bulk d15N
### Ragfish (only n=3 samples; biplot of LE carbon vs bulk nitrogen)
ggplot(Ia, aes(d13C.LE, d15N.bulk)) + geom_point()+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
#PWS shallower, but larger, ragfish are 1permil higher in both d13C & d15N...
###########################################################
### Explore each species plot for outliers:
###########################################################
## Per-species isotope biplots used to visually screen for outliers before the
## removals below.  Af = sablefish, Cy = grenadier, Sb = shortraker rockfish,
## Sa = spiny dogfish, Rb = skate (subsets built earlier in the file from Prey2).
## The trailing comments record which rows looked like outliers in each plot.
ggplot(Af, aes(d13C.LE, d15N.bulk)) + geom_point()+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none') #one potential outlier more negative d13C,row 162
ggplot(Cy, aes(d13C.LE, d15N.bulk, color=Sub.Species)) + geom_point()+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none') #no outliers
#theme(legend.position=c(0.2,0.82))
## plot shows Giant and Pacific Grenadier have similar isotope ratios, and it's a huge spread
ggplot(Sb, aes(d13C.LE, d15N.bulk)) + geom_point()+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none') #one outlier in low d15N, low d13C, row 256
ggplot(Sa, aes(d13C.LE, d15N.bulk)) + geom_point()+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica") #no outliers for bulk values
# Skate is plotted with d15N.LE on the y axis (not d15N.bulk like the others).
ggplot(Rb, aes(d13C.LE, d15N.LE, color=Sub.Species)) + geom_point()+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
## plot shows longnose and skate have similar isotope ratios, and it's a huge spread in Nitrogen;
#two outliers in d13C, lines 310 & 311
######--------------------------------
######--------------------------------
### DEAL WITH OUTLIERS:
# All flagged rows are dropped in one shot; the commented-out lines below
# preserve the species-by-species rationale for each removed row.
# NOTE(review): the plot comments above mention rows 256, 310, 311 and 170,
# which are not in the removal vector below -- confirm the final row set.
Prey3<- Prey2[-c(55, 162, 254, 318, 324),] #all outliers removed.
#Prey2<- Prey2[-c(55),] #one outlier with grenadier length 45
#Prey2<- Prey2[-c(310,311),] #two outliers in skates with very negative d13C values
#Prey2<- Prey2[-c(256),] #one outlier in Shortraker rockfish less than 12permil, row 256
#Prey2<- Prey2[-c(162,170),] #two sablefish outliers
#Prey2<- Prey2[-c(382,383,384),] #three outliers for spiny dogfish, low d15N LE
View(Prey3)
write.table(Prey3, file="Prey-outliers-removed-Sep2018.csv", sep=",")
# Re-load the outlier-free data from disk (round-trips the table written just
# above) and rebuild all per-species subsets from Prey3.
Prey3<-read.table('/Users/laurenwild/Desktop/UAF/Thesis/StableIsotopes/Data/Prey-outliers-removed-Sep2018.csv',sep=",",header=TRUE)
View(Prey3)
Prey3$Depth.Strata<-as.factor(Prey3$Depth.Strata) #Needs to be factor for significance testing
# Two-letter prefixes are genus/species abbreviations (Or = Onychoteuthis,
# Af = Anoplopoma fimbria, etc.).
Or<-subset(Prey3, Species=='Clubhook Squid')
Ia<-subset(Prey3, Species=='Ragfish')
Af<-subset(Prey3, Species=="Sablefish")
Cy<-subset(Prey3, Species=='Grenadier')
Ap<-subset(Prey3, Sub.Species=='Giant Grenadier')
# NOTE(review): this Cy2 (Sub.Species subset) is overwritten below by the
# bulk-N grenadier subset; confirm the Sub.Species version is not needed.
Cy2<-subset(Prey3, Sub.Species=='Grenadier')
Sb<-subset(Prey3, Species=="Shortraker Rockfish")
Sa<-subset(Prey3, Species=="Spiny Dogfish")
Rb<-subset(Prey3, Species=="Skate")
Rr<-subset(Prey3, Sub.Species=="Longnose Skate")
Bm<-subset(Prey3, Species=='Magister Squid')
Gp<-subset(Prey3, Species=='Glass Squid')
###--------------------------------------------
#Species that need separate bulk N and LE for C: Sablefish, Grenadier, Shortraker, Magister Squid, Glass Squid, Ragfish, Spiny Dogfish; Species that don't: Skate, clubhook squid
#Need to make columns that have d13C LE values and d15N bulk values where necessary
#Also d13C LE values and d15N LE values where allowed as well...
#For species that have bulk values, reduce spreadsheet to just the rows where bulk was done:
# For each species needing bulk N: keep only rows with a bulk d15N value and
# copy it into a common "d15N" column so datasets can be rbind-ed later.
Af2<-Af[!is.na(Af$d15N.bulk),]
Af2$d15N<-Af2$d15N.bulk
nrow(Af2) #45
ncol(Af2) #30
Cy2<-Cy[!is.na(Cy$d15N.bulk),]
Cy2$d15N<-Cy2$d15N.bulk
nrow(Cy2) #44
ncol(Cy2) #30
Sb2<-Sb[!is.na(Sb$d15N.bulk),]
Sb2$d15N<-Sb2$d15N.bulk
nrow(Sb2) #44
ncol(Sb2) #30
Sa2<-Sa[!is.na(Sa$d15N.bulk),]
Sa2$d15N<-Sa2$d15N.bulk
nrow(Sa2) #34
ncol(Sa2) #30
Bm2<-Bm[!is.na(Bm$d15N.bulk),]
Bm2$d15N<-Bm2$d15N.bulk
nrow(Bm2) #44
ncol(Bm2) #30
# Small-sample species keep every row; skate uses lipid-extracted N instead.
Ia$d15N<-Ia$d15N.bulk
nrow(Ia) #3
ncol(Ia) #30
Gp$d15N<-Gp$d15N.bulk
nrow(Gp) #2
ncol(Gp) #30
Or$d15N<-Or$d15N.bulk
nrow(Or) #10
ncol(Or) #30
Rb$d15N<-Rb$d15N.LE
nrow(Rb) #66
ncol(Rb) #30
mean(Ia$d15N.bulk)
sd(Ia$d15N.bulk)
mean(Ia$d13C.LE)
sd(Ia$d13C.LE)
#Combine dogfish & ragfish as a source (b/c simmr only allows 2 sp to be combined, and need Af, Sa, & Ia)
SaIa<-rbind(Sa2, Ia)
nrow(SaIa) #37, 3 ragfish & 34 dogfish
SaIa$Species<-'Dogfish.Ragfish'
###---------------------------------------------------------------------
### Explore each species (length/isotope relationship, subspecies/isotope relationship, etc.)
###
library(jmv) #Has the mancova function
## GRENADIER ####
Cy2$d13C.abs<-Cy2$d13C.LE*-1
# NOTE(review): the jmv::mancova result is immediately overwritten by the
# stats::manova fit on the next line -- presumably the jmv call was only run
# for its printed output; confirm whether it is still needed.
Grennie<-mancova(data=Cy2, deps=vars(d15N.bulk, d13C.LE), factors=vars(Depth.Strata, Sub.Species), covs=Length)
Grennie<-manova(cbind(d15N.bulk,d13C.LE)~Length+Depth.Strata+Sub.Species, data=Cy2)
summary(Grennie) #only length,
summary.aov(Grennie) #d13C significant at 1% and d15N almost at 5% level for Length
summary(aov(d15N.bulk~Length, data=Cy2)) #confirmed not over %
summary(aov(d13C.LE~Length, data=Cy2)) #confirmed significant
# Diagnostic plots: isotope values against length and depth stratum.
ggplot(Cy, aes(Length, d15N.bulk, color=Sub.Species)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none') #Length 45cm is an outlier. Remove row 55?
ggplot(Cy, aes(Length, d13C.LE, color=Sub.Species)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Cy, aes(Depth.Strata, d13C.LE, color=Sub.Species)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Cy, aes(Depth.Strata, d15N.bulk, color=Sub.Species)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
### SKATE ####
# MANOVA of both isotopes against length, depth stratum, and sub-species,
# followed by one-way ANOVAs on the terms that looked interesting.
Skate<-manova(cbind(d15N.bulk,d13C.LE)~Length+Depth.Strata+Sub.Species, data=Rb)
summary(Skate) #Length & maybe depth strata significant
summary.aov(Skate) # Depth & d15N at 5% level; p=0.45; Length at d13C, p<0.002
summary(aov(d15N.bulk~Depth.Strata, data=Rb)) #p=0.44
summary(aov(d13C.LE~Length, data=Rb)) #p=0.004
ggplot(Rb, aes(Length, d15N.LE, color=Sub.Species)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Rb, aes(Length, d13C.LE, color=Sub.Species)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Rb, aes(Depth.Strata, d15N.LE, color=Sub.Species)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Rb, aes(Depth.Strata, d13C.LE, color=Sub.Species)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
## SHORTRAKER ROCKFISH ###
SrRock<-manova(cbind(d15N.bulk,d13C.LE)~Length+Depth.Strata+Year, data=Sb2)
summary(SrRock) # Nothing significant
summary.aov(SrRock) #Nothing Significant
ggplot(Sb, aes(Length, d15N.bulk)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
ggplot(Sb, aes(Length, d13C.LE)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
ggplot(Sb, aes(Depth.Strata, d13C.LE)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Sb, aes(Depth.Strata, d15N.bulk)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
#### SABLEFISH ###
# Same MANOVA structure for sablefish (Af2 = bulk-N sablefish subset).
Sable<-manova(cbind(d15N.bulk,d13C.LE)~Length+Depth.Strata, data=Af2)
summary(Sable) # Nothing significant
summary.aov(Sable) #Nothing significant
ggplot(Af, aes(Length, d15N.bulk)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
ggplot(Af, aes(Length, d13C.LE)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
### SPINY DOGFISH #####
# MANOVA of both isotopes against length and depth stratum for dogfish (Sa2).
Dogfish<-manova(cbind(d15N.bulk,d13C.LE)~Length+Depth.Strata, data=Sa2)
summary(Dogfish) # Depth.Strata significant
summary.aov(Dogfish) #Length for d15N and Depth for d13C both at 5% level
# BUG FIX: the two follow-up one-way ANOVAs below previously used data=Af2
# (the sablefish subset), apparently copied from the sablefish section above.
# They follow up on the dogfish MANOVA, which is fit to Sa2, so use Sa2.
# Re-check the significance notes in the trailing comments after rerunning.
summary(aov(d15N.bulk~Length, data=Sa2)) # at 5% level
summary(aov(d13C.LE~Depth.Strata, data=Sa2)) #Not significant
ggplot(Sa, aes(Length, d15N.bulk)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
ggplot(Sa, aes(Length, d13C.LE)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
ggplot(Sa, aes(Depth.Strata, d15N.bulk)) + geom_point()+
xlab("Depth")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
ggplot(Sa, aes(Depth.Strata, d13C.LE)) + geom_point()+
xlab("Depth")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")
### BERRYTEUTHIS MAGISTER ####
# First fit includes a Length x Depth.Strata interaction; an additive version
# of the same model is fit again after the plots below.
Berry<-manova(cbind(d15N.bulk,d13C.LE)~Length*Depth.Strata, data=Bm2)
summary(Berry) #Length is significant
summary.aov(Berry) #Length and d15N signficant
summary(aov(d15N.bulk~Length, data=Bm2)) #Significant
summary(aov(d13C.LE~Depth.Strata, data=Bm2)) #significant at 5% level
ggplot(Bm, aes(Length, d15N.bulk)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Bm, aes(Length, d13C.LE)) + geom_point()+
xlab("Length")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Bm, aes(Depth.Strata, d13C.LE)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^13, "C (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
ggplot(Bm, aes(Depth.Strata, d15N.bulk)) + geom_point()+
xlab("Depth Strata")+
ylab(expression(paste(delta^15, "N (\u2030)")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.position='none')
# Additive (no-interaction) refit; overwrites the interaction model above.
Berry<-manova(cbind(d15N.bulk,d13C.LE)~Length+Depth.Strata, data=Bm2)
summary(Berry) #Length is significant
summary.aov(Berry) #Length and d15N signficant
summary(aov(d15N.bulk~Length, data=Bm2)) #Significant
summary(aov(d13C.LE~Depth.Strata, data=Bm2)) #significant at 5% level
#Then combine everything back to new reduced dataset
# Four candidate source datasets for the mixing model:
#   Prey3.2 = all 9 species; Prey3.3 = 7 main species;
#   Prey3.4 = 7 main + glass squid; Prey3.5 = like 3.4 but with the combined
#   Dogfish.Ragfish (SaIa) source in place of separate Sa2.
Prey3.2<-rbind(Or,Cy2,Af2,Sb2,Sa2,Rb,Bm2,Ia,Gp)
Prey3.3<-rbind(Or,Cy2,Af2,Sb2,Sa2,Rb,Bm2)
Prey3.4<-rbind(Or,Cy2,Af2,Sb2,Sa2,Rb,Bm2,Gp)
Prey3.5<-rbind(Or,Cy2,Af2,SaIa,Sb2,Rb,Bm2,Gp)
#Now want the LE values of d13C ...
# Copy lipid-extracted d13C into a common "d13C" column (mirrors the d15N
# handling earlier) so all datasets share d13C/d15N column names.
Prey3.2$d13C<- Prey3.2$d13C.LE
Prey3.3$d13C<- Prey3.3$d13C.LE
Prey3.4$d13C<- Prey3.4$d13C.LE
Prey3.5$d13C<- Prey3.5$d13C.LE
color2<-c("red","forestgreen","cyan1","black","blue","purple","darkslategray3", "darkgoldenrod")
ggplot(Prey3.2, aes(d13C, d15N, color=Species)) + geom_point(size=3)+
xlab(expression(paste(delta^13, "C (\u2030)", sep='')))+
ylab(expression(paste(delta^15, "N (\u2030)", sep='')))+
scale_colour_viridis_d()
color3<-c("red","forestgreen","black","blue","purple","darkslategray3", "darkgoldenrod")
ggplot(Prey3.3, aes(d13C, d15N, color=Species)) + geom_point(size=3)+
xlab(expression(paste(delta^13, "C (\u2030)", sep='')))+
ylab(expression(paste(delta^15, "N (\u2030)", sep='')))+
scale_colour_viridis_d()
#Add elipses
ggplot(Prey3.2, aes(d13C, d15N, color=Species)) + geom_point()+
xlab(expression(paste(delta^13, "C (\u2030)", sep='')))+
ylab(expression(paste(delta^15, "N (\u2030)", sep='')))+
#theme_bw(base_size = 24, base_family = "Helvetica")+
scale_colour_viridis_d()+
stat_ellipse(type="norm")
#Put all d15N values in the same column (bulk for those that need bulk, and LE for those without)
#Now the d15N.final column has the accurate number for that species. Same for d13C
#Prey3$d15N<- Prey3$d15N.bulk
#View(Prey3)
#for (i in 1:nrow(Prey3)) {
# Prey3$d15N[is.na(Prey3$d15N)] <- Prey3$d15N.LE[is.na(Prey3$d15N)]
#}
#View(Prey3)
#Prey2.2<-Prey2[!is.na(Prey2$d13C.LE),]
#Prey2.2<-Prey2.2[!is.na(Prey2.2$d15N.LE),]
########################################################
########################################################
library(doBy)
# Summary helpers for doBy::summaryBy(): each returns a named numeric vector.
#   myfun1(x) -> c(m = mean, v = variance)
#   myfun2(x) -> c(m = mean, sd = standard deviation)
myfun1 <- function(x) {
  c(m = mean(x), v = var(x))
}
myfun2 <- function(x) {
  c(m = mean(x), sd = sd(x))
}
# Per-species mean/sd summaries of the common d15N/d13C columns for each
# candidate source dataset, written to CSV for the mixing-model inputs.
All.Prey.Sum<- summaryBy(d15N+d13C~Species, data=Prey3.3, FUN=myfun2) #Just 7 sp
All.Prey.Sum2<- summaryBy(d15N+d13C~Species, data=Prey3.2, FUN=myfun2) #GpIa
All.Prey.Sum3<- summaryBy(d15N+d13C~Species, data=Prey3.4, FUN=myfun2) #Just Gp
All.Prey.Sum4<- summaryBy(d15N+d13C~Species, data=Prey3.5, FUN=myfun2) #Just Gp, SaIa compbined
write.table(All.Prey.Sum, file="PmSources.csv", sep=",")
write.table(All.Prey.Sum2, file="PmSources-GpIa.csv", sep=",")
write.table(All.Prey.Sum3, file="PmSources-Gp.csv", sep=",")
write.table(All.Prey.Sum4, file="PmSources_Gp_IaSaCombined.csv", sep=",")
# Mean +/- SD isotope biplots of the prey summaries.
# NOTE(review): referencing columns as All.Prey.Sum$col inside aes() works here
# because there is no faceting/stat regrouping, but bare column names
# (aes(xmax = d13C.m + d13C.sd)) would be the safer ggplot2 idiom.
ggplot(All.Prey.Sum, aes(d13C.m, d15N.m, color=Species)) + geom_point(size=3)+
#geom_text(hjust=0.3,vjust = -0.7, size = 5)+
geom_errorbarh(aes(xmax=All.Prey.Sum$d13C.m+All.Prey.Sum$d13C.sd,xmin=All.Prey.Sum$d13C.m-All.Prey.Sum$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Prey.Sum$d15N.m+All.Prey.Sum$d15N.sd,ymin=All.Prey.Sum$d15N.m-All.Prey.Sum$d15N.sd, width = 0.01))+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.title=element_text(size=10))+
theme(legend.text=element_text(size=8))
#theme(legend.position = c(0.9, 0.75))
### This plot has each prey as a different color, and black text within plot
ggplot(All.Prey.Sum, aes(d13C.m, d15N.m, color=Species, label=Species)) + geom_point(size=3)+
geom_errorbarh(aes(xmax=All.Prey.Sum$d13C.m+All.Prey.Sum$d13C.sd,xmin=All.Prey.Sum$d13C.m-All.Prey.Sum$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Prey.Sum$d15N.m+All.Prey.Sum$d15N.sd,ymin=All.Prey.Sum$d15N.m-All.Prey.Sum$d15N.sd, width = 0.01))+
geom_text(color="black",hjust=-0.05,vjust = -0.7, size = 5)+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
# Black-and-white version: species distinguished by point shape instead.
Shape1<- c(3,8,17,18,7,19,15,13)
ggplot(All.Prey.Sum,aes(d13C.m,d15N.m, label=Species, shape=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Prey.Sum$d13C.m+All.Prey.Sum$d13C.sd,xmin=All.Prey.Sum$d13C.m-All.Prey.Sum$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Prey.Sum$d15N.m+All.Prey.Sum$d15N.sd,ymin=All.Prey.Sum$d15N.m-All.Prey.Sum$d15N.sd, width = 0.01))+
geom_text(hjust=-0.06,vjust = -0.7, size = 5)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_shape_manual(values=Shape1)+
theme_bw(base_size = 24, base_family = "Helvetica")
### Add glass squid & ragfish prey species;
### Prey as a different color, and black text within plot
ggplot(All.Prey.Sum2, aes(d13C.m, d15N.m, color=Species, label=Species)) + geom_point(size=3)+
geom_errorbarh(aes(xmax=All.Prey.Sum2$d13C.m+All.Prey.Sum2$d13C.sd,xmin=All.Prey.Sum2$d13C.m-All.Prey.Sum2$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Prey.Sum2$d15N.m+All.Prey.Sum2$d15N.sd,ymin=All.Prey.Sum2$d15N.m-All.Prey.Sum2$d15N.sd, width = 0.01))+
geom_text(color="black",hjust=-0.05,vjust = -0.7, size = 5)+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
#Add humboldt squid for biplot:
All.Prey.Sum5<-read.csv('/Users/laurenwild/Desktop/UAF/Thesis/StableIsotopes/Data/PmSources-GpIaDg.csv',sep=",",header=TRUE)
# Mean +/- SD biplot of the prey summary that includes Humboldt squid.
ggplot(All.Prey.Sum5, aes(Mean.d13C, Mean.d15N, color=Species, label=Species)) + geom_point(size=3)+
geom_errorbarh(aes(xmax=All.Prey.Sum5$Mean.d13C+All.Prey.Sum5$SD.d13C,xmin=All.Prey.Sum5$Mean.d13C-All.Prey.Sum5$SD.d13C, height = 0.01)) +
# BUG FIX: ymax previously referenced All.Prey.Sum4$Mean.d15N (a different
# summary table); every other term in this plot uses All.Prey.Sum5.
geom_errorbar(aes(ymax=All.Prey.Sum5$Mean.d15N+All.Prey.Sum5$SD.d15N,ymin=All.Prey.Sum5$Mean.d15N-All.Prey.Sum5$SD.d15N, width = 0.01))+
geom_text(color="black",hjust=-0.05,vjust = -0.7, size = 5)+
xlab(expression(paste(delta^13, "C (\u2030)")))+
ylab(expression(paste(delta^15, "N (\u2030)")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
#----------------------------------------------------------------
### Prey data - descriptive stats:
#Describe range, mean, median, etc. d15N, d13C, and C:N ratios for each species
###---------------------------------------------------------
# Add sperm whales in to the top of it
# Pm.Prey is just a copy of Prey3.3 (cbind of a single data frame).
Pm.Prey <- cbind(Prey3.3)
View(Pm.Prey)
library(gtools)
library(doBy)
#use sperm whale inner layer data frame
# NOTE(review): Pm2Inner is built earlier in the full file (sperm whale inner
# skin layer); columns 1,3,4 are assumed to be Species/d15N/d13C -- confirm.
Pm2Inner$Species<-"Sperm Whale"
Pm2Inner2<-Pm2Inner[,c(1,3,4)]
PmInnerAvg<-summaryBy(d15N + d13C ~ Species, data=Pm2Inner2, FUN=myfun2)
# Reduce each prey dataset to Species + the two isotope columns so they can be
# row-bound with the whale data.
Prey3.6<-Prey3.3[,c(1,30,31)]
Prey3.7<-Prey3.2[,c(1,30,31)]
Prey3.8<-Prey3.4[,c(1,30,31)]
Pm.Prey2<-rbind(Prey3.8, Pm2Inner2) #All 7 plus just Gp
Pm.Prey3<-rbind(Prey3.7, Pm2Inner2) #GpIa
Pm.Prey4<-rbind(Prey3.6, Pm2Inner2) #Just 7 main species
str(Pm.Prey2)
str(Pm.Prey3)
str(Pm.Prey4)
#Reduce data frame to just the columns I want:
#keeps <- c("SampleName", "d15N", "d13C", "Species")
#Pm.Prey2<-Pm.Prey2[keeps]
#use doBy to set up data sheet with averages and std.error of each species
myfun2<-function(x) {c(m=mean(x), sd=sd(x))}
All.Sp.Sum <-summaryBy(d15N+d13C~Species, data=Pm.Prey2, FUN=myfun2) #All 7 +Gp
View(All.Sp.Sum)
All.Sp.Sum2 <-summaryBy(d15N+d13C~Species, data=Pm.Prey3, FUN=myfun2) #All7+GpIa
View(All.Sp.Sum2)
All.Sp.Sum3 <-summaryBy(d15N+d13C~Species, data=Pm.Prey4, FUN=myfun2) #All 7
View(All.Sp.Sum3)
All.Sp.Sum4<- All.Sp.Sum2[-c(5), ] #Another way to take out ragfish... ?
View(All.Sp.Sum4)
All.Sp.Sum4.5<-rbind(All.Prey.Sum3, PmInnerAvg) #Another way to do it? all 7 sp + Gp
#Another way to add sperm whales in ....
#PmInnerSums<-summaryBy(d15N+d13C~Layer, data=Pm2Inner, FUN=myfun2)
#colnames(PmInnerSums)<- c("Species", "d15N.m", "d15N.sd", "d13C.m", "d13C.sd")
#PmInnerSums$Species <- as.character(PmInnerSums$Species)
#PmInnerSums$Species[PmInnerSums$Species == "Inner"] <- "Sperm Whale"
#All.Sp.Sum<-rbind(All.Prey.Sum, PmInnerSums)
#View(All.Sp.Sum)
Shape1<- c(3,8,17,18,7,19,15,13)
color3<- c('lightskyblue', 'darkorchid2','blue1','darkturquoise', 'yellow2', 'deeppink', "coral1", 'darkgoldenrod1')
#Just Main 7 species:
# Each tiff()/.../dev.off() group writes one publication figure to Desktop.
setwd('/Users/laurenwild/Desktop')
tiff(filename="AllSp.tiff", height = 12, width = 17, units = 'cm',
compression = "lzw", res = 200)
ggplot(All.Sp.Sum,aes(d13C.m,d15N.m, label=Species, color=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Sum$d13C.m+All.Sp.Sum$d13C.sd,xmin=All.Sp.Sum$d13C.m-All.Sp.Sum$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Sum$d15N.m+All.Sp.Sum$d15N.sd,ymin=All.Sp.Sum$d15N.m-All.Sp.Sum$d15N.sd, width = 0.01))+
geom_text(color='black', hjust=-0.03,vjust = -0.7, size = 4)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
dev.off()
# Glass Squid, Ragfish included:
color4<- c('lightskyblue', 'lavenderblush3', 'darkorchid2','blue1','green4', 'darkturquoise', 'yellow2', 'deeppink', "coral1", 'darkgoldenrod1')
ggplot(All.Sp.Sum2,aes(d13C.m,d15N.m, label=Species, color=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Sum2$d13C.m+All.Sp.Sum2$d13C.sd,xmin=All.Sp.Sum2$d13C.m-All.Sp.Sum2$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Sum2$d15N.m+All.Sp.Sum2$d15N.sd,ymin=All.Sp.Sum2$d15N.m-All.Sp.Sum2$d15N.sd, width = 0.01))+
geom_text(color='black', hjust=-0.03,vjust = -0.7, size = 4)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
setwd('/Users/laurenwild/Desktop')
# NOTE(review): this reuses filename "AllSp.tiff" and therefore overwrites the
# figure written above -- confirm whether a distinct name was intended.
tiff(filename="AllSp.tiff", height = 12, width = 17, units = 'cm',
compression = "lzw", res = 200)
ggplot(All.Sp.Sum2,aes(d13C.m,d15N.m, label=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Sum2$d13C.m+All.Sp.Sum2$d13C.sd,xmin=All.Sp.Sum2$d13C.m-All.Sp.Sum2$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Sum2$d15N.m+All.Sp.Sum2$d15N.sd,ymin=All.Sp.Sum2$d15N.m-All.Sp.Sum2$d15N.sd, width = 0.01))+
geom_text(color='black', hjust=-0.03,vjust = -0.7, size = 4)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
dev.off()
setwd('/Users/laurenwild/Desktop')
tiff(filename="AllSpGp.tiff", height = 12, width = 17, units = 'cm',
compression = "lzw", res = 200)
ggplot(All.Sp.Sum,aes(d13C.m,d15N.m, label=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Sum$d13C.m+All.Sp.Sum$d13C.sd,xmin=All.Sp.Sum$d13C.m-All.Sp.Sum$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Sum$d15N.m+All.Sp.Sum$d15N.sd,ymin=All.Sp.Sum$d15N.m-All.Sp.Sum$d15N.sd, width = 0.01))+
geom_text(color='black', hjust=-0.03,vjust = -0.7, size = 4)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
dev.off()
# NOTE(review): color5 is defined here but both plots below use
# scale_color_viridis_d(); confirm whether the manual palette is still wanted.
color5<- c('lightskyblue', 'lavenderblush3', 'darkorchid2','blue1', 'darkturquoise', 'yellow2', 'deeppink', "coral1", 'darkgoldenrod1')
ggplot(All.Sp.Sum3,aes(d13C.m,d15N.m, label=Species, color=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Sum3$d13C.m+All.Sp.Sum3$d13C.sd,xmin=All.Sp.Sum3$d13C.m-All.Sp.Sum3$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Sum3$d15N.m+All.Sp.Sum3$d15N.sd,ymin=All.Sp.Sum3$d15N.m-All.Sp.Sum3$d15N.sd, width = 0.01))+
geom_text(color='black', hjust=-0.03,vjust = -0.7, size = 4)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
ggplot(All.Sp.Sum4,aes(d13C.m,d15N.m, label=Species, color=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Sum4$d13C.m+All.Sp.Sum4$d13C.sd,xmin=All.Sp.Sum4$d13C.m-All.Sp.Sum4$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Sum4$d15N.m+All.Sp.Sum4$d15N.sd,ymin=All.Sp.Sum4$d15N.m-All.Sp.Sum4$d15N.sd, width = 0.01))+
geom_text(color='black', hjust=-0.03,vjust = -0.7, size = 4)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_color_viridis_d()+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+
theme(legend.position="none")
##############################################################################
##### ------- Add Baseline Data, First Import and Reorganize it: ------- #####
##############################################################################
### COPEPODS ### BASELINE ###
Base<- read.table('/Users/laurenwild/Desktop/UAF/Thesis/StableIsotopes/Data/Baseline.Isotopes_forR.csv',sep=",",header=TRUE)
View(Base)
#Lipid Normalization to mathematically correct d13C values:
# Two alternative lipid-normalization formulas; d13C.LN (Post 2007) is the one
# used for the plots and summaries below.
Base$d13C.LN<-(-3.32+0.99*Base$C.N.bulk)+Base$d13C.bulk #Uses Post 2007
Base$d13C.protein<-Base$d13C.bulk + (-6.39*(3.76-Base$C.N.bulk))/Base$C.N.bulk #Uses Hoffman et al. 2010
Base<-Base[,c(1:5,25,26,6:24)]
range(Base$d15N.bulk)
median(Base$d15N.bulk)
range(Base$d13C.LN)
median(Base$d13C.LN)
ggplot(Base, aes(d13C.LN, d15N.bulk, color=Station)) + geom_point(size=5)+
scale_x_continuous(name=expression(paste(delta^13, "C (\u2030)", " bulk")))+
scale_y_continuous(name=expression(paste(delta^15, "N (\u2030)", " bulk")))+
xlab(expression(paste(delta^13, "C (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
scale_color_viridis_d()+
theme(legend.justification= c(1,1), legend.position=c(1,1))
ggplot(Base, aes(d13C.LN, d15N.bulk, shape=Station)) + geom_point(size=3)+
#geom_text(hjust=-0.06,vjust = -0.7, size = 5)+
xlab(expression(paste(delta^13, "C (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.justification= c(1,1), legend.position=c(1,1))
#Average each station
library(doBy)
myfun1<-function(x) {c(m=mean(x) , v=var(x))}
myfun2<-function(x) {c(m=mean(x), sd=sd(x))}
Base.Avg <-summaryBy(d15N.bulk+d13C.LN~Station, data=Base, FUN=myfun2)
View(Base.Avg)
ggplot(Base.Avg, aes(d13C.LN.m, d15N.bulk.m, label=Station)) + geom_point(size=3)+
geom_text(hjust=-0.06,vjust = -0.7, size = 5)+
geom_errorbarh(aes(xmax=Base.Avg$d13C.LN.m+Base.Avg$d13C.LN.sd,xmin=Base.Avg$d13C.LN.m-Base.Avg$d13C.LN.sd, height = 0.01)) +
geom_errorbar(aes(ymax=Base.Avg$d15N.bulk.m+Base.Avg$d15N.bulk.sd,ymin=Base.Avg$d15N.bulk.m-Base.Avg$d15N.bulk.sd, width = 0.01))+
xlab(expression(paste(delta^13, "C (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.justification= c(1,1), legend.position=c(1,1))
# Rename columns to the common d15N/d13C scheme used by the prey summaries.
Base.Avg$d15N<-Base.Avg$d15N.bulk.m
Base.Avg$d13C<-Base.Avg$d13C.LN.m
Base.Avg$d15N.sd<-Base.Avg$d15N.bulk.sd
Base.Avg$d13C.sd<-Base.Avg$d13C.LN.sd
Base.Final<-Base.Avg[,c(1,6,8,7,9)]
View(Base.Final)
ggplot(Base.Final, aes(d13C, d15N, label=Station)) + geom_point(size=3)+
geom_text(hjust=-0.06,vjust = -0.7, size = 5)+
geom_errorbarh(aes(xmax=Base.Final$d13C+Base.Final$d13C.sd,xmin=Base.Final$d13C-Base.Final$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=Base.Final$d15N+Base.Final$d15N.sd,ymin=Base.Final$d15N-Base.Final$d15N.sd, width = 0.01))+
xlab(expression(paste(delta^13, "C (\u2030)", " bulk")))+
ylab(expression(paste(delta^15, "N (\u2030)", " bulk")))+
theme_bw(base_size = 24, base_family = "Helvetica")+
theme(legend.justification= c(1,1), legend.position=c(1,1))
# Collapse stations to a single Neocalanus baseline row.
# NOTE(review): Base.Final has no Species column when this summaryBy runs --
# confirm this grouping works as intended (it may collapse over all rows).
Base.Final3<-summaryBy(d15N+d13C~Species, data=Base.Final, FUN=myfun2)
View(Base.Final3)
#colnames(Base.Final3)<- c("Species", "d15N.m", "d15N.sd", "d13C.m", "d13C.sd")
Base.Final3$Species <- 'Neocalanus sp.'
Base.Final3<-Base.Final3[,c(5,1,2,3,4)]
# NOTE(review): hard-coded override of the computed d13C sd -- document the
# source of 0.959673 (propagated station-level sd?) or remove.
Base.Final3$d13C.sd<-0.959673
View(Base.Final3)
# Append the copepod baseline row to the species summary and plot everything.
All.Sp.Base.Sum<-rbind(All.Sp.Sum,Base.Final3)
View(All.Sp.Base.Sum)
color5<- c('lightskyblue', 'darkorchid2','blue1','darkturquoise', 'yellow2', 'deeppink', "coral1", 'darkgoldenrod1', 'grey2')
ggplot(All.Sp.Base.Sum,aes(d13C.m,d15N.m, label=Species, color=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Base.Sum$d13C.m+All.Sp.Base.Sum$d13C.sd,xmin=All.Sp.Base.Sum$d13C.m-All.Sp.Base.Sum$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Base.Sum$d15N.m+All.Sp.Base.Sum$d15N.sd,ymin=All.Sp.Base.Sum$d15N.m-All.Sp.Base.Sum$d15N.sd, width = 0.01))+
geom_text(color='black', hjust=-0.03,vjust = -0.7, size = 4)+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
# BUG FIX: this scale previously used values=color4 (the 10-entry palette from
# an earlier section); color5 is defined immediately above with one color per
# row of All.Sp.Base.Sum and is clearly the palette intended here.
scale_color_manual(values=color5)+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(legend.position="none")
### ------------------------------------------------------ ###
### -------------------------------------------------------- ###
##### ANNIE'S DATA ####
# NOTE(review): myData is not defined anywhere in this section -- this line
# appears to be a leftover from an example snippet; confirm and remove.
myData <- myData[-c(2, 4, 6), ]
# Keep only the rows (species) relevant to Annie's comparison.
All.Sp.Sum.Annie<-All.Sp.Sum[-c(1,2,4,5,6,7),]
Shape3<- c(3,9,17)
setwd('/Users/laurenwild/Desktop')
tiff(filename="AnnieData.tiff", height = 12, width = 17, units = 'cm',
compression = "lzw", res = 200)
ggplot(All.Sp.Sum.Annie,aes(d13C.m,d15N.m, label=Species, shape=Species)) + geom_point(size=5) +
geom_errorbarh(aes(xmax=All.Sp.Sum.Annie$d13C.m+All.Sp.Sum.Annie$d13C.sd,xmin=All.Sp.Sum.Annie$d13C.m-All.Sp.Sum.Annie$d13C.sd, height = 0.01)) +
geom_errorbar(aes(ymax=All.Sp.Sum.Annie$d15N.m+All.Sp.Sum.Annie$d15N.sd,ymin=All.Sp.Sum.Annie$d15N.m-All.Sp.Sum.Annie$d15N.sd, width = 0.01))+
geom_text(hjust=-0.06,vjust = -0.7, size = 3)+
scale_x_continuous(name=expression(paste(delta^13, "C (\u2030)")), breaks=seq(-26,-16,2), limits=c(-26,-16))+
scale_y_continuous(name=expression(paste(delta^15, "N (\u2030)")), breaks=seq(7.5,18,2.5), limits=c(7.5,18))+
xlab(expression(paste(delta^13, "C (\u2030)",sep="")))+
ylab(expression(paste(delta^15, "N (\u2030)",sep="")))+
scale_shape_manual(values=Shape3)+
theme_bw(base_size = 24, base_family = "Helvetica") +
theme(legend.position="none")
dev.off()
###########################################################################
#---------------------------------------------------------------------------
###############################################################################
# Trophic level calculations:
# TL = 2 + (d15N_consumer - d15N_baseline) / enrichment factor, where the
# copepod baseline is assumed to be row 9 of All.Sp.Base.Sum (the Neocalanus
# row appended above -- TODO confirm) and the enrichment factor is 2.12.
# Vectorized: replaces the old element-by-element for loop; identical values.
TL<- 2 + ((All.Sp.Base.Sum$d15N.m - All.Sp.Base.Sum$d15N.m[9]) / 2.12)
View(TL)
All.Sp.Base.Sum$TrophicLevel<-TL
write.table(All.Sp.Base.Sum, file="AllSpeciesBaseAvgsTL.csv", sep=",")
#Using Bree's equation, and my 2.12 enrichment factor, and Pm inner layer skin:
Pm2Inner$TL<-2 + (Pm2Inner$d15N - Base.Final3$d15N.m) / 2.12
mean(Pm2Inner$TL)
# BUG FIX: this previously computed sd(Pm2Inner$sd); the quantity of interest
# (paired with the mean() call above) is the trophic level just computed.
sd(Pm2Inner$TL)
# Per-species trophic level means and sds using the same baseline/enrichment.
mean(2 + (Ia$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 4.25 for ragfish
sd(2 + (Ia$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.24
mean(2 + (Af2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 4.59 sablefish
sd(2 + (Af2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.40
mean(2 + (Cy2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 4.45 grenadier
sd(2 + (Cy2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.44
mean(2 + (Sb2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 4.91 shortraker
sd(2 + (Sb2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.0.44
mean(2 + (Sa2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 4.36 dogfish
sd(2 + (Sa2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.44
mean(2 + (Bm2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 3.50 magister squid
sd(2 + (Bm2$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.51
mean(2 + (Rb$d15N - Base.Final3$d15N.m) / 2.12) #TL = 5.36 skate
sd(2 + (Rb$d15N - Base.Final3$d15N.m) / 2.12) #0.32
mean(2 + (Gp$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 3.98 glass squid
sd(2 + (Gp$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.24
mean(2 + (Or$d15N.bulk - Base.Final3$d15N.m) / 2.12) #TL = 5.66 clubhook squid
sd(2 + (Or$d15N.bulk - Base.Final3$d15N.m) / 2.12) #0.38
#Using Geraldine's 1.6 enrichment factor, and Pm Inner Layer skin:
# NOTE(review): PmInnerSums is only defined in commented-out code above, and
# PmFull.mean is not defined in this section -- these two lines will error
# unless those objects exist from an earlier session.
2 + (PmInnerSums$d15N.m - Base.Final3$d15N.m) / 1.6 #TL = 6.74
#Using Geraldine's 1.6 enrichment factor, and Pm Full:
2 + (PmFull.mean - Base.Final3$d15N.m) / 1.6 #TL = 6.77
|
72fc9f690dd99ae5ff6835348e2240e16fddc1f8 | ec83cb8bde12baf50e7eef22c32c8f5c6932ecc2 | /2016_June/analysis.R | 9a5950b1924481c5af639b98c40d5ce352e3a8b7 | [
"MIT"
] | permissive | johnmyleswhite/sf_politics | b62ff68f6761057fa093031e182fbf7cec827b76 | b4fa3ea30974d1d3327a20f75ee13b7da19be91e | refs/heads/master | 2021-07-17T10:39:43.477353 | 2016-11-28T16:21:09 | 2016-11-28T16:21:09 | 59,607,979 | 2 | 2 | null | 2016-11-28T16:21:09 | 2016-05-24T20:54:24 | R | UTF-8 | R | false | false | 8,213 | r | analysis.R | ###############################################################################
#
# Set up everything for ideal point analysis.
#
###############################################################################
# We use the following packages.
library("ggplot2")
library("dplyr")
library("reshape2")
library("stringr")
library("rstan")
library("extrafont")
# Optimize some Stan configuration settings.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# Set a flag to determine whether we perform computationally intensive and
# exact calculations or use faster approaches during interactive development.
interactive_mode <- FALSE
# In interactive development, we do very light MCMC computations even though
# the resulting estimates are quite bad. For production results, we do much
# more computation.
if (interactive_mode) {
mcmc_iter <- 2500
mcmc_thin <- 1
} else {
mcmc_iter <- 25000
mcmc_thin <- 5
}
# We'll need monospace fonts for plotting things with formatted labels.
# NOTE: font_import()'s first positional argument is `paths` (directories to
# scan), not a name filter, so font_import("mono") would scan a nonexistent
# "mono" directory. Filter on the font name via `pattern` instead.
font_import(pattern = "mono")
# Load the raw data in long form.
endorsements <- read.csv(
file.path("data", "endorsements.csv"),
stringsAsFactors = FALSE
)
# Replace verbal endorsement labels with numbers.
endorsements <- transform(
endorsements,
endorsement = ifelse(
endorsement == "Yes",
1,
ifelse(
endorsement == "No",
0,
NA
)
)
)
# Translate the long-form data set into a matrix where endorsers are rows and
# candidates are columns. We incporate category labels into the column names to
# distinguish repeated candidates such as Scott Wiener and Jane Kim, who are up
# for election to multiple offices.
wide_endorsements <- dcast(
endorsements,
endorser ~ category + candidate,
value.var = "endorsement"
)
# Store these constants for use downstream.
n_endorsers <- nrow(wide_endorsements)
n_candidates <- ncol(wide_endorsements) - 1
###############################################################################
#
# Compute ideal points using Stan.
#
###############################################################################
# Convert the endorsements data.frame into a matrix after removing the names of
# the endorsers.
M <- as.matrix(wide_endorsements[, (1 + 1):(n_candidates + 1)])
# Convert the standard dense matrix ito a COO format using three vectors.
i <- rep(1:n_endorsers, times = n_candidates)
j <- rep(1:n_candidates, each = n_endorsers)
v <- as.vector(M)
# Stan does not support NA's in its input data, so we drop those entries.
non_missing_inds <- which(!is.na(v))
i <- i[non_missing_inds]
j <- j[non_missing_inds]
v <- v[non_missing_inds]
# We'll use these functions to initialize the intercept-like parameters.
logit <- function (p) {log(p / (1 - p))}
# Clamp probabilities into [0.001, 0.999] so that logit() stays finite.
#
# @param p Numeric vector of probabilities (may contain exact 0s and 1s).
# @return Numeric vector the same length as `p`, bounded away from 0 and 1.
bound_probs <- function(p) {
  # pmin()/pmax() recycle scalar bounds, so the rep(..., length(p)) calls
  # in the original were unnecessary.
  pmin(pmax(p, 0.001), 0.999)
}
# Compute 1-dimensional ideal points using Stan.
res <- stan(
file = file.path("stan_code", "ideal_points.stan"),
data = list(
n_rows = n_endorsers,
n_cols = n_candidates,
n_obs = length(v),
i = i,
j = j,
v = v
),
iter = mcmc_iter,
warmup = 100 * mcmc_thin,
chains = 1,
thin = mcmc_thin,
init = list(
list(
a = logit(bound_probs(rowMeans(M, na.rm = TRUE))),
x = rnorm(n_endorsers, 0, 1),
b = logit(bound_probs(colMeans(M, na.rm = TRUE))),
y = rnorm(n_candidates, 0, 1)
)
)
)
# Extract parameters from the Stan results.
params <- summary(res)$summary
# The params data.frame has idiosyncratic row names we'll use for indexing.
a_inds <- paste0("a[", 1:n_endorsers, "]")
b_inds <- paste0("b[", 1:n_candidates, "]")
x_inds <- paste0("x[", 1:n_endorsers, "]")
y_inds <- paste0("y[", 1:n_candidates, "]")
# Store ideal points for endorsers.
ideal_points_endorsers <- data.frame(
endorser = wide_endorsements[, 1],
mean = params[x_inds, 1],
lower = params[x_inds, 4],
upper = params[x_inds, 8],
stringsAsFactors = FALSE
)
# Save the ideal points to a CSV file.
write.csv(
ideal_points_endorsers,
file = file.path("ideal_points", "endorsers.csv"),
row.names = FALSE
)
# Plot ideal points for endorsers.
p <- ggplot(
ideal_points_endorsers,
aes(x = reorder(endorser, mean), y = mean)
) +
geom_point() +
geom_errorbar(aes(ymin = lower, ymax = upper)) +
geom_hline(yintercept = 0, alpha = 0.3) +
xlab("") +
ylab("Ideal Point") +
coord_flip() +
theme_bw() +
theme(text = element_text(family = "mono"))
# Save the plot to a PNG file. Pass `plot = p` explicitly: ggsave()'s default
# is last_plot(), which is only set when a plot is *printed* -- and `p` is
# never printed in this non-interactive script.
ggsave(
  file.path("ideal_points", "endorsers.png"),
  plot = p,
  height = 14,
  width = 10
)
# The names of the wide_endorsements columns are a mixture of category
# information and candidate names, so we split them apart again in their
# official order.
candidate_names <- names(wide_endorsements)[(1 + 1):(n_candidates + 1)]
candidate_categories <- sapply(
strsplit(candidate_names, "_"),
function (pair) {pair[1]}
)
candidate_names <- sapply(
strsplit(candidate_names, "_"),
function (pair) {pair[2]}
)
# Store ideal points for candidates.
ideal_points_candidates <- data.frame(
category = candidate_categories,
candidate = candidate_names,
mean = params[y_inds, 1],
lower = params[y_inds, 4],
upper = params[y_inds, 8],
stringsAsFactors = FALSE
)
# Save the ideal points to a CSV file.
write.csv(
ideal_points_candidates,
file = file.path("ideal_points", "candidates.csv"),
row.names = FALSE
)
# Enumerate candidates on the two core slates by stacking the endorsements of
# the two slate-defining endorsers, labelled with their slate name.
candidate_slates <- rbind(
  endorsements %>%
    filter(endorser == "Progress Dems", endorsement == 1) %>%
    mutate(slate = "Progress"),
  endorsements %>%
    filter(endorser == "SF Tenants and Families", endorsement == 1) %>%
    mutate(slate = "Reform")
) %>%
  # `dplyr:::select` (triple colon) reached into dplyr's private namespace;
  # the exported `dplyr::select` is the supported way to call it.
  dplyr::select(category, candidate, slate)
# Join in information about slates to our ideal points.
ideal_points_candidates <- left_join(
ideal_points_candidates,
candidate_slates,
by = c("category", "candidate")
)
# Mark unaffiliated candidates.
ideal_points_candidates <- transform(
ideal_points_candidates,
slate = ifelse(is.na(slate), "Unaffiliated", slate)
)
# Make pretty-printable names by padding strings before concatenating them.
longest_category_name <- with(
ideal_points_candidates,
max(str_length(category))
)
longest_candidate_name <- with(
ideal_points_candidates,
max(str_length(candidate))
)
ideal_points_candidates <- transform(
ideal_points_candidates,
full_name = paste(
str_pad(
category,
longest_category_name,
side = "right",
pad = " "
),
str_pad(
candidate,
longest_candidate_name,
side = "left",
pad = " "
),
sep = " "
)
)
# Plot ideal points for candidates.
dccc <- c("District 17", "District 19")
p <- ggplot(
ideal_points_candidates %>% filter(category %in% dccc),
aes(
x = reorder(full_name, mean),
y = mean,
color = slate
)
) +
geom_point() +
geom_errorbar(aes(ymin = lower, ymax = upper)) +
geom_hline(yintercept = 0, alpha = 0.3) +
xlab("") +
ylab("Ideal Point") +
coord_flip() +
theme_bw() +
theme(text = element_text(family = "mono")) +
scale_color_manual(values = c("#7570b3", "#1b9e77", "#000000"))
# Save the plot to a PNG file. Pass `plot = p` explicitly: ggsave()'s default
# is last_plot(), which is only set when a plot is *printed* -- and `p` is
# never printed in this non-interactive script.
ggsave(
  file.path("ideal_points", "dccc.png"),
  plot = p,
  height = 14,
  width = 10
)
# Plot ideal points for props.
p <- ggplot(
ideal_points_candidates %>% filter(category == "Proposition"),
aes(
x = reorder(full_name, mean),
y = mean
)
) +
geom_point() +
geom_errorbar(aes(ymin = lower, ymax = upper)) +
geom_hline(yintercept = 0, alpha = 0.3) +
xlab("") +
ylab("Ideal Point") +
coord_flip() +
theme_bw() +
theme(text = element_text(family = "mono"))
# Save the plot to a PNG file. Pass `plot = p` explicitly: ggsave()'s default
# is last_plot(), which is only set when a plot is *printed* -- and `p` is
# never printed in this non-interactive script.
ggsave(
  file.path("ideal_points", "props.png"),
  plot = p,
  height = 10,
  width = 10
)
|
3a1f48aa22fdd37abf7bd04f60e835db85b1299f | 04f8fb0f857d94f316271f10138765e24089406d | /Duane Blehm's Code/Zero Gravityƒ/ZeroGravity.R | 7db72d42b4d06b4907ca99616d4dd941a32c25f5 | [] | no_license | arcanebyte/blehm | 070f47b0ee7d99af740be0f1332d564806024caf | 32f10d0d8a3d6309aca7be40f9aa29c755f4dd10 | refs/heads/master | 2020-03-23T18:38:13.333075 | 2016-10-16T12:22:42 | 2016-10-16T12:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 358 | r | ZeroGravity.R | * ZeroGravity.R
ZeroGravity.rsrc
TYPE ZGRV = STR
,0
ZeroGravity ver1.0, by Duane Blehm, 2/12/87
TYPE MENU
,1
\14
About Zero...
(-
,256
File
Quit /Q
,257
Options
Help...
LoScore...
! Sound
,258
Works
Peek OffScreen
Source Code...
,259
(Any Key to Exit)
Hi!
* the rest of the resources where built with ResEdit
INCLUDE ZeroGravity/Rsrc
|
8c56c5eb1b6801b00f239d0fa4d8fd322a6c9cee | b1610db6e3669527acc4767e7e36b22cbd32b3d8 | /WGCNA_top_co_expressed_genes.R | 1b66073ebf1488c56ecf11c33835c01b9f309375 | [] | no_license | rakrasnoff/RNAseqAnalysisPipelineScripts | 5d566a0fd4b89703f1560d938757355840a4c4c4 | cef81776ced87d47da1cb2368a522c84407d5b69 | refs/heads/master | 2020-04-06T07:12:12.869782 | 2016-09-07T19:06:05 | 2016-09-07T19:06:05 | 64,955,709 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,441 | r | WGCNA_top_co_expressed_genes.R | #WGCNA analysis of top-co-expressed genes
#############################################################################################################################################
# Return the genes most strongly correlated with a single index gene.
#
# @param indexGene Column name of the gene of interest.
# @param expressionData Numeric matrix/data frame (samples x genes) with gene
#   names as column names.
# @param topNumber How many top-|correlation| partners to consider.
# @return Named numeric vector of Pearson correlations (named by gene) for the
#   top `topNumber` partners, keeping only those with |correlation| >= 0.7.
getTopCor <- function(indexGene, expressionData, topNumber) {
  # Locate the index gene's column. (The original indexed the global
  # `datExpr` here instead of the `expressionData` argument -- fixed.)
  index <- which(colnames(expressionData) %in% indexGene)
  # Correlation of the index gene against every gene: a 1 x nGenes matrix.
  tmp <- cor(expressionData[, index], expressionData, method = "pearson")
  # Rank by |r|; position 1 is the gene's self-correlation, so skip it.
  topCorr <- order(abs(tmp), decreasing = TRUE)[2:(topNumber + 1)]
  geneCor <- tmp[topCorr]
  names(geneCor) <- colnames(tmp)[topCorr]
  # Keep only strongly co-expressed partners. (The original also had a
  # dangling `names(geneCor)=` whose RHS was the return() call -- removed.)
  geneCor[abs(geneCor) >= 0.7]
}
# Enumerate all strongly co-expressed gene pairs within a network.
#
# @param networkGenes Character vector of gene (column) names to consider.
# @param expressionData Numeric matrix/data frame (samples x genes) with gene
#   names as column names.
# @return Character matrix, one row per unordered pair whose pairwise Pearson
#   correlation exceeds 0.7 in absolute value: gene1, correlation, gene2.
getNetworkConnections <- function(networkGenes, expressionData) {
  # (The original indexed the global `datExpr` here instead of the
  # `expressionData` argument -- fixed.)
  index <- which(colnames(expressionData) %in% networkGenes)
  tmp <- cor(expressionData[, index], use = "pairwise.complete.obs",
             method = "pearson")
  # Blank out the diagonal and lower triangle so each pair appears once,
  # then drop weak correlations.
  tmp[lower.tri(tmp, diag = TRUE)] <- NA
  tmp[abs(tmp) <= 0.7] <- NA
  index2 <- which(!is.na(tmp), arr.ind = TRUE)
  cbind(colnames(tmp)[index2[, 2]], tmp[index2], rownames(tmp)[index2[, 1]])
}
#genes dex in the same direction versus control across 5 or more conditions
sharedDex=c("1810008I18Rik","2010003K11Rik","Arntl","C330021F23Rik","Ccl2","Cdkn1a","Cidec","Cyp26b1","Cyp2b10",
"Foxq1","Gadd45b","Gm16551","Hsd3b5","Hspa1b","Klhdc7a","Mycn","Nlrp12","Pdzk1ip1","Relb","Srebf1","Tuba8","Ube2c", "Chrna4")
myTopCorr=lapply(sharedDex, function(x) getTopCor(x, datExpr, 10))
myBestGenes=unique(names(unlist(myTopCorr)))
genePairs=getNetworkConnections(myBestGenes, datExpr)
write.table(genePairs, "testGenepairs.txt", row.names=F, col.names=F, append=F, quote=F, sep="\t")
#############################################################################################################################################
# Choose a set of soft-thresholding powers
powers = c(c(1:10), seq(from = 12, to=20, by=2))
# Call the network topology analysis function
sft = pickSoftThreshold(datExpr, powerVector = powers, verbose = 5)
#plot results
setEPS()
postscript(file=paste(myStamp, "choosePower.eps", sep="_"), width=10, height=5, paper="special", colormodel="srgb", horizontal=FALSE)
par( ps=12, font.lab=2, font.main=2, omi=c(0.5,0,0,0.5), mfrow = c(1,2),
mgp=c(2.75, 0.5, 0), las=0, cex=1, cex.lab=1, cex.main=1, cex.axis=1)
cex1=0.9
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
#choose 6 as softthresholding power (SFT.R.sq=0.941), then calculate adjacencies
softPower = 6
#unsigned
# adjacency = adjacency(datExpr, power = softPower, type="unsigned")
#signed
adjacency = adjacency(datExpr, power = softPower, type="signed")
# Turn adjacency into topological overlap
TOM = TOMsimilarity(adjacency)
dissTOM = 1-TOM
#use hierarchical clustering to create dendrogram of gene-gene relationships
# Call the hierarchical clustering function
geneTree = flashClust(as.dist(dissTOM), method = "average");
# Plot the resulting clustering tree (dendrogram)
# plot(geneTree, xlab="", sub="", main = "Gene clustering on TOM-based dissimilarity",
# labels = FALSE, hang = 0.04)
#identify modules by dynamic branch cutting
# We like large modules, so we set the minimum module size relatively high:
minModuleSize = 30
# Module identification using dynamic tree cut:
dynamicMods = cutreeDynamic(dendro = geneTree, distM = dissTOM,
deepSplit = 2, pamRespectsDendro = FALSE,
minClusterSize ... |
98996c2d5001c85055d47b6a8a04812ad4c655a7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fastAdaboost/examples/get_tree.Rd.R | 237735c6a2343ab4aeda90ca60fe543c211b5e8f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 323 | r | get_tree.Rd.R | library(fastAdaboost)
### Name: get_tree
### Title: Fetches a decision tree
### Aliases: get_tree
### ** Examples
fakedata <- data.frame( X=c(rnorm(100,0,1),rnorm(100,1,1)), Y=c(rep(0,100),rep(1,100) ) )
fakedata$Y <- factor(fakedata$Y)
test_adaboost <- adaboost(Y~X, fakedata, 10)
tree <- get_tree(test_adaboost,5)
|
353ebed8fbeb9311a68d8dcc6743941af40851d0 | 40472765d5b226031f97d9371e503d8b47e66904 | /R code/Auxiliary code/Resample residuals vs. outcomes.R | 386015c168fde044947bb37ac32bc2b04f228e1f | [] | no_license | guhjy/multiple_outcomes | 6157d0896d85fa23f940b834378407ca8b83ff4d | 57e5acb1e400c77271e315d16c3a890e489326e2 | refs/heads/master | 2020-04-06T08:50:35.703178 | 2018-11-13T04:08:37 | 2018-11-13T04:08:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,042 | r | Resample residuals vs. outcomes.R |
########################### REGRESSION ON ORIGINAL DATA ###########################
# simulate from multiple regression model, comparing residual resampling to full-case resampling
# super bimodal on sex
n = 1000
male = rbinom( n=n, size=1, prob=0.5 )
X = rnorm( n, mean = 4 * male, sd = 1 )
bmale = 15
bX = -1
Y = rnorm( n, mean = bmale * male + bX * X, sd = 1 )
d = data.frame( X, Y, male )
plot(X,Y)
# original data
library(ggplot2)
colors = c("pink", "blue")
ggplot( data = d, aes( x = X, y = Y, color = as.factor(male) ) ) + geom_point(size=3) +
scale_color_manual(values = colors) + theme_bw()
# super bimodal marginally on sex
ggplot( data = d, aes( x = Y, color = as.factor(male) ) ) + geom_density() +
scale_color_manual(values = colors) + theme_bw()
# regression
( mo = lm( Y ~ X + male, data = d ) )
lm(Y~X, data=d)
# are the residuals normal?
# yes, as expected
hist(mo$residuals)
########################### REGRESSION ON DATA RESAMPLING JUST Y ###########################
# THIS DOES NOT WORK BECAUSE RESIDUALS ARE NO LONGER NORMAL.
# SO WE DEFINITELY CANNOT DO THIS APPROACH.
d$Y2 = sample( d$Y, replace = TRUE )
# this plot shows that covariates are correlated, but association between X and Y
# key point: association between sex and Y also lost
ggplot( data = d, aes( x = X, y = Y2, color = as.factor(male) ) ) + geom_point(size=3) +
scale_color_manual(values = colors) + theme_bw()
ggplot( data = d, aes( x = Y2, color = as.factor(male) ) ) + geom_density() +
scale_color_manual(values = colors) + theme_bw()
# regression
( m2 = lm( Y2 ~ X + male, data = d ) )
# are the residuals normal?
# NOPE. THIS IS A PROBLEM.
hist(m2$residuals)
########################### REGRESSION ON DATA RESAMPLING RESIDUALS (WESTFALL) ###########################
# WORKS (NORMAL RESIDUALS)
d$Yr = sample( mo$residuals, replace = TRUE )
ggplot( data = d, aes( x = X, y = Yr, color = as.factor(male) ) ) + geom_point(size=3) +
scale_color_manual(values = colors) + theme_bw()
# now the outcomes are normal
ggplot( data = d, aes( x = Yr, color = as.factor(male) ) ) + geom_density() +
scale_color_manual(values = colors) + theme_bw()
# regression
( m3 = lm( Yr ~ X + male, data = d ) )
# are the residuals normal?
hist(m3$residuals)
########################### REGRESSION ON DATA RESAMPLING RESIDUALS (FOX) ###########################
# WORKS (NORMAL RESIDUALS)
# resample from residuals and attach to Y-hats
d$Yf = fitted(mo) + sample( mo$residuals, replace = TRUE )
# this looks exactly like original data, as promised
ggplot( data = d, aes( x = X, y = Yf, color = as.factor(male) ) ) + geom_point(size=3) +
scale_color_manual(values = colors) + theme_bw()
# now the outcomes are normal
ggplot( data = d, aes( x = Yf, color = as.factor(male) ) ) + geom_density() +
scale_color_manual(values = colors) + theme_bw()
# regression
# in practice with this approach, we would test vs. H0: beta = beta.hat
( m3 = lm( Yr ~ X + male, data = d ) )
# are the residuals normal?
# YES
hist(m3$residuals)
|
f40e4a2ed98e3215bd6678b10018607a1655004a | 9b66d2e4bd0beb946d9dba67e6de1d219094feeb | /R/autokey.R | 69fc6ec7e93b7eb0dbc7955941441f349d828fe3 | [] | no_license | decisionpatterns/data.table.plus | 531ccd1cf567b7f046fe6f1d8d63181238d49728 | b8c45d74e4bb4afe8cbaffbe9f69506924cd0148 | refs/heads/master | 2020-08-11T06:54:58.001409 | 2020-06-10T06:02:10 | 2020-06-10T06:02:10 | 214,511,098 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,396 | r | autokey.R | #' Automatically set data.table keys
#'
#' Atomatically sets the keys of a data.table based on \code{options}.
#'
#' @param x data.table that will get autokeys set.
#' @param keys character; the values to set for keys if present in \code{dt}.
#'
#' \code{autokey} sets the keys for a data.table. The default is to look at the
#' \code{getOption('datatable.autokey')} for the list available keys. It is equivalent
#' to setting the \code{cols} argument of \code{setkeyv} equivalent to:
#'
#' \code{ intersect( names(x), getOption('autokey') ) }
#'
#' @seealso
#' \code{setkeyv} in the \code{data.table} package.
#'
#' @examples
#' options( autokey='Species' )
#' data(iris)
#' setDT(iris)
#' autokey(iris)
#'
#' @export
autokey <- function( x, keys=getOption('datatable.autokey') ) {
if( data.table::is.data.table(x) )
data.table::setkeyv( x, intersect( names(x), keys ) )
}
#' Set the autokeys
#'
#' Set the columns to be automatically used as keys.
#'
#' @param cols character; column names to be automatically set as keys
#'
#' This is nothing more than a wrapper for
#' \code{options(datatable.autokey = cols)}.
#'
#' @rdname autokey
#' @export
set_autokeys <- function(cols) {
  options(datatable.autokey = cols)
}
#' Get the autokeys
#'
#' Get the vector of autokey column names.
#'
#' Simple wrapper around \code{getOption('datatable.autokey')}.
#' @rdname autokey
#' @export
autokeys <- function() {
  getOption("datatable.autokey")
}
|
e1148900922c743f6d5205d640eb3489d5ca317d | fe1fab57762a61ecfcf017efd8afb26a2b81e65d | /HW2/hw2.R | b515dd3e36f4fc5e24664745b90f51e710257cf1 | [] | no_license | klinvill/CSCI-183-Data-Science | 51d057758a9aa163ecd5da2a92248cd6314de48e | 5716fdba269dd743011a1e70d9a8b71ee0bb57e7 | refs/heads/master | 2020-12-24T16:23:40.474823 | 2015-06-06T15:50:54 | 2015-06-06T15:50:54 | 33,423,379 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,984 | r | hw2.R | # Goal:
# Predict survival or death for each passenger
#
# General Observations:
# Class mattered, more 1st class people had a >50% chance of survival, 2nd class was
# roughly 50-50, 3rd class had ~25% chance of survival
# Sex mattered, a large majority of females survived while a large majority of males
# died
# Having 1 or two siblings/spouses and/or parents/children aboard increased odds of
# survival, but mainly only for females
# Very young children were likely to survive while young men were very likely to die
library(ggplot2)
library(Amelia)
library(Hmisc)
train <- read.csv("~/Desktop/CSCI 183 Data Science/HW2/train.csv")
test <- read.csv("~/Desktop/CSCI 183 Data Science/HW2/test.csv")
# Exploratory Data Analysis
qplot(factor(Survived), data=train, facets=.~Pclass)
qplot(factor(Survived), data=train, facets=.~Sex)
qplot(factor(SibSp), data=train, facets=Sex~Survived)
qplot(factor(Parch), data=train, facets=Sex~Survived)
qplot(Age, data=train, facets=Sex~Survived, binwidth=5)
qplot(Fare, data=train, facets=.~Pclass, geom="density")
train$CabinGroup <- substring(train$Cabin, 1, 1)
train$CabinNum <- substring(train$Cabin, 2)
qplot(CabinGroup, data=train, facets=.~Pclass)
qplot(CabinNum, data=subset(train, CabinNum != ""), facets=Survived~.)
qplot(Fare, data=subset(train, CabinGroup != ""), facets=CabinGroup~.)
missmap(train, main="Titanic Training Data - Missings Map", col=c("yellow", "black"), legend=FALSE)
# Extract titles from the names
train$Title = rapply(strsplit(as.character(train$Name), "[,.] "), function(name) name[2])
summary(factor(train$Title))
qplot(factor(train$Title))
bystats(train$Age, train$Title, fun=median)
# Median Age per passenger Title. Titles whose ages are all missing fall back
# to the overall median of the known per-title medians.
#
# @param data data.frame with Title and Age columns.
# @return Named numeric vector of median ages, one per unique Title in `data`.
get_medians <- function(data) {
  titles <- unique(data$Title)
  # BUG FIX: the original closed over the global `train` here instead of the
  # `data` argument, so calling it on the test set silently used train data.
  medians <- sapply(titles, function(title) {
    median(subset(data, Title == title & !is.na(Age))$Age)
  })
  # Titles with no observed ages yield NA; replace with the overall median
  # (equivalent to Hmisc::impute(medians, median), minus the "impute" class
  # attribute, which nothing downstream relies on).
  medians[is.na(medians)] <- median(medians, na.rm = TRUE)
  medians
}
# Fill missing Age values using per-Title median ages.
#
# @param data data.frame with Title and Age columns.
# @param medians Named numeric vector of median ages keyed by Title
#   (as produced by get_medians()).
# @return `data` with every NA Age replaced by the median for its Title.
impute_median_by_title <- function(data, medians) {
  # Vectorized replacement: the original row-by-row loop copied the whole
  # data frame on every assignment (O(n^2) in rows).
  missing_age <- is.na(data$Age)
  data$Age[missing_age] <- unname(medians[data$Title[missing_age]])
  # Return the data frame
  data
}
train <- impute_median_by_title(train, get_medians(train))
# Logistic Regression with class, sex, and age
# all variables significant
classifier_1 <- glm(Survived ~ Pclass + Sex + Age, data=train, family="binomial")
summary(classifier_1)
# Logistic Regression with siblings/spouses and parents/children in addition to class, sex, and age
# parents/children insignificant
classifier_2 <- glm(Survived ~ Pclass + Sex + Age + SibSp + Parch, data=train, family="binomial")
summary(classifier_2)
# Logistic Regression with Embarked in addition to siblings/spouses class, sex, and age
# Embarked insignificant
classifier_3 <- glm(Survived ~ Pclass + Sex + Age + SibSp + Embarked, data=train, family="binomial")
summary(classifier_3)
# The best classifier of the bunch
classifier_4 <- glm(Survived ~ Pclass + Sex + Age + SibSp, data=train, family="binomial")
summary(classifier_4)
confint(classifier_4)
# Same as classifier 4 but treats class as a factor. Actually gives a worse response.
classifier_5 <- glm(Survived ~ factor(Pclass) + Sex + Age + SibSp, data=train, family="binomial")
summary(classifier_5)
confint(classifier_5)
classifier_6 <- glm(Survived ~ Pclass + Sex, data=train, family="binomial")
summary(classifier_6)
classifier_7 <- glm(Survived ~ Pclass + Sex + SibSp, data=train, family="binomial")
summary(classifier_7)
classifier_8 <- glm(Survived ~ factor(Pclass) + Sex + SibSp, data=train, family="binomial")
summary(classifier_8)
# Check predictions against training set
head((predict(classifier_4, type="response") > 0.5) + 0)
head(train$Survived)
# Impute the test data
test$Title = rapply(strsplit(as.character(test$Name), "[,.] "), function(name) name[2])
missmap(test, main="Titanic Training Data - Missings Map", col=c("yellow", "black"), legend=FALSE)
bystats(test$Age, test$Title, fun=median)
# Use the training and test set medians
test <- impute_median_by_title(test, get_medians(rbind(subset(train, select=c("Age", "Title")), subset(test, select=c("Age", "Title")))))
missmap(test, main="Titanic Training Data - Missings Map", col=c("yellow", "black"), legend=FALSE)
bystats(test$Age, test$Title, fun=median)
# Predict test survival using classifier_4
test$Survived <- ((predict(classifier_4, newdata=test, type="response") > 0.5) + 0)
submission <- subset(test, select=c("PassengerId", "Survived"))
head(submission)
write.csv(submission, file="~/Desktop/CSCI 183 Data Science/HW2/prediction_4.csv", row.names=FALSE)
# Predict test survival using classifier_1
test$Survived <- ((predict(classifier_1, newdata=test, type="response") > 0.5) + 0)
submission <- subset(test, select=c("PassengerId", "Survived"))
head(submission)
write.csv(submission, file="~/Desktop/CSCI 183 Data Science/HW2/prediction_1.csv", row.names=FALSE)
# Predict test survival using classifier_6
test$Survived <- ((predict(classifier_6, newdata=test, type="response") > 0.5) + 0)
submission <- subset(test, select=c("PassengerId", "Survived"))
head(submission)
write.csv(submission, file="~/Desktop/CSCI 183 Data Science/HW2/prediction_6.csv", row.names=FALSE)
# Predict test survival using classifier_7, best result on kaggle so far likely do to my imputing the Ages
# first classifier to beat the gender model
test$Survived <- ((predict(classifier_7, newdata=test, type="response") > 0.5) + 0)
submission <- subset(test, select=c("PassengerId", "Survived"))
head(submission)
write.csv(submission, file="~/Desktop/CSCI 183 Data Science/HW2/prediction_7.csv", row.names=FALSE)
# Predict test survival using classifier_8
test$Survived <- ((predict(classifier_8, newdata=test, type="response") > 0.5) + 0)
submission <- subset(test, select=c("PassengerId", "Survived"))
head(submission)
write.csv(submission, file="~/Desktop/CSCI 183 Data Science/HW2/prediction_8.csv", row.names=FALSE)
|
dfbbb974ceda5f78fc62907b2db060f0fa981d1f | 432a02b2af0afa93557ee16176e905ca00b653e5 | /GSC/Thyroid_markers/Anaplastic/complete_cohort_analysis/ClusterSamplesByMarker.R | 092a87eb9a407503526ebe9291ab8522727b209d | [] | no_license | obigriffith/analysis-projects-R | 403d47d61c26f180e3b5073ac4827c70aeb9aa6b | 12452f9fc12c6823823702cd4ec4b1ca0b979672 | refs/heads/master | 2016-09-10T19:03:53.720129 | 2015-01-31T19:45:05 | 2015-01-31T19:45:05 | 25,434,074 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,871 | r | ClusterSamplesByMarker.R | library("gplots") #For advanced heatmaps
setwd("C:/Documents and Settings/obig/My Documents/Projects/Thyroid_markers/Anaplastic/complete_cohort_analysis")
datafile="ATC_all_deconvoluted_data_62markers_23JAN07_nocalcpos_primfoci.txt"
data=read.table(datafile, header = TRUE, na.strings = "NA", sep="\t")
type=data[,7]
marker_data=data[,8:69]
diff_foci=data[,6]
diff_foci[diff_foci==0]="N"
diff_foci[diff_foci==1]="Y"
#Create a vector assigning a color to each patient based on pathology for the color side bar
typecolors=as.vector(type)
typecolors[typecolors=="epithelioid"]="#9ACD32" #yellowgreen
typecolors[typecolors=="spindled"]="#006400" #darkgreen
typecolors[typecolors=="squamoid"]="#FF0000" #red
diff_foci_colors=diff_foci
diff_foci_colors[diff_foci_colors=="N"]="#9ACD32" #yellowgreen
diff_foci_colors[diff_foci_colors=="Y"]="#006400" #darkgreen
###To create a heatmap of all data###
#Try different color scheme and add a score key
#Choose colors for the five possible scores (0,1,2,3,4)
score_colors=c("#F0F8FF","#B9D3EE","#00BFFF","#483D8B","#00008B")
#Specify column names
all_col_names=colnames(marker_data)
pdf("ATC_all_deconvoluted_data_62markers_23JAN07_nocalcpos_primfoci_heatmap_wkey.pdf")
x=as.matrix(marker_data)
rownames(x)=type
heatmap.2(x, na.rm = TRUE, scale="none", key=TRUE, symkey=FALSE, density.info="none", trace="none", labRow=FALSE, labCol=all_col_names, col=score_colors, RowSideColors=typecolors, cexRow=0.8, cexCol=0.60)
dev.off()
pdf("ATC_all_deconvoluted_data_62markers_23JAN07_nocalcpos_primfoci_heatmap_wkey_df.pdf")
x=as.matrix(marker_data)
rownames(x)=type
heatmap.2(x, na.rm = TRUE, scale="none", key=TRUE, symkey=FALSE, density.info="none", trace="none", labRow=FALSE, labCol=all_col_names, col=score_colors, RowSideColors=diff_foci_colors, cexRow=0.8, cexCol=0.60)
dev.off()
|
e28eed5a96db708256fb3c487ba8432b9989be83 | 6e7af9b27cf18bb4633ad9d0b63a7e8ed9a887fb | /man/Sensor-class.Rd | a5855e72933777c71a78609a5f6e35c0a0f99819 | [
"MIT"
] | permissive | ApfeldLab/SensorOverlord | 0fc62dd3c11b702cd477d0692085ea7be46911a7 | 2fbe7e0d0963561241d5c1e78dd131211e1b31a0 | refs/heads/master | 2022-12-27T15:20:27.343783 | 2020-10-13T23:28:48 | 2020-10-13T23:28:48 | 176,821,341 | 2 | 0 | null | 2020-06-14T15:37:09 | 2019-03-20T21:40:17 | R | UTF-8 | R | false | true | 591 | rd | Sensor-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sensor_Classes.R
\docType{class}
\name{Sensor-class}
\alias{Sensor-class}
\title{An S4 class to represent a 2-state sensor}
\description{
An S4 class to represent a 2-state sensor
}
\section{Slots}{
\describe{
\item{\code{Rmin}}{To represent the ratio emission value R in the minimum state}
\item{\code{Rmax}}{to represent the ratio emission value R in the maximum state}
\item{\code{delta}}{To represent the ratio between emission in the maximum and
minimum states in the second wavelength of the ratio.}
}}
|
dd561dc93f1ce432c3bb55a2c4d1c58a98df0a51 | 369c181cab199cb4b41c670dc1556056118facb6 | /R/lca.R | 6ba59482b377bde21c2d0741e93e185693549172 | [] | no_license | igollini/lvm4net | 8c8d6ff6ca03a07203e42997465cfccbccf95fc4 | 0ecbf03a723841daa885b9a288da86806cd5cbea | refs/heads/master | 2021-12-14T01:50:06.764045 | 2019-06-19T10:01:11 | 2019-06-19T10:01:11 | 28,144,907 | 12 | 5 | null | 2021-12-10T18:24:51 | 2014-12-17T16:17:28 | R | UTF-8 | R | false | false | 2,779 | r | lca.R | #' Latent Class Analysis
#'
#' Latent class analysis (LCA) can be used to find groups in the sender nodes (with the condition of independence within the groups). For more details see Gollini, I. (in press) and Gollini, I., and Murphy, T. B. (2014).
#'
#' @param X (\code{N} x \code{M}) binary incidence matrix
#' @param G number of groups
#' @param nstarts integer number of different starts for the EM algorithm. Default \code{nstarts = 3}.
#' @param tol desired tolerance for convergence. Default \code{tol = 0.1^2}
#' @param maxiter maximum number of iterations. Default \code{maxiter = 500}
#'
#' @return List containing the following information for each model fitted:
#' \itemize{
#' \item \code{p} (\code{G} x \code{M}) matrix containing the conditional probability of observing a link to sender nodes if the receiver nodes are from group g.
#' \item \code{eta} \eqn{\eta_g} is the mixing proportion for the group \eqn{g (g = 1,..., G)}, that corresponds to the prior probability that a randomly chosen sender node is in the g-th group.
#' \item \code{z} (\code{N} x \code{G}) matrix containing posterior probability for each sender node to belong to each group
#' \item \code{LL} log likelihood
#' \item \code{BIC} Bayesian Information Criterion (BIC) (Schwarz (1978))
#' }
#' If multiple models are fitted the output contains also a table to compare the BIC for all models fitted.
#'
#' @seealso \code{\link{mlta}}
#' @references Gollini, I. (in press) 'A mixture model approach for clustering bipartite networks', Challenges in Social Network Research Volume in the Lecture Notes in Social Networks (LNSN - Series of Springer). Preprint: \url{https://arxiv.org/abs/1905.02659}.
#' @references Gollini, I., and Murphy, T. B. (2014), 'Mixture of Latent Trait Analyzers for Model-Based Clustering of Categorical Data', Statistics and Computing, 24(4), 569-588 \url{http://arxiv.org/abs/1301.2167}.
#' @export
#' @examples
#' ### Simulate Bipartite Network
#' set.seed(1)
#' X <- matrix(rbinom(4 * 12, size = 1, prob = 0.4), nrow = 12, ncol = 4)
#'
#' resLCA <- lca(X, G = 2:3)
lca <- function(X, G, nstarts = 3, tol = 0.1^2, maxiter = 250) {
if (any(G < 1)) {
print("Specify G > 0!")
return("Specify G > 0!")
}
XS <- XtoS(X)
S <- XS$S
counts <- XS$counts
if (any(G == 1)) {
out <- f_lca_nstarts(S, counts, G, nstarts, tol, maxiter)
} else{
if (length(G) == 1) {
out <- f_lca_nstarts(S, counts, G, nstarts, tol, maxiter)
} else{
out <- vector("list", length(G) + 1)
names(out) <- c(paste('G', G, sep = '='), 'BIC')
i <- 0
for (g in G) {
i <- i + 1
out[[i]] <- f_lca_nstarts(S, counts, g, nstarts, tol, maxiter)
}
out[[length(G) + 1]] <- tableBIC(out)
}
}
out
}
|
220960b42bbfba2fb479af290f353a6734c39300 | 8c24bcf06c173551b1546b432069575d733df87f | /r/an_descriptive_stats_tables.R | 9ad7dd920b2971edab86664da24c7c46fd206fd7 | [
"MIT"
] | permissive | HDRUK/comix_covid-19-first_wave | 8d01f201171294526546ec166ce0373f829a498e | 5cb79c5043c605f41f8d1297af17e9ba5b3ed1a9 | refs/heads/master | 2022-04-13T08:43:10.334586 | 2020-04-13T20:41:53 | 2020-04-13T20:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,636 | r | an_descriptive_stats_tables.R | ## Descriptive values for the paper
library(socialmixr)
library(data.table)
library(ggplot2)
part <- readRDS('data/clean_participants.rds')
contacts <- readRDS('data/clean_contacts.rds')
household <- readRDS('data/clean_households.rds')
## Mean cases per person
## Number of participants
nrow(part)
## Number of contacts
nrow(contacts)
## Average age
part[ , .(avg = median(part_age), sd = sd(part_age), max = max(part_age))]
# Gender
part[ , .(avg = mean(part_gender == "Female"), X = sum(part_gender == "Female"), N = .N)]
## Households
part[ , .(avg = mean(hh_size, na.rm = T), sd = sd(hh_size, na.rm = T), max = max(hh_size, na.rm = T))]
## Regions
part[!is.na(regions_large) , Total := .N]
part[ , N := .N, by = regions_large]
part[,.( N = min(N), Total = max(Total), per = max(N/Total)) , by = regions_large]
## Behaviours and attitudes
## Household member isolating
part_hhm_isolate <- household[, .(isolate = hhm_isolate == "Yes"), by = part_id]
part_hhm_isolate <- part_hhm_isolate[, .(isolate = max(isolate)), by = part_id]
part_hhm_isolate[ , .(per = mean(isolate), X = sum(isolate), N = .N)]
## Household member quarantine
part_hhm_quar <- household[, .(quarantine = hhm_quarantine == "Yes"), by = part_id]
part_hhm_quar <- part_hhm_quar[, .(quarantine = max(quarantine)), by = part_id]
part_hhm_quar[ , .(per = mean(quarantine), X = sum(quarantine), N = .N)]
## Tests and contacts
table(part$part_covid_test_result)
table(part$part_covid_contact)
## Serious and likely disease
part[ , .(per = mean(part_att_likely %in% c("Strongly agree", "Tend to agree")),
X = sum(part_att_likely %in% c("Strongly agree", "Tend to agree")),
N = .N)]
part[ , .(per = mean(part_att_serious %in% c("Strongly agree", "Tend to agree")),
X = sum(part_att_serious %in% c("Strongly agree", "Tend to agree")),
N = .N)]
## Hand washing
## In the table in the paper.
### Effectiveness of interventions
n <- nrow(part)
part[ , .(.N, .N/n), by = part_att_eff_reduce_contacts]
part[ , .(.N, .N/n), by = part_att_eff_stay_home7_mild]
part[ , .(.N, .N/n), by = part_att_eff_stay_home7_severe]
part[ , .(.N, .N/n), by = part_att_eff_stay_home14_severe_not_you]
part[ , .(.N, .N/n), by = part_att_eff_crowd_places]
part[ , .(.N, .N/n), by = part_att_isolate_problems]
part[ , .(.N, .N/n), by = part_att_isolate_enough_food]
## Number of contacts in different locations
pmodpc <- readRDS('data/polymod_contacts_part.rds')
mean(contacts_part[ cnt_home == "No", .N, by = part_id]$N)
mean(contacts_part[ , .N, by = part_id]$N)
|
e575f47f2cde8d5bbf62892d8325fa3024ea5897 | 75e241d972a201b2867eefea84ba3b0f4ded0475 | /man/plotFocals.Rd | 72a3b463e25625f05c954470a83471685d84e8e2 | [] | no_license | bernatgel/FindFocals | d75b8a69a22235d94c166b933ca79caaa80f2f00 | 485033c77b27ba8ea8d5a94f078c1a3638621b3a | refs/heads/master | 2021-01-23T06:05:40.212579 | 2017-09-14T15:53:16 | 2017-09-14T15:53:16 | 102,488,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,314 | rd | plotFocals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFocals.R
\name{plotFocals}
\alias{plotFocals}
\title{plotFocals}
\usage{
plotFocals(find.focal.results,
plot.gain=TRUE, plot.gain.points=FALSE, gain.col="#FFBD07AA", gain.border.col="#FFBD07AA",
plot.del=TRUE, plot.del.points=FALSE, del.col="#00A6EDAA", del.border.col="#00A6EDAA",
plot.mean.lrr=FALSE, mean.lrr.col="#B0FFA5AA", mean.lrr.border.col=NA, mean.lrr.line.col="#FF7C30",
main="Focal Gains and Losses", chromosomes="canonical", zoom=NULL,
lrr.min=-4, lrr.max=2, total.height=1, bottom=0, margin=0.05,
focal.points.cex=0.6, points.cex=0.3, labels.cex=1.5, main.cex=2, axis.cex=1.2, chr.cex=1.5)
}
\arguments{
\item{find.focal.results}{The results from findFocals}
\item{plot.gain}{(defaults to TRUE)}
\item{plot.gain.points}{(defaults to FALSE)}
\item{gain.col}{(defaults to "#FFBD07AA")}
\item{gain.border.col}{(defaults to "#FFBD07AA")}
\item{plot.del}{(defaults to TRUE)}
\item{plot.del.points}{(defaults to FALSE)}
\item{del.col}{(defaults to "#00A6EDAA")}
\item{del.border.col}{(defaults to "#00A6EDAA")}
\item{plot.mean.lrr}{(defaults to FALSE)}
\item{mean.lrr.col}{(defaults to "#B0FFA5AA")}
\item{mean.lrr.border.col}{(defaults to NA)}
\item{mean.lrr.line.col}{(defaults to "#FF7C30")}
\item{main}{(defaults to "Focal Gains and Losses")}
\item{chromosomes}{(defaults to "canonical")}
\item{zoom}{(defaults to NULL)}
\item{lrr.min}{(defaults to -4)}
\item{lrr.max}{(defaults to 2)}
\item{total.height}{(defaults to 1)}
\item{bottom}{(defaults to 0)}
\item{margin}{(defaults to 0.05)}
\item{focal.points.cex}{(defaults to 0.6)}
\item{points.cex}{(defaults to 0.3)}
\item{labels.cex}{(defaults to 1.5)}
\item{main.cex}{(defaults to 2)}
\item{axis.cex}{(defaults to 1.2)}
\item{chr.cex}{(defaults to 1.5)}
}
\value{
Invisibly returns the karyoplot object representing the plot. With it
it is possible to add other elements to the plot using standrad karyoploteR
functions
}
\description{
Plots the SNP array data with the identified focal gains and losses on top.
}
\details{
Creates a plot with the LRR and BAF values along the genome and adds
rectangles showing the identified focal gains and deletions. Optionally,
the SNPs in each of the focal events may be highlighted.
}
\examples{
}
|
bb974a49f489f73dc94545b35a7d51ca93c38699 | 3a50e82d1796b6d80d384b38e45fdce6a72c2c64 | /Lectures/Lecture 5 - Functions.R | f267f8e025e4af3a85c08ac2890375669f429d0c | [] | no_license | senanarci/CMPE140 | 5362161ac19ab9416f4f8c02049d9277a2c1fd22 | b564832bbce66c32ad20208036f483348660a6cb | refs/heads/master | 2020-03-19T12:16:20.291400 | 2018-05-15T08:56:36 | 2018-05-15T08:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,444 | r | Lecture 5 - Functions.R | # In the context of computer programming, a _function_ is a piece of code that takes _input arguments_, performs a specific task, and _returns its output_.
#
# A first example
# ==============
# Let us define a function that returns the square of a given number.
square <- function(x) {
return(x^2)
}
# We call the function by providing the input value as an argument. The function returns an output.
square(3)
# If we type a function's name and press Enter, we get back the definition of the function.
square
# The syntax of a function definition
# =========
#
# The general structure of a function definition is as follows:
#
# <function_name> <- function([<argument_1>, <argument_2>, ...]) {
# <statements>
# return(<return_value>)
# }
#
# Function arguments
# ==============
# The function can take any number of arguments.
f <- function(x,y,z){
return(x+y*z)
}
f(1,2,3)
# It is possible to change the order of arguments by providing the names explicitly.
f(z=3,x=1,y=2)
# You can even omit some names, and the unnamed arguments will be matched in order.
f(z=3,1,2)
# Return values
# =============
# The _return value_ of the function can be any R object, such as a number, a vector, a matrix, a list, etc.
sumdiff <- function(x,y){
return( c(x+y, x-y) )
}
sumdiff(5,8)
# The function returns the last expression in its block, even if we don't use `return` explicitly.
f <- function(x,y,z){
x+y*z
}
f(1,2,3)
# A function itself is an R object, therefore we can easily write _functions that return functions_.
powerfun <- function(p){
return(function(x) x^p)
}
sq <- powerfun(2)
cube <- powerfun(3)
sq(1.5)
cube(1.5)
# Vectorization of functions
# ===========
# The simple function `square()` defined above happens to work with vector arguments without any modification, because the returned statement `x^2` is valid for both numbers and vectors.
square <- function(x) {
x^2
}
square(c(1,2,3))
# However, functions are not always vectorized. See the following counterexample:
addupto <- function(n){
total <- 0
for (i in 1:n)
total <- total + i
return(total)
}
addupto(10)
# When we call this function with a vector argument, only the first element is taken, and a warning message is issued
addupto(c(10,20))
# If you want this function to work with vector input, you can use a loop that iterates over the input vector.
addupto_vec <- function(nvec){
result <- c()
for (n in nvec){
total <- 0
for (i in 1:n)
total <- total + i
result <- c(result, total)
}
return(result)
}
addupto_vec(c(10,20))
# Alternatively, you can use the built-in `sapply()` function, which maps a function on each element of a vector.
sapply(c(10,20), addupto)
# Default arguments
# ==============
# When you define a function, you can set some of the arguments to default values. Then you don't have to specify them at each call.
f <- function(capital, interest_rate=0.1) {
capital * (1+interest_rate)
}
# Without specifying the `interest_rate` value, 0.1 is assumed.
f(1000)
# But if you want to change it, you can provide it as an extra argument.
f(1000,0.2)
# If you want to change the order of the arguments,
f(interest_rate = 0.2, 1000)
# Arbitrary number of arguments
# ====================
# The function definitions above take a fixed number of arguments. If desired, we can define the function for an arbitrary number of arguments, without specifying their name or how many they are.
f <- function(...) {
total <- 0
for (i in c(...)) {
print(i)
total <- i
}
total
}
f(1,2,6,2,5)
# This feature is commonly used if you want to pass some of the arguments to another function.
f <- function(x, y){
cat("This is function f(). Here x =",x,"y=",y,"\n")
return(x+y)
}
g <- function(z, ...){
cat("This is function g(). Here z =",z,"\n")
fresult <- f(...)
return(z + fresult)
}
g(5,8,4)
g
# Variable scope
# ============
# The value of a variable defined outside a function (a _global variable_) can be accessed inside a function. However, a variable defined inside a function block is not recognized outside of it.
# We say that the _scope_ of the variable `b` is limited to the function `f()`.
rm(list = ls())
a <- 5
f <- function(){
b <- 10
cat("inside f(): a =",a,"b =",b,"\n")
}
f()
cat("outside f(): a =",a," ")
cat("b =",b)
a <- 5
f <- function(){
b <- 10
cat("inside f(): a =",a,"b =",b,"\n")
}
f()
cat("outside f(): a =",a," ")
cat("b =",b)
# A local variable temporarily overrides a global variable with the same name.
a <- 5
cat("before f(): a =",a,"\n")
f <- function(){
a <- 10
cat("inside f(): a =",a,"\n")
}
f()
cat("after f(): a =",a)
# R allows for nested function definitions. So we can define functions within functions, within functions. Scope rules apply in the same way: A variable is available in the current level, and all the levels below in the hierarchy, but not above it.
aaa <- 5
f <- function(){
bbb <- 10
g <- function(){
ccc <- 10
cat("\nInside g(): aaa =", aaa, "bbb =",bbb, "ccc =",ccc,"\n")
}
g()
cat("\nInside f(): aaa =", aaa, "bbb =", bbb,"\n")
cat("ccc exists?", exists("ccc"),"\n")
}
f()
cat("\nOutside f():\n")
cat("aaa exists?", exists("aaa"),"\n")
cat("bbb exists?", exists("bbb"),"\n")
cat("ccc exists?", exists("ccc"),"\n")
# Assigning values to upper-level variables
# ==========
# Although the values of variables defined in upper levels are available in lower levels, they cannot be modified in a lower level, because an attempt in assignment will create only a local variable with the same name.
a <- 5
cat("before f(): a =",a,"\n")
f <- function(){
a <- 10
cat("inside f(): a =",a,"\n")
}
f()
cat("after f(): a =",a)
# Using the _superassignment operator_ `<<-` it is possible to assign to a variable in the higher level.
a <- 5
cat("before f(): a =",a,"\n")
f <- function(){
a <<- 10
cat("inside f(): a =",a,"\n")
}
f()
cat("after f(): a =",a)
a <- 5
cat("before f(): a =",a,"\n")
f <- function(){
g <- function(){
a <<- 20
cat("inside g(): a =",a,"\n")
}
cat("inside f(), before g(): a =",a,"\n")
g()
cat("inside f(), after g(): a =",a,"\n")
}
f()
cat("after f(): a =",a)
# Note that the superassignment affects all the levels above it.
|
bb9a30a289bc7b20c5735b048a230e6983f754f9 | 6ae8053b99c90a0b4bef5ee069efe4b008dc046d | /plot3.R | 639c93193e996d5b5548c5327161aab5025bb4b2 | [] | no_license | rui-r-duan/ExData_Plotting1 | 47730d5737a9fa3eaea3f1048827251c67794ca9 | b13d644a0e19931b42c49e4f5dd2a113eec488f9 | refs/heads/master | 2021-07-19T06:53:34.432211 | 2017-03-13T05:10:11 | 2017-03-13T05:10:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 750 | r | plot3.R | source("loaddata.R")
# Drawing on the fixed size PNG directly makes the size of the legend beautiful.
# If we draw it on the screen device and then dev.copy(png,...), the legend will be ugly.
# The example images in "figure" directory are 504 * 504. I choose 500 * 500 to make
# a slight difference.
png("plot3.png", width = 500, height = 500)
plot(df$Time, df$Sub_metering_1, type = "l",
ylab = "Energy sub metering", xlab = "", col = "black")
lines(df$Time, df$Sub_metering_2, type = "l", col = "red")
lines(df$Time, df$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"),
lty = c("solid", "solid", "solid"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
9995d4f44de0ebc3c55ed01b0f46e9767af5d545 | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googlepubsubv1beta1a.auto/man/Subscription.Rd | 35544492fa6c85de1cb511a4360b67aa06480ee9 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 833 | rd | Subscription.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pubsub_objects.R
\name{Subscription}
\alias{Subscription}
\title{Subscription Object}
\usage{
Subscription(name = NULL, topic = NULL, pushConfig = NULL,
ackDeadlineSeconds = NULL)
}
\arguments{
\item{name}{Name of the subscription}
\item{topic}{The name of the topic from which this subscription is receiving messages}
\item{pushConfig}{If push delivery is used with this subscription, this field is}
\item{ackDeadlineSeconds}{For either push or pull delivery, the value is the maximum time after a}
}
\value{
Subscription object
}
\description{
Subscription Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A subscription resource.
}
\seealso{
Other Subscription functions: \code{\link{subscriptions.create}}
}
|
23585b374821ea0edc57c240b6dc3f2a66268231 | 20a1c0fb795a8bea4ad2469f89ec3c7c9cd121f1 | /man/backward.glm.Rd | 32ce0817791dcbde94e78d56631cb8fcf1c11c81 | [] | no_license | Oleksandr-Soloshenko/StepwiseBinaryLogisticRegression | a3901e7de000330ae67a7562b261c5e77b56beed | 3e2dbaa107550d833d355d6e6c6018c005c7af36 | refs/heads/master | 2022-04-24T02:03:58.254849 | 2020-04-21T08:49:20 | 2020-04-21T08:49:20 | 257,266,469 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,451 | rd | backward.glm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backward.glm.R
\name{backward.glm}
\alias{backward.glm}
\title{Backward variable selection using given full model}
\usage{
backward.glm(model, sls = 0.05, printing = TRUE)
}
\arguments{
\item{model}{A full 'glm' model preliminarily built with formula/data/family parameters only}
\item{sls}{A significance level for stay (sls) applied for each backward elimination step}
\item{printing}{A logical parameter indicating report information to console output}
}
\value{
The shrunk 'glm' model where all input variables satisfy significance level for stay
}
\description{
Takes in any full 'glm' model and significance level for stay (sls) then conducts backward variable selection and returns shrunk model.
}
\examples{
# Example 1:
library(mlbench)
data(PimaIndiansDiabetes2)
df <- na.omit(PimaIndiansDiabetes2) # removing rows with any NA values
df$diabetes <- ifelse(df$diabetes == "pos", 1, 0)
full_model <- glm(diabetes ~ ., data = df, family = binomial())
print(formula(full_model))
library(StepwiseBinaryLogisticRegression)
model <- backward.glm(full_model)
print(formula(model))
print(summary(model))
# Example 2:
library(StepwiseBinaryLogisticRegression)
data(Remission)
full_model2 <- glm(remiss ~ ., data = Remission, family = binomial())
print(formula(full_model2))
model2 <- backward.glm(full_model2, sls = 0.35)
print(formula(model2))
print(summary(model2))
}
|
ba230f802cc9fe1e967d3071f884033d4f9aa644 | 74e9751fa7839d3924543fa474acf2f69d6443ce | /dissinfl.R | cfb59ae5069f9d76ba06109d4b65f8a4a99b85e1 | [] | no_license | grojasmatute/mexico | 2d372a179fd8da8a71d5c8bd391c9b39069cd24d | 157f976fbfbee0f9d32e26d9d5fe2971eeb79894 | refs/heads/master | 2020-05-17T16:41:01.174329 | 2019-04-27T16:45:48 | 2019-04-27T16:45:48 | 183,826,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,451 | r | dissinfl.R |
## inflation one year ahead
##2005
inf205_id2 <- subset(newdf,inft == "infgen_a06" & variable =="infgentmas1" & analyst == 4 )
inf205_id2
##df for only inflation of the current period
infdf0506 <- NULL # empty subset
infpon0506 <- matrix()
infidpoint0506 <- add.col(gy0506, infpon0506)
infidpoint0506
## data frame with point forecast from analyst 1 to 91
for(t in 1:91) {infdf0506 <- subset(newdf,inft == "infgen_a06" & variable =="infgentmas1" & analyst == t )
infidpoint0506 <- add.col(infidpoint0506,infdf0506[,5]) }
infidpoint0506
infidpoint0506_0 <- infidpoint0506
infidpoint0506_0[is.na(infidpoint0506_0)] <- 0
infidpoint0506_0
infupdate0506_0 <- matrix(0, nrow = nrow(infidpoint0506_0 ), ncol = ncol(infidpoint0506_0 ) )
for (j in 2:ncol(infidpoint0506_0 )) {for (i in 2:nrow(infidpoint0506_0)) { infupdate0506_0[i,j] <- ifelse (infidpoint0506_0[i,j] == infidpoint0506_0[i-1,j],0,1) & (infidpoint0506_0[i,j]!=0) }}
infupdate0506_0
## create a new column with proportion of updates
infupdaters0506_0 <- numeric(nrow(infidpoint0506_0))
for(t in 2:nrow(infidpoint0506_0 )) {infupdaters0506_0[t] <- sum(infupdate0506_0[t,2:ncol(infupdate0506_0)])}
infupdaters0506_0 ## total of updaters
## participants
inftotal0506 <- numeric(12)
infparti0506<- matrix(0, nrow = nrow(infidpoint0506_0 ), ncol = ncol(infidpoint0506_0 ) )
for (j in 2:ncol(infidpoint0506_0)) {for (i in 1:nrow(infidpoint0506_0 )) { infparti0506[i,j] <- ifelse (infidpoint0506_0[i,j] != 0,1,0) }}
for(t in 1:nrow(infidpoint0506_0)) {inftotal0506[t] <- sum(infparti0506[t,2:ncol(infparti0506)])}
inftotal0506
infupdaters0506_0 <- infupdaters0506_0[2:12]/inftotal0506[2:12]
infupdaters0506_0
##2006-07
inf206_id2 <- subset(newdf,inft == "infgen_a07" & variable =="infgentmas1" & analyst == 4 )
inf206_id2
##df for only inflation of the current period
infdf0607 <- NULL # empty subset
infpon0607 <- matrix()
infidpoint0607 <- add.col(gy0607, infpon0607)
infidpoint0607
## data frame with point forecast from analyst 1 to 91
for(t in 1:91) {infdf0607 <- subset(newdf,inft == "infgen_a07" & variable =="infgentmas1" & analyst == t )
infidpoint0607 <- add.col(infidpoint0607,infdf0607[,5]) }
infidpoint0607
infidpoint0607_0 <- infidpoint0607
infidpoint0607_0[is.na(infidpoint0607_0)] <- 0
infidpoint0607_0
infupdate0607_0 <- matrix(0, nrow = nrow(infidpoint0607_0 ), ncol = ncol(infidpoint0607_0 ) )
for (j in 2:ncol(infidpoint0607_0 )) {for (i in 2:nrow(infidpoint0607_0)) { infupdate0607_0[i,j] <- ifelse (infidpoint0607_0[i,j] == infidpoint0607_0[i-1,j],0,1) & (infidpoint0607_0[i,j]!=0) }}
infupdate0607_0
## create a new column with proportion of updates
infupdaters0607_0 <- numeric(nrow(infidpoint0607_0))
for(t in 2:nrow(infidpoint0607_0 )) {infupdaters0607_0[t] <- sum(infupdate0607_0[t,2:ncol(infupdate0607_0)])}
infupdaters0607_0 ## total of updaters
## participants
inftotal0607 <- numeric(12)
infparti0607<- matrix(0, nrow = nrow(infidpoint0607_0 ), ncol = ncol(infidpoint0607_0 ) )
for (j in 2:ncol(infidpoint0607_0)) {for (i in 1:nrow(infidpoint0607_0 )) { infparti0607[i,j] <- ifelse (infidpoint0607_0[i,j] != 0,1,0) }}
for(t in 1:nrow(infidpoint0607_0)) {inftotal0607[t] <- sum(infparti0607[t,2:ncol(infparti0607)])}
inftotal0607
infupdaters0607_0 <- infupdaters0607_0[2:12]/inftotal0607[2:12]
infupdaters0607_0
## 2007-08 survey round (target: 2008 headline inflation, series "infgen_a08")
# Peek at analyst 4's series for this round
inf207_id2 <- subset(newdf, inft == "infgen_a08" & variable == "infgentmas1" & analyst == 4)
inf207_id2

## Assemble the wide panel of point forecasts, one column per analyst (1..91)
infdf0708 <- NULL # per-analyst subset holder
infpon0708 <- matrix()
infidpoint0708 <- add.col(gy0708, infpon0708)
infidpoint0708
for (t in 1:91) {
  infdf0708 <- subset(newdf, inft == "infgen_a08" & variable == "infgentmas1" & analyst == t)
  infidpoint0708 <- add.col(infidpoint0708, infdf0708[, 5])
}
infidpoint0708

# Zero out missing responses so absence is distinguishable from a forecast
infidpoint0708_0 <- infidpoint0708
infidpoint0708_0[is.na(infidpoint0708_0)] <- 0
infidpoint0708_0

## Monthly revision flags: 1 when the current answer exists (non-zero)
## and differs from last month's answer
n_row0708 <- nrow(infidpoint0708_0)
n_col0708 <- ncol(infidpoint0708_0)
infupdate0708_0 <- matrix(0, nrow = n_row0708, ncol = n_col0708)
for (j in 2:n_col0708) {
  for (i in 2:n_row0708) {
    cur <- infidpoint0708_0[i, j]
    prev <- infidpoint0708_0[i - 1, j]
    infupdate0708_0[i, j] <- ifelse(cur == prev, 0, 1) & (cur != 0)
  }
}
infupdate0708_0

## Updaters per month (first month stays 0: nothing to compare against)
infupdaters0708_0 <- numeric(n_row0708)
for (t in 2:n_row0708) {
  infupdaters0708_0[t] <- sum(infupdate0708_0[t, 2:n_col0708])
}
infupdaters0708_0 ## total of updaters

## Participants per month: analysts with a non-zero entry (12 survey months)
inftotal0708 <- numeric(12)
infparti0708 <- matrix(0, nrow = n_row0708, ncol = n_col0708)
for (j in 2:n_col0708) {
  for (i in 1:n_row0708) {
    infparti0708[i, j] <- as.numeric(infidpoint0708_0[i, j] != 0)
  }
}
for (t in 1:n_row0708) {
  inftotal0708[t] <- sum(infparti0708[t, 2:ncol(infparti0708)])
}
inftotal0708

## Share of updaters among participants, months 2..12
infupdaters0708_0 <- infupdaters0708_0[2:12] / inftotal0708[2:12]
infupdaters0708_0
## 2008-09 survey round (target: 2009 headline inflation, series "infgen_a09")
# Peek at analyst 4's series for this round
inf208_id2 <- subset(newdf, inft == "infgen_a09" & variable == "infgentmas1" & analyst == 4)
inf208_id2

## Assemble the wide panel of point forecasts, one column per analyst (1..91)
infdf0809 <- NULL # per-analyst subset holder
infpon0809 <- matrix()
infidpoint0809 <- add.col(gy0809, infpon0809)
infidpoint0809
for (t in 1:91) {
  infdf0809 <- subset(newdf, inft == "infgen_a09" & variable == "infgentmas1" & analyst == t)
  infidpoint0809 <- add.col(infidpoint0809, infdf0809[, 5])
}
infidpoint0809

# Zero out missing responses so absence is distinguishable from a forecast
infidpoint0809_0 <- infidpoint0809
infidpoint0809_0[is.na(infidpoint0809_0)] <- 0
infidpoint0809_0

## Monthly revision flags: 1 when the current answer exists (non-zero)
## and differs from last month's answer
n_row0809 <- nrow(infidpoint0809_0)
n_col0809 <- ncol(infidpoint0809_0)
infupdate0809_0 <- matrix(0, nrow = n_row0809, ncol = n_col0809)
for (j in 2:n_col0809) {
  for (i in 2:n_row0809) {
    cur <- infidpoint0809_0[i, j]
    prev <- infidpoint0809_0[i - 1, j]
    infupdate0809_0[i, j] <- ifelse(cur == prev, 0, 1) & (cur != 0)
  }
}
infupdate0809_0

## Updaters per month (first month stays 0: nothing to compare against)
infupdaters0809_0 <- numeric(n_row0809)
for (t in 2:n_row0809) {
  infupdaters0809_0[t] <- sum(infupdate0809_0[t, 2:n_col0809])
}
infupdaters0809_0 ## total of updaters

## Participants per month: analysts with a non-zero entry (12 survey months)
inftotal0809 <- numeric(12)
infparti0809 <- matrix(0, nrow = n_row0809, ncol = n_col0809)
for (j in 2:n_col0809) {
  for (i in 1:n_row0809) {
    infparti0809[i, j] <- as.numeric(infidpoint0809_0[i, j] != 0)
  }
}
for (t in 1:n_row0809) {
  inftotal0809[t] <- sum(infparti0809[t, 2:ncol(infparti0809)])
}
inftotal0809

## Share of updaters among participants, months 2..12
infupdaters0809_0 <- infupdaters0809_0[2:12] / inftotal0809[2:12]
infupdaters0809_0
## 2009-10 survey round (target: 2010 headline inflation, series "infgen_a10")
# Peek at analyst 4's series for this round
inf209_id2 <- subset(newdf, inft == "infgen_a10" & variable == "infgentmas1" & analyst == 4)
inf209_id2

## Assemble the wide panel of point forecasts, one column per analyst (1..91)
infdf0910 <- NULL # per-analyst subset holder
infpon0910 <- matrix()
infidpoint0910 <- add.col(gy0910, infpon0910)
infidpoint0910
for (t in 1:91) {
  infdf0910 <- subset(newdf, inft == "infgen_a10" & variable == "infgentmas1" & analyst == t)
  infidpoint0910 <- add.col(infidpoint0910, infdf0910[, 5])
}
infidpoint0910

# Zero out missing responses so absence is distinguishable from a forecast
infidpoint0910_0 <- infidpoint0910
infidpoint0910_0[is.na(infidpoint0910_0)] <- 0
infidpoint0910_0

## Monthly revision flags: 1 when the current answer exists (non-zero)
## and differs from last month's answer
n_row0910 <- nrow(infidpoint0910_0)
n_col0910 <- ncol(infidpoint0910_0)
infupdate0910_0 <- matrix(0, nrow = n_row0910, ncol = n_col0910)
for (j in 2:n_col0910) {
  for (i in 2:n_row0910) {
    cur <- infidpoint0910_0[i, j]
    prev <- infidpoint0910_0[i - 1, j]
    infupdate0910_0[i, j] <- ifelse(cur == prev, 0, 1) & (cur != 0)
  }
}
infupdate0910_0

## Updaters per month (first month stays 0: nothing to compare against)
infupdaters0910_0 <- numeric(n_row0910)
for (t in 2:n_row0910) {
  infupdaters0910_0[t] <- sum(infupdate0910_0[t, 2:n_col0910])
}
infupdaters0910_0 ## total of updaters

## Participants per month: analysts with a non-zero entry (12 survey months)
inftotal0910 <- numeric(12)
infparti0910 <- matrix(0, nrow = n_row0910, ncol = n_col0910)
for (j in 2:n_col0910) {
  for (i in 1:n_row0910) {
    infparti0910[i, j] <- as.numeric(infidpoint0910_0[i, j] != 0)
  }
}
for (t in 1:n_row0910) {
  inftotal0910[t] <- sum(infparti0910[t, 2:ncol(infparti0910)])
}
inftotal0910

## Share of updaters among participants, months 2..12
infupdaters0910_0 <- infupdaters0910_0[2:12] / inftotal0910[2:12]
infupdaters0910_0
## 2010-11 survey round (target: 2011 headline inflation, series "infgen_a11")
# Peek at analyst 4's series for this round
inf210_id2 <- subset(newdf, inft == "infgen_a11" & variable == "infgentmas1" & analyst == 4)
inf210_id2

## Assemble the wide panel of point forecasts, one column per analyst (1..91)
infdf1011 <- NULL # per-analyst subset holder
infpon1011 <- matrix()
infidpoint1011 <- add.col(gy1011, infpon1011)
infidpoint1011
for (t in 1:91) {
  infdf1011 <- subset(newdf, inft == "infgen_a11" & variable == "infgentmas1" & analyst == t)
  infidpoint1011 <- add.col(infidpoint1011, infdf1011[, 5])
}
infidpoint1011

# Zero out missing responses so absence is distinguishable from a forecast
infidpoint1011_0 <- infidpoint1011
infidpoint1011_0[is.na(infidpoint1011_0)] <- 0
infidpoint1011_0

## Monthly revision flags: 1 when the current answer exists (non-zero)
## and differs from last month's answer
n_row1011 <- nrow(infidpoint1011_0)
n_col1011 <- ncol(infidpoint1011_0)
infupdate1011_0 <- matrix(0, nrow = n_row1011, ncol = n_col1011)
for (j in 2:n_col1011) {
  for (i in 2:n_row1011) {
    cur <- infidpoint1011_0[i, j]
    prev <- infidpoint1011_0[i - 1, j]
    infupdate1011_0[i, j] <- ifelse(cur == prev, 0, 1) & (cur != 0)
  }
}
infupdate1011_0

## Updaters per month (first month stays 0: nothing to compare against)
infupdaters1011_0 <- numeric(n_row1011)
for (t in 2:n_row1011) {
  infupdaters1011_0[t] <- sum(infupdate1011_0[t, 2:n_col1011])
}
infupdaters1011_0 ## total of updaters

## Participants per month: analysts with a non-zero entry (12 survey months)
inftotal1011 <- numeric(12)
infparti1011 <- matrix(0, nrow = n_row1011, ncol = n_col1011)
for (j in 2:n_col1011) {
  for (i in 1:n_row1011) {
    infparti1011[i, j] <- as.numeric(infidpoint1011_0[i, j] != 0)
  }
}
for (t in 1:n_row1011) {
  inftotal1011[t] <- sum(infparti1011[t, 2:ncol(infparti1011)])
}
inftotal1011

## Share of updaters among participants, months 2..12
infupdaters1011_0 <- infupdaters1011_0[2:12] / inftotal1011[2:12]
infupdaters1011_0
##2010-11
inf210_id2 <- subset(newdf,inft == "infgen_a11" & variable =="infgentmas1" & analyst == 4 )
inf210_id2
##df for only inflation of the current period
infdf1011 <- NULL # empty subset
infpon1011 <- matrix()
infidpoint1011 <- add.col(gy1011, infpon1011)
infidpoint1011
## data frame with point forecast from analyst 1 to 91
for(t in 1:91) {infdf1011 <- subset(newdf,inft == "infgen_a11" & variable =="infgentmas1" & analyst == t )
infidpoint1011 <- add.col(infidpoint1011,infdf1011[,5]) }
infidpoint1011
infidpoint1011_0 <- infidpoint1011
infidpoint1011_0[is.na(infidpoint1011_0)] <- 0
infidpoint1011_0
infupdate1011_0 <- matrix(0, nrow = nrow(infidpoint1011_0 ), ncol = ncol(infidpoint1011_0 ) )
for (j in 2:ncol(infidpoint1011_0 )) {for (i in 2:nrow(infidpoint1011_0)) { infupdate1011_0[i,j] <- ifelse (infidpoint1011_0[i,j] == infidpoint1011_0[i-1,j],0,1) & (infidpoint1011_0[i,j]!=0) }}
infupdate1011_0
## create a new column with proportion of updates
infupdaters1011_0 <- numeric(nrow(infidpoint1011_0))
for(t in 2:nrow(infidpoint1011_0 )) {infupdaters1011_0[t] <- sum(infupdate1011_0[t,2:ncol(infupdate1011_0)])}
infupdaters1011_0 ## total of updaters
## participants
inftotal1011 <- numeric(12)
infparti1011<- matrix(0, nrow = nrow(infidpoint1011_0 ), ncol = ncol(infidpoint1011_0 ) )
for (j in 2:ncol(infidpoint1011_0)) {for (i in 1:nrow(infidpoint1011_0 )) { infparti1011[i,j] <- ifelse (infidpoint1011_0[i,j] != 0,1,0) }}
for(t in 1:nrow(infidpoint1011_0)) {inftotal1011[t] <- sum(infparti1011[t,2:ncol(infparti1011)])}
inftotal1011
infupdaters1011_0 <- infupdaters1011_0[2:12]/inftotal1011[2:12]
infupdaters1011_0
## 2011-12 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf211_id2 <- subset(newdf, inft == "infgen_a12" & variable == "infgentmas1" & analyst == 4)
inf211_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1112 base columns (add.col is a project helper).
infdf1112 <- NULL
infpon1112 <- matrix()
infidpoint1112 <- add.col(gy1112, infpon1112)
infidpoint1112
for (analyst_id in 1:91) {
  infdf1112 <- subset(newdf, inft == "infgen_a12" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1112 <- add.col(infidpoint1112, infdf1112[, 5])
}
infidpoint1112
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1112_0 <- infidpoint1112
infidpoint1112_0[is.na(infidpoint1112_0)] <- 0
infidpoint1112_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1112_0 <- matrix(0, nrow = nrow(infidpoint1112_0), ncol = ncol(infidpoint1112_0))
for (col in 2:ncol(infidpoint1112_0)) {
  for (row in 2:nrow(infidpoint1112_0)) {
    revised <- infidpoint1112_0[row, col] != infidpoint1112_0[row - 1, col]
    infupdate1112_0[row, col] <- revised & (infidpoint1112_0[row, col] != 0)
  }
}
infupdate1112_0
## Number of updaters per survey round.
infupdaters1112_0 <- numeric(nrow(infidpoint1112_0))
for (row in 2:nrow(infidpoint1112_0)) {
  infupdaters1112_0[row] <- sum(infupdate1112_0[row, 2:ncol(infupdate1112_0)])
}
infupdaters1112_0
## Number of participants (non-zero entries) per round.
inftotal1112 <- numeric(12)   # assumes 12 survey rounds -- TODO confirm
infparti1112 <- matrix(0, nrow = nrow(infidpoint1112_0), ncol = ncol(infidpoint1112_0))
for (col in 2:ncol(infidpoint1112_0)) {
  for (row in 1:nrow(infidpoint1112_0)) {
    infparti1112[row, col] <- as.numeric(infidpoint1112_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1112_0)) {
  inftotal1112[row] <- sum(infparti1112[row, 2:ncol(infparti1112)])
}
inftotal1112
## Share of updaters among participants, rounds 2..12.
infupdaters1112_0 <- infupdaters1112_0[2:12] / inftotal1112[2:12]
infupdaters1112_0
## 2012-13 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf212_id2 <- subset(newdf, inft == "infgen_a13" & variable == "infgentmas1" & analyst == 4)
inf212_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1213 base columns (add.col is a project helper).
infdf1213 <- NULL
infpon1213 <- matrix()
infidpoint1213 <- add.col(gy1213, infpon1213)
infidpoint1213
for (analyst_id in 1:91) {
  infdf1213 <- subset(newdf, inft == "infgen_a13" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1213 <- add.col(infidpoint1213, infdf1213[, 5])
}
infidpoint1213
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1213_0 <- infidpoint1213
infidpoint1213_0[is.na(infidpoint1213_0)] <- 0
infidpoint1213_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1213_0 <- matrix(0, nrow = nrow(infidpoint1213_0), ncol = ncol(infidpoint1213_0))
for (col in 2:ncol(infidpoint1213_0)) {
  for (row in 2:nrow(infidpoint1213_0)) {
    revised <- infidpoint1213_0[row, col] != infidpoint1213_0[row - 1, col]
    infupdate1213_0[row, col] <- revised & (infidpoint1213_0[row, col] != 0)
  }
}
infupdate1213_0
## Number of updaters per survey round.
infupdaters1213_0 <- numeric(nrow(infidpoint1213_0))
for (row in 2:nrow(infidpoint1213_0)) {
  infupdaters1213_0[row] <- sum(infupdate1213_0[row, 2:ncol(infupdate1213_0)])
}
infupdaters1213_0
## Number of participants (non-zero entries) per round.
inftotal1213 <- numeric(12)   # assumes 12 survey rounds -- TODO confirm
infparti1213 <- matrix(0, nrow = nrow(infidpoint1213_0), ncol = ncol(infidpoint1213_0))
for (col in 2:ncol(infidpoint1213_0)) {
  for (row in 1:nrow(infidpoint1213_0)) {
    infparti1213[row, col] <- as.numeric(infidpoint1213_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1213_0)) {
  inftotal1213[row] <- sum(infparti1213[row, 2:ncol(infparti1213)])
}
inftotal1213
## Share of updaters among participants, rounds 2..12.
infupdaters1213_0 <- infupdaters1213_0[2:12] / inftotal1213[2:12]
infupdaters1213_0
## 2013-14 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf213_id2 <- subset(newdf, inft == "infgen_a14" & variable == "infgentmas1" & analyst == 4)
inf213_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1314 base columns (add.col is a project helper).
infdf1314 <- NULL
infpon1314 <- matrix()
infidpoint1314 <- add.col(gy1314, infpon1314)
infidpoint1314
for (analyst_id in 1:91) {
  infdf1314 <- subset(newdf, inft == "infgen_a14" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1314 <- add.col(infidpoint1314, infdf1314[, 5])
}
infidpoint1314
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1314_0 <- infidpoint1314
infidpoint1314_0[is.na(infidpoint1314_0)] <- 0
infidpoint1314_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1314_0 <- matrix(0, nrow = nrow(infidpoint1314_0), ncol = ncol(infidpoint1314_0))
for (col in 2:ncol(infidpoint1314_0)) {
  for (row in 2:nrow(infidpoint1314_0)) {
    revised <- infidpoint1314_0[row, col] != infidpoint1314_0[row - 1, col]
    infupdate1314_0[row, col] <- revised & (infidpoint1314_0[row, col] != 0)
  }
}
infupdate1314_0
## Number of updaters per survey round.
infupdaters1314_0 <- numeric(nrow(infidpoint1314_0))
for (row in 2:nrow(infidpoint1314_0)) {
  infupdaters1314_0[row] <- sum(infupdate1314_0[row, 2:ncol(infupdate1314_0)])
}
infupdaters1314_0
## Number of participants (non-zero entries) per round.
inftotal1314 <- numeric(12)   # assumes 12 survey rounds -- TODO confirm
infparti1314 <- matrix(0, nrow = nrow(infidpoint1314_0), ncol = ncol(infidpoint1314_0))
for (col in 2:ncol(infidpoint1314_0)) {
  for (row in 1:nrow(infidpoint1314_0)) {
    infparti1314[row, col] <- as.numeric(infidpoint1314_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1314_0)) {
  inftotal1314[row] <- sum(infparti1314[row, 2:ncol(infparti1314)])
}
inftotal1314
## Share of updaters among participants, rounds 2..12.
infupdaters1314_0 <- infupdaters1314_0[2:12] / inftotal1314[2:12]
infupdaters1314_0
## 2014-15 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf214_id2 <- subset(newdf, inft == "infgen_a15" & variable == "infgentmas1" & analyst == 4)
inf214_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1415 base columns (add.col is a project helper).
infdf1415 <- NULL
infpon1415 <- matrix()
infidpoint1415 <- add.col(gy1415, infpon1415)
infidpoint1415
for (analyst_id in 1:91) {
  infdf1415 <- subset(newdf, inft == "infgen_a15" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1415 <- add.col(infidpoint1415, infdf1415[, 5])
}
infidpoint1415
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1415_0 <- infidpoint1415
infidpoint1415_0[is.na(infidpoint1415_0)] <- 0
infidpoint1415_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1415_0 <- matrix(0, nrow = nrow(infidpoint1415_0), ncol = ncol(infidpoint1415_0))
for (col in 2:ncol(infidpoint1415_0)) {
  for (row in 2:nrow(infidpoint1415_0)) {
    revised <- infidpoint1415_0[row, col] != infidpoint1415_0[row - 1, col]
    infupdate1415_0[row, col] <- revised & (infidpoint1415_0[row, col] != 0)
  }
}
infupdate1415_0
## Number of updaters per survey round.
infupdaters1415_0 <- numeric(nrow(infidpoint1415_0))
for (row in 2:nrow(infidpoint1415_0)) {
  infupdaters1415_0[row] <- sum(infupdate1415_0[row, 2:ncol(infupdate1415_0)])
}
infupdaters1415_0
## Number of participants (non-zero entries) per round.
inftotal1415 <- numeric(12)   # assumes 12 survey rounds -- TODO confirm
infparti1415 <- matrix(0, nrow = nrow(infidpoint1415_0), ncol = ncol(infidpoint1415_0))
for (col in 2:ncol(infidpoint1415_0)) {
  for (row in 1:nrow(infidpoint1415_0)) {
    infparti1415[row, col] <- as.numeric(infidpoint1415_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1415_0)) {
  inftotal1415[row] <- sum(infparti1415[row, 2:ncol(infparti1415)])
}
inftotal1415
## Share of updaters among participants, rounds 2..12.
infupdaters1415_0 <- infupdaters1415_0[2:12] / inftotal1415[2:12]
infupdaters1415_0
## 2015-16 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf215_id2 <- subset(newdf, inft == "infgen_a16" & variable == "infgentmas1" & analyst == 4)
inf215_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1516 base columns (add.col is a project helper).
infdf1516 <- NULL
infpon1516 <- matrix()
infidpoint1516 <- add.col(gy1516, infpon1516)
infidpoint1516
for (analyst_id in 1:91) {
  infdf1516 <- subset(newdf, inft == "infgen_a16" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1516 <- add.col(infidpoint1516, infdf1516[, 5])
}
infidpoint1516
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1516_0 <- infidpoint1516
infidpoint1516_0[is.na(infidpoint1516_0)] <- 0
infidpoint1516_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1516_0 <- matrix(0, nrow = nrow(infidpoint1516_0), ncol = ncol(infidpoint1516_0))
for (col in 2:ncol(infidpoint1516_0)) {
  for (row in 2:nrow(infidpoint1516_0)) {
    revised <- infidpoint1516_0[row, col] != infidpoint1516_0[row - 1, col]
    infupdate1516_0[row, col] <- revised & (infidpoint1516_0[row, col] != 0)
  }
}
infupdate1516_0
## Number of updaters per survey round.
infupdaters1516_0 <- numeric(nrow(infidpoint1516_0))
for (row in 2:nrow(infidpoint1516_0)) {
  infupdaters1516_0[row] <- sum(infupdate1516_0[row, 2:ncol(infupdate1516_0)])
}
infupdaters1516_0
## Number of participants (non-zero entries) per round.
inftotal1516 <- numeric(12)   # assumes 12 survey rounds -- TODO confirm
infparti1516 <- matrix(0, nrow = nrow(infidpoint1516_0), ncol = ncol(infidpoint1516_0))
for (col in 2:ncol(infidpoint1516_0)) {
  for (row in 1:nrow(infidpoint1516_0)) {
    infparti1516[row, col] <- as.numeric(infidpoint1516_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1516_0)) {
  inftotal1516[row] <- sum(infparti1516[row, 2:ncol(infparti1516)])
}
inftotal1516
## Share of updaters among participants, rounds 2..12.
infupdaters1516_0 <- infupdaters1516_0[2:12] / inftotal1516[2:12]
infupdaters1516_0
## 2016-17 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf216_id2 <- subset(newdf, inft == "infgen_a17" & variable == "infgentmas1" & analyst == 4)
inf216_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1617 base columns (add.col is a project helper).
infdf1617 <- NULL
infpon1617 <- matrix()
infidpoint1617 <- add.col(gy1617, infpon1617)
infidpoint1617
for (analyst_id in 1:91) {
  infdf1617 <- subset(newdf, inft == "infgen_a17" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1617 <- add.col(infidpoint1617, infdf1617[, 5])
}
infidpoint1617
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1617_0 <- infidpoint1617
infidpoint1617_0[is.na(infidpoint1617_0)] <- 0
infidpoint1617_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1617_0 <- matrix(0, nrow = nrow(infidpoint1617_0), ncol = ncol(infidpoint1617_0))
for (col in 2:ncol(infidpoint1617_0)) {
  for (row in 2:nrow(infidpoint1617_0)) {
    revised <- infidpoint1617_0[row, col] != infidpoint1617_0[row - 1, col]
    infupdate1617_0[row, col] <- revised & (infidpoint1617_0[row, col] != 0)
  }
}
infupdate1617_0
## Number of updaters per survey round.
infupdaters1617_0 <- numeric(nrow(infidpoint1617_0))
for (row in 2:nrow(infidpoint1617_0)) {
  infupdaters1617_0[row] <- sum(infupdate1617_0[row, 2:ncol(infupdate1617_0)])
}
infupdaters1617_0
## Number of participants (non-zero entries) per round.
inftotal1617 <- numeric(12)   # assumes 12 survey rounds -- TODO confirm
infparti1617 <- matrix(0, nrow = nrow(infidpoint1617_0), ncol = ncol(infidpoint1617_0))
for (col in 2:ncol(infidpoint1617_0)) {
  for (row in 1:nrow(infidpoint1617_0)) {
    infparti1617[row, col] <- as.numeric(infidpoint1617_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1617_0)) {
  inftotal1617[row] <- sum(infparti1617[row, 2:ncol(infparti1617)])
}
inftotal1617
## Share of updaters among participants, rounds 2..12.
infupdaters1617_0 <- infupdaters1617_0[2:12] / inftotal1617[2:12]
infupdaters1617_0
## 2017-18 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf217_id2 <- subset(newdf, inft == "infgen_a18" & variable == "infgentmas1" & analyst == 4)
inf217_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1718 base columns (add.col is a project helper).
infdf1718 <- NULL
infpon1718 <- matrix()
infidpoint1718 <- add.col(gy1718, infpon1718)
infidpoint1718
for (analyst_id in 1:91) {
  infdf1718 <- subset(newdf, inft == "infgen_a18" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1718 <- add.col(infidpoint1718, infdf1718[, 5])
}
infidpoint1718
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1718_0 <- infidpoint1718
infidpoint1718_0[is.na(infidpoint1718_0)] <- 0
infidpoint1718_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1718_0 <- matrix(0, nrow = nrow(infidpoint1718_0), ncol = ncol(infidpoint1718_0))
for (col in 2:ncol(infidpoint1718_0)) {
  for (row in 2:nrow(infidpoint1718_0)) {
    revised <- infidpoint1718_0[row, col] != infidpoint1718_0[row - 1, col]
    infupdate1718_0[row, col] <- revised & (infidpoint1718_0[row, col] != 0)
  }
}
infupdate1718_0
## Number of updaters per survey round.
infupdaters1718_0 <- numeric(nrow(infidpoint1718_0))
for (row in 2:nrow(infidpoint1718_0)) {
  infupdaters1718_0[row] <- sum(infupdate1718_0[row, 2:ncol(infupdate1718_0)])
}
infupdaters1718_0
## Number of participants (non-zero entries) per round.
inftotal1718 <- numeric(12)   # assumes 12 survey rounds -- TODO confirm
infparti1718 <- matrix(0, nrow = nrow(infidpoint1718_0), ncol = ncol(infidpoint1718_0))
for (col in 2:ncol(infidpoint1718_0)) {
  for (row in 1:nrow(infidpoint1718_0)) {
    infparti1718[row, col] <- as.numeric(infidpoint1718_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1718_0)) {
  inftotal1718[row] <- sum(infparti1718[row, 2:ncol(infparti1718)])
}
inftotal1718
## Share of updaters among participants, rounds 2..12.
infupdaters1718_0 <- infupdaters1718_0[2:12] / inftotal1718[2:12]
infupdaters1718_0
## 2018-19 -------------------------------------------------------------------
## Spot check: analyst 4's forecasts for this survey year.
inf218_id2 <- subset(newdf, inft == "infgen_a19" & variable == "infgentmas1" & analyst == 4)
inf218_id2
## Build a table with one point-forecast column per analyst (1..91), starting
## from the gy1819 base columns (add.col is a project helper).
infdf1819 <- NULL
infpon1819 <- matrix()
infidpoint1819 <- add.col(gy1819, infpon1819)
infidpoint1819
for (analyst_id in 1:91) {
  infdf1819 <- subset(newdf, inft == "infgen_a19" & variable == "infgentmas1" & analyst == analyst_id)
  infidpoint1819 <- add.col(infidpoint1819, infdf1819[, 5])
}
infidpoint1819
## Zero-filled copy: NA (analyst did not respond) becomes 0.
infidpoint1819_0 <- infidpoint1819
infidpoint1819_0[is.na(infidpoint1819_0)] <- 0
infidpoint1819_0
## Update flags: 1 when a forecast is non-zero and differs from the previous
## round; row 1 has no predecessor and column 1 is not a forecast column.
infupdate1819_0 <- matrix(0, nrow = nrow(infidpoint1819_0), ncol = ncol(infidpoint1819_0))
for (col in 2:ncol(infidpoint1819_0)) {
  for (row in 2:nrow(infidpoint1819_0)) {
    revised <- infidpoint1819_0[row, col] != infidpoint1819_0[row - 1, col]
    infupdate1819_0[row, col] <- revised & (infidpoint1819_0[row, col] != 0)
  }
}
infupdate1819_0
## Number of updaters per survey round.
infupdaters1819_0 <- numeric(nrow(infidpoint1819_0))
for (row in 2:nrow(infidpoint1819_0)) {
  infupdaters1819_0[row] <- sum(infupdate1819_0[row, 2:ncol(infupdate1819_0)])
}
infupdaters1819_0
## Number of participants (non-zero entries) per round.
inftotal1819 <- numeric(11)   # 11 rounds for 2018-19 (see the [2:11] indexing below)
infparti1819 <- matrix(0, nrow = nrow(infidpoint1819_0), ncol = ncol(infidpoint1819_0))
for (col in 2:ncol(infidpoint1819_0)) {
  for (row in 1:nrow(infidpoint1819_0)) {
    ## BUG FIX: the original read participation flags from infidpoint1718_0
    ## (the 2017-18 table); 2018-19 participation must come from
    ## infidpoint1819_0.
    infparti1819[row, col] <- as.numeric(infidpoint1819_0[row, col] != 0)
  }
}
for (row in 1:nrow(infidpoint1819_0)) {
  inftotal1819[row] <- sum(infparti1819[row, 2:ncol(infparti1819)])
}
inftotal1819
## Share of updaters among participants, rounds 2..11.
infupdaters1819_0 <- infupdaters1819_0[2:11] / inftotal1819[2:11]
infupdaters1819_0
## Pool the per-year updater shares into a single vector
## (survey years 2005-06 .. 2018-19).
inf2updaters <- c(infupdaters0506_0, infupdaters0607_0, infupdaters0708_0, infupdaters0809_0,
                  infupdaters0910_0, infupdaters1011_0, infupdaters1112_0, infupdaters1213_0,
                  infupdaters1314_0, infupdaters1415_0, infupdaters1516_0, infupdaters1617_0,
                  infupdaters1718_0, infupdaters1819_0)
## Consensus forecasts -------------------------------------------------------
## For each year: drop analyst columns containing any NA, then average the
## remaining analyst columns row by row (column 1 is excluded from the mean).
inf2cons05 <- numeric(12)
inf2cons06 <- numeric(12)
inf2cons07 <- numeric(12)
inf2cons08 <- numeric(12)
inf2cons09 <- numeric(12)
inf2cons10 <- numeric(12)
inf2cons11 <- numeric(12)
inf2cons12 <- numeric(12)
inf2cons13 <- numeric(12)
inf2cons14 <- numeric(12)
inf2cons15 <- numeric(12)
inf2cons16 <- numeric(12)
inf2cons17 <- numeric(12)
inf2cons18 <- numeric(11)   # 11 rounds for 2018-19 (matches the [2:11] use above)
infidpointNA0506 <- infidpoint0506[, colSums(is.na(infidpoint0506)) == 0]
for (row in 1:nrow(infidpointNA0506)) {
  inf2cons05[row] <- sum(infidpointNA0506[row, 2:ncol(infidpointNA0506)]) / (ncol(infidpointNA0506) - 1)
}
inf2cons05
infidpointNA0607 <- infidpoint0607[, colSums(is.na(infidpoint0607)) == 0]
for (row in 1:nrow(infidpointNA0607)) {
  inf2cons06[row] <- sum(infidpointNA0607[row, 2:ncol(infidpointNA0607)]) / (ncol(infidpointNA0607) - 1)
}
inf2cons06
infidpointNA0708 <- infidpoint0708[, colSums(is.na(infidpoint0708)) == 0]
for (row in 1:nrow(infidpointNA0708)) {
  inf2cons07[row] <- sum(infidpointNA0708[row, 2:ncol(infidpointNA0708)]) / (ncol(infidpointNA0708) - 1)
}
inf2cons07
infidpointNA0809 <- infidpoint0809[, colSums(is.na(infidpoint0809)) == 0]
for (row in 1:nrow(infidpointNA0809)) {
  inf2cons08[row] <- sum(infidpointNA0809[row, 2:ncol(infidpointNA0809)]) / (ncol(infidpointNA0809) - 1)
}
inf2cons08
infidpointNA0910 <- infidpoint0910[, colSums(is.na(infidpoint0910)) == 0]
for (row in 1:nrow(infidpointNA0910)) {
  inf2cons09[row] <- sum(infidpointNA0910[row, 2:ncol(infidpointNA0910)]) / (ncol(infidpointNA0910) - 1)
}
inf2cons09
infidpointNA1011 <- infidpoint1011[, colSums(is.na(infidpoint1011)) == 0]
for (row in 1:nrow(infidpointNA1011)) {
  inf2cons10[row] <- sum(infidpointNA1011[row, 2:ncol(infidpointNA1011)]) / (ncol(infidpointNA1011) - 1)
}
inf2cons10
infidpointNA1112 <- infidpoint1112[, colSums(is.na(infidpoint1112)) == 0]
for (row in 1:nrow(infidpointNA1112)) {
  inf2cons11[row] <- sum(infidpointNA1112[row, 2:ncol(infidpointNA1112)]) / (ncol(infidpointNA1112) - 1)
}
inf2cons11
infidpointNA1213 <- infidpoint1213[, colSums(is.na(infidpoint1213)) == 0]
for (row in 1:nrow(infidpointNA1213)) {
  inf2cons12[row] <- sum(infidpointNA1213[row, 2:ncol(infidpointNA1213)]) / (ncol(infidpointNA1213) - 1)
}
inf2cons12
infidpointNA1314 <- infidpoint1314[, colSums(is.na(infidpoint1314)) == 0]
for (row in 1:nrow(infidpointNA1314)) {
  inf2cons13[row] <- sum(infidpointNA1314[row, 2:ncol(infidpointNA1314)]) / (ncol(infidpointNA1314) - 1)
}
inf2cons13
|
95cfec8639eeb9dcccd3b80e2032dac4cce5cc76 | 9535af0ac6a72c149570514bf046babb6a0995a9 | /Import_copy_number.R | e5068b8706c433ba29bfb5a48c4bc7aded991947 | [] | no_license | FNLCR-DMAP/LP_epi_pred | b44e8aa33e2ded8318ad30f0e92c7a30855d78b7 | db3a7644f47f78a2c486669681621ab04f465084 | refs/heads/master | 2023-06-06T19:59:10.532415 | 2021-06-22T15:38:14 | 2021-06-22T15:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,054 | r | Import_copy_number.R | require(data.table)
# Copy-number alteration (CNA) inputs ----------------------------------------
# `path_prefix` is expected to be defined before this script runs.
# All conumee bins with amplification/deletion calls:
CNA_all <- fread(paste0(path_prefix, "Del_amp_conumee_bins_neuro_baselinecorrection_preprocessRaw_XYincl_sub30_GMAF1p_minwidth3.txt"),
                 na.strings = "", check.names = FALSE, stringsAsFactors = FALSE)
# Keep bins where at least one of the seven call columns (7:13) is non-NA.
CNA_deleterious <- CNA_all[rowSums(is.na(CNA_all[, 7:13])) != 7, ]
# Candidate breakpoint fusions, one "matches" string per sample.
CNA_breakpoint_fusions <- fread(paste0(path_prefix, "Breakpoints_conumee_neuro_baselinecorrection_preprocessIRaw_XYincl_sub30_GMAF1p_minwidth3_10kb_possiblefusions.txt"),
                                stringsAsFactors = FALSE, check.names = FALSE)
names(CNA_breakpoint_fusions) <- c("sample", "matches")
# Turn dots back into dashes (presumably undoing name mangling in the input
# -- verify against the upstream file), then drop empty match strings.
CNA_breakpoint_fusions$matches <- gsub("\\.", "-", CNA_breakpoint_fusions$matches)
CNA_breakpoint_fusions <- CNA_breakpoint_fusions[CNA_breakpoint_fusions$matches != "", ]
# Genes overlapping breakpoints (10 kb window); drop rows with no gene.
CNA_breakpoint_genes <- fread(paste0(path_prefix, "./Conumee_baselinecorrection_preprocessRaw_XYincl_sub30_GMAF1p_minwidth3_breakpoint_genes_10kb.txt"))
CNA_breakpoint_genes <- CNA_breakpoint_genes[CNA_breakpoint_genes$gene != "", ]
|
80884501fb1f81e76437de1b2d2b542ece8d1060 | 7833e4754f301593059953421e422a3129c9f64b | /statintroRproj/Chapter4.R | 278ba9fcb8eae7e32d40ea67a253f5325c9c6549 | [] | no_license | anhnguyendepocen/Stat-Intro | a67ea5b0b197e3f47e965b1cc948651855bf5728 | 22a756bd1a9ecacbc55d2c13d7cb62e0106890e9 | refs/heads/master | 2020-11-24T11:30:45.475523 | 2019-05-14T23:32:57 | 2019-05-14T23:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,387 | r | Chapter4.R | library(lme4)
library(lmerTest)
library(ggplot2)   # BUG FIX: ggplot() is called below but ggplot2 was never attached
## Photosynthesis practical: does temperature affect PhotoRate, and does
## accounting for bench Position (blocking) change the inference?
photo <- read.csv(file = "Data/Prac4photosynthesis.csv")
m_noblock <- lm(PhotoRate ~ Temp, data = photo)
anova(m_noblock)
## Position as a fixed block effect.
m_block <- lm(PhotoRate ~ Temp + as.factor(Position), data = photo)
anova(m_block)
## Position as a random intercept instead.
m_block_re <- lmer(PhotoRate ~ Temp + (1 | Position), data = photo)
anova(m_block_re)
lmerTest::ranova(m_block_re)
summary(m_block_re)

## Drought experiment: genotype x water condition, repeated measures per plant.
drought <- read.csv("Data/Prac3droughtdata.csv")
drought$Genotype <- relevel(drought$Genotype, ref = "WT")
drought$WaterCondition <- relevel(drought$WaterCondition, ref = "Normal")
ggplot(drought, aes(x = interaction(Genotype, WaterCondition), y = Temperature, color = as.factor(plant))) +
  geom_point() + xlab("Genotype-by-exposure")
lm.drought <- lm(Temperature ~ Genotype * WaterCondition, data = drought)
anova(lm.drought)
lmer.drought <- lmer(Temperature ~ Genotype * WaterCondition + (1 | plant), data = drought)
anova(lmer.drought)

## Dark respiration: add a simulated measurement day to the data.
resp2 <- read.csv("Data/Prac4darkrespiration.csv")
## NOTE(review): sample() is not seeded, so this is non-reproducible, and the
## next line overwrites the raw data file with the random column -- confirm
## that this is intended.
resp2$Day <- sample(x = c(1:5), size = nrow(resp2), replace = TRUE)
## Arguments reordered for clarity; the original passed the path positionally
## after naming x=, which bound it to `file` anyway (same behaviour).
write.csv(x = resp2, file = "Data/Prac4darkrespiration.csv", quote = FALSE)
mm0 <- lmer(Dry_mass_resp ~ 1 + Leaf_section + (1 | Species/Plant_ID) + (1 | Day), data = resp2)
summary(mm0)
## Alternative nesting structures; each assignment overwrites mm0 and only
## the last one is summarised.
mm0 <- lmer(Dry_mass_resp ~ 1 + Leaf_section + (1 | Day/Leaf_stage/Plant_ID), data = resp2)
mm0 <- lmer(Dry_mass_resp ~ 1 + Leaf_section + (1 | Leaf_stage/Plant_ID/Day), data = resp2)
summary(mm0)
|
dea7acaba25fadcdc727500ab9950864ab89e6df | 04e2b30e8ffd827c7300463280884901270cf820 | /plot6.R | 540775b63fe82678d2d7f2e692ca8866a29bc07e | [] | no_license | jpuccia/ExDataProject2 | e152be126a4b119253df0ec75c64d10e86a86cdc | e2942b3e47e76543b19bdd80742850adbb761e0e | refs/heads/master | 2016-09-09T20:55:44.045749 | 2015-07-26T21:57:23 | 2015-07-26T21:57:23 | 39,736,132 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,220 | r | plot6.R | ## plot6.R
##
## Task: Create a plot that shows the total vehicle emissions of PM2.5 in
##... Baltimore (fips=24510) and Los Angeles (fips=06037) by year to
##... determine if emissions are increasing or decreasing and to
##... compare the two cities.
##
## plot6: total yearly PM2.5 vehicle emissions for Baltimore City
## (fips 24510) vs Los Angeles County (fips 06037), written to a PNG.
##
## data_dir: folder containing the two NEI .rds inputs (default "./data",
##           matching the original hard-coded paths).
## out_file: path of the PNG to create (default "plot6.png").
## Side effect: writes out_file; returns dev.off()'s value invisibly.
plot6 <- function(data_dir = "./data", out_file = "plot6.png"){
    library(ggplot2)
    library(dplyr)
    library(reshape2)
    ## Emissions observations and the source-classification lookup table.
    nei <- readRDS(file.path(data_dir, "summarySCC_PM25.rds"))
    scc <- readRDS(file.path(data_dir, "Source_Classification_Code.rds"))
    ## Attach source metadata so vehicle-related records can be identified.
    neiMerge <- merge(nei, scc, by = "SCC")
    ## Any EI.Sector mentioning "vehicle" counts as a vehicle source.
    vehicleSources <- unique(scc$EI.Sector[grep("vehicle", scc$EI.Sector, ignore.case = TRUE)])
    ## Keep only the two cities' vehicle rows, then sum Emissions by
    ## fips + year.
    neiMelt <- melt(
        neiMerge[(neiMerge$fips=="24510" | neiMerge$fips=="06037") & neiMerge$EI.Sector %in% vehicleSources,],
        id=c("fips", "year"), measure.vars=c("Emissions"))
    neiVehicle <- dcast(neiMelt, fips + year ~ variable, sum)
    ## Replace the fips code with a readable city name (direct column
    ## assignment instead of the original chained `df[cond,]$City <-`).
    names(neiVehicle)[1] <- "City"
    neiVehicle$City[neiVehicle$City == "24510"] <- "Baltimore"
    neiVehicle$City[neiVehicle$City == "06037"] <- "Los Angeles"
    png(filename = out_file, width = 480, height = 480, units = "px")
    p <- qplot(year, Emissions, data = neiVehicle, group = City, color = City,
               geom = c("point", "line"), ylab = "Yearly PM2.5 Emissions (tons)",
               xlab = "Year", main = "Total PM2.5 Vehicle Emissions in Baltimore and Los Angeles")
    print(p)
    ## Close the device so the PNG is flushed; invisible() keeps the
    ## dev.off() return value out of the console.
    invisible(dev.off())
}
d14c8b89f11833f34fc9ce8717704dfec810e2f9 | d428f569ccad08eb2dc07e961c5ab7e33e6fc515 | /Rscripts/histogram_v5.R | 4959cbf64a0a998c6720ab86fea1fb598301ec80 | [] | no_license | sachingadakh/pyR | 1fd12ecc44609d72e072709961363073b1a668c0 | 45b12f83bfe18a8b67d2075a8e0f15f0c2d3f9b8 | refs/heads/master | 2020-04-02T21:54:54.516802 | 2018-11-02T15:46:47 | 2018-11-02T15:46:47 | 154,815,109 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,302 | r | histogram_v5.R | G600_new <- read.table("W200_G600_peak_widths.txt", header = T, row.names = NULL, sep = "\t", fill = T)
## Quick look at the peak-width table loaded above from
## "W200_G600_peak_widths.txt" (one column per sample).
head(G600_new)
## Sample (column) names; Kashmir samples start with "K" and Tamilnadu
## samples with "T" (see the grepl("^K"/"^T", ...) loops below).
## NOTE(review): this masks base::names() for the rest of the script.
names <- colnames(G600_new)
## Dead exploratory code comparing G600_new against an older G600 object;
## left commented out by the original author.
# library(compare)
# comparison <- compare(G600_new[,"KFO6"],G600[,"H3K9me3_KFO6.Peak.Width"],allowAll=TRUE)
# comparison$tM
# a1<-as.data.frame(G600_new[,"KFO6"])
# head(a1)
# nrow(a1)
# a2<-as.data.frame(G600[,"H3K9me3_KFO6.Peak.Width"])
# head(a2)
# nrow(a2)
# difference <- data.frame(lapply(1:ncol(G600_new[,"KFO6"]),function(i)setdiff(G600_new[,"KFO6"][,i],comparison$tM[,i])))
# write.table(a1,"a1.txt")
# write.table(a2,"a2.txt")
# require(sqldf)
# a1NotIna2 <- sqldf('SELECT * FROM a2 EXCEPT SELECT * FROM a1')
# head(a1NotIna2)
#count = 0
## Kashmir samples (columns whose name starts with "K") ----------------------
## One PDF: first a pooled peak-width histogram, then a per-sample 4x5 grid.
file <- paste0("KashmirPeakWidth", "_v1.pdf")
par(mfrow = c(1, 1))
## NOTE(review): dev.copy() duplicates the *current* device into the PDF,
## so a screen device must already be open (interactive use) -- confirm.
dev.copy(pdf, file)
## Select the Kashmir columns once, and pool their values with unlist()
## instead of growing a vector with append() inside a loop.
k_cols <- names[grepl("^K", names)]
K_PeakWidth <- unlist(G600_new[, k_cols, drop = FALSE], use.names = FALSE)
hist(K_PeakWidth, xlab = "Kashmir samples Peak width", col = "grey", main = "", breaks = 10000, xlim = c(0,10000))
## One histogram per Kashmir sample.
par(mfrow = c(4, 5))
for (name in k_cols) {
  hist(G600_new[,name], xlab = name, col = "grey", main = "", breaks = 10000, xlim = c(0,10000), ylim = c(0,8000))
}
dev.off()
## Tamilnadu samples (columns whose name starts with "T") --------------------
## One PDF: first a pooled peak-width histogram, then a per-sample 4x5 grid.
file <- paste0("TamilnaduPeakWidth", "_v1.pdf")
par(mfrow = c(1, 1))
## NOTE(review): dev.copy() duplicates the *current* device into the PDF,
## so a screen device must already be open (interactive use) -- confirm.
dev.copy(pdf, file)
## Select the Tamilnadu columns once, and pool their values with unlist()
## instead of growing a vector with append() inside a loop.
t_cols <- names[grepl("^T", names)]
T_PeakWidth <- unlist(G600_new[, t_cols, drop = FALSE], use.names = FALSE)
hist(T_PeakWidth, xlab = "Tamilnadu samples Peak width", col = "grey", main = "", breaks = 10000, xlim = c(0,10000))
## One histogram per Tamilnadu sample.
par(mfrow = c(4, 5))
for (name in t_cols) {
  hist(G600_new[,name], xlab = name, col = "grey", main = "", breaks = 10000, xlim = c(0,10000), ylim = c(0,8000))
}
dev.off()
|
7b033e3fccb7d10b359ec72cffa1ba78f794cb92 | c539c989ec430b74cb22d5b2adb07afc40ba6bdc | /FitnessStats.R | b90c51a6dba0fdb086b3c2d5dcb020fd19c0068b | [] | no_license | rachaelbay/Acropora-hyacinthus-transplant-experiment | b095b79d0f856f1daef5e7f8bd2fc015c205d229 | c0d464a4b94b20d1420d83596cc8525eab019025 | refs/heads/master | 2021-01-17T07:10:45.151353 | 2016-11-06T17:10:10 | 2016-11-06T17:10:10 | 62,743,206 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,318 | r | FitnessStats.R | library(lme4)
library(lmerTest)
library(WGCNA)
## Read in the per-fragment data (Supplementary Table 1).
data <- read.delim("SuppTab1.txt", header = T)

## GLMM ----------------------------------------------------------------------
## Survival: binomial mixed models with Crate and Colony random intercepts.
## NOTE(review): this assigns the *result* of lme4::glmerControl() to a
## variable of the same name, shadowing the function for the rest of the
## session.
glmerControl <- glmerControl(optimizer = "bobyqa")
## Full model: Origin x Location interaction.
surv.full <- glmer(Survival ~ Origin * Location + (1 | Crate) + (1 | Colony),
                   data = data, family = "binomial", control = glmerControl)
## Best model (compared by AIC below): Origin only.
surv.M1 <- glmer(Survival ~ Origin + (1 | Colony) + (1 | Crate),
                 data = data, family = "binomial", control = glmerControl)
AIC(surv.full, surv.M1)
summary(surv.M1)
## Growth (BW): Gaussian mixed models, same random-effect structure.
surv_growth_full <- NULL # (placeholder removed -- kept names below as original)
growth.full <- lmer(BW ~ Origin * Location + (1 | Crate) + (1 | Colony), data = data)
growth.M1 <- lmer(BW ~ Location + (1 | Crate) + (1 | Colony), data = data)
AIC(growth.full, growth.M1)
summary(growth.M1)
## Crate normalization -------------------------------------------------------
## Remove crate effects within each transplant site by regressing the
## mean-centred trait on crate and keeping the residuals.
rownames(data) <- paste(data$Colony, data$Crate, sep = "")
goodData <- data[!is.na(data$Survival), ]
## Survival residuals, per site (HV and MV).
HVcrates <- lm(scale(goodData$Survival[goodData$Location == "HV"], center = T, scale = F) ~ goodData$Crate[goodData$Location == "HV"])$residuals
MVcrates <- lm(scale(goodData$Survival[goodData$Location == "MV"], center = T, scale = F) ~ goodData$Crate[goodData$Location == "MV"])$residuals
## NOTE(review): c(HV, MV) is bound onto goodData positionally; this only
## lines up if goodData is ordered with all HV rows before all MV rows --
## confirm against the input file.
surv.crates <- c(HVcrates, MVcrates)
survival <- cbind(goodData, surv.crates)
## Growth (BW) residuals, only for fragments with a BW measurement.
survivors <- data[!is.na(data$BW), ]
HVcrates <- lm(scale(survivors$BW[survivors$Location == "HV"], center = T, scale = F) ~ survivors$Crate[survivors$Location == "HV"])$residuals
MVcrates <- lm(scale(survivors$BW[survivors$Location == "MV"], center = T, scale = F) ~ survivors$Crate[survivors$Location == "MV"])$residuals
growth.crates <- c(HVcrates, MVcrates)
growth <- cbind(survivors, growth.crates)
## Colony means: map normalized values back onto the full table by rowname,
## then average per Colony x Origin x Location.
frame <- cbind(data,
               normSurv = survival$surv.crates[match(rownames(data), rownames(survival))],
               normGrowth = growth$growth.crates[match(rownames(data), rownames(growth))])
means <- aggregate(frame[, 8:9], list(Colony = frame$Colony, Origin = frame$Origin, Location = frame$Location), mean, na.rm = T)
## Bind rows 1-21 beside rows 22-42 (assumes 21 colonies per site in
## aggregate's output order -- TODO confirm).
meansframe <- cbind(meansHV = means[1:21, ], meansMV = means[22:42, ])
## MV growth (col 10) vs HV survival (col 4).
summary(lm((meansframe[, 10] ~ meansframe[, 4])))
plot(meansframe[, 10] ~ meansframe[, 4])
|
f4a242a67573c04207897ebb5684be53bd0a0b49 | b25934d0200b909e9be4e0759ab6f1f88d659087 | /IndexSSP/session1.R | 84c82436809f375d2ce19edd3aae0a830a7fe6c4 | [] | no_license | shahar-siegman/old_projects | 8a94d67f2fd6f6f861fc903ac246b4d468395998 | 411756f7ae03e798a8376a41ce1ec57b9eda520f | refs/heads/master | 2021-10-19T04:59:14.538249 | 2015-12-22T15:18:26 | 2015-12-22T15:18:26 | 171,240,632 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,997 | r | session1.R | library(ggplot2)
library(gridExtra)
simulateSingleFile <- function(fname) {
  ## Load one placement's auction log and simulate performance for every
  ## candidate floor price between 0.01 and 50; returns the result data.frame.
  resultsForRange(loadAuctions(fname), 0.01, 50)
}
loadAuctions <- function(inputFile=NA) {
  ## Parse an auction log. Each line is one auction, comma-separated:
  ## field 1 = the bids (";"-separated), field 2 = the recorded floor price.
  ## Returns list(top bids, runner-up bids, total auction count, true floors).
  if (identical(inputFile, NA)) {
    inputFile <- "C:\\Shahar\\Projects\\IndexSSP\\data.txt"
  }
  rawLines <- scan(inputFile, what = "", sep = "\n")
  fields <- strsplit(rawLines, ",")
  trueFloorPrices <- lapply(fields, `[`, 2)
  bidStrings <- unlist(lapply(fields, `[`, 1))
  bidLists <- lapply(strsplit(bidStrings, ";"), as.numeric)
  auctions <- lapply(bidLists, sort, TRUE)       # bids, highest first
  firsts <- unlist(lapply(auctions, `[`, 1))     # winning bid per auction
  seconds <- unlist(lapply(auctions, `[`, 2))    # runner-up (NA if one bid)
  ## NOTE(review): counted *before* invalid auctions are dropped below,
  ## exactly as in the original.
  nAuctions <- length(firsts)
  ## Drop auctions with no bids or a zero top bid.
  recordsToRemove <- is.na(firsts) | firsts == 0
  firsts <- firsts[!recordsToRemove]
  seconds <- seconds[!recordsToRemove]
  ## NOTE(review): trueFloorPrices is a list of strings here; verify that
  ## as.numeric() coerces it as intended for your inputs.
  trueFloorPrices <- as.numeric(trueFloorPrices[!recordsToRemove])
  seconds[is.na(seconds)] <- 0                   # single-bid auctions
  list(firsts, seconds, nAuctions, trueFloorPrices)
}
resultsForRange <- function(auctions, minFloorPrice, maxFloorPrice) {
  ## Simulate wins/revenue at every candidate floor price (each distinct bid
  ## value within [minFloorPrice, maxFloorPrice]) plus one "actual" row using
  ## the recorded per-auction floor prices. Returns a data.frame.
  candidates <- sort(unique(unlist(auctions[1:2])))
  if (length(candidates) == 0) {
    print ("No bidding information supplied")
    return(data.frame())
  }
  print (maxFloorPrice)   # debug trace kept from the original
  candidates <- candidates[candidates >= minFloorPrice & candidates <= maxFloorPrice]
  ## NOTE(review): this guard tests the *parameter*, not the filtered set;
  ## kept as-is to preserve the original behaviour.
  if (maxFloorPrice == -Inf) {
    print("No bids in range")
    return(data.frame())
  }
  n <- length(candidates)
  result <- matrix(nrow = n, ncol = 2, dimnames = list(rep(NA, n), c("wins", "revenue")))
  for (k in 1:n) {   # NOTE(review): 1:n misbehaves if n == 0, as originally
    result[k, ] <- resultByFloorPrice(auctions, candidates[k])
  }
  ## What actually happened, using each auction's true floor price.
  actualResult <- resultByFloorPrice(auctions, auctions[[4]])
  df <- data.frame(floorPrice = candidates, wins = result[, 1], revenue = result[, 2])
  df <- rbind(df, c(mean(auctions[[4]]), actualResult))
  df$floorPriceType <- "simulated"
  df$floorPriceType[nrow(df)] <- "actual"
  df$auctions <- auctions[[3]]
  df$fill <- df$wins / df$auctions
  df$eCpm <- df$revenue / df$wins
  return(df)
}
resultByFloorPrice <- function(auctions,floorPrice) {
  ## Outcome of applying a floor price to every auction.
  ## auctions: list(top bids, runner-up bids, ...) as built by loadAuctions();
  ## floorPrice: one scalar floor, or one floor per auction.
  ## Returns c(number of auctions won, total revenue).
  topBids <- auctions[[1]]
  runnerUps <- auctions[[2]]
  won <- floorPrice <= topBids
  ## When the floor is at or above the runner-up, the sale clears at
  ## floor + 0.01; otherwise the winner pays the runner-up's bid.
  clearedAtFloor <- won & floorPrice >= runnerUps
  floorRevenue <- sum((floorPrice + 0.01) * clearedAtFloor)
  secondPriceRevenue <- sum(runnerUps[won & !clearedAtFloor])
  c(sum(won), floorRevenue + secondPriceRevenue)
}
loopFiles <- function() {
  ## Run the floor-price simulation for every placement-bid file and write
  ## one result CSV per input (same file name, in outputdir).
  inputdir <- "placement_bids\\"
  outputdir <- "results\\"
  ## BUG FIX: list.files() interprets pattern as a regular expression, not a
  ## glob, so "*.csv" matched any name containing "csv" anywhere; "\\.csv$"
  ## selects files that actually end in ".csv".
  flist <- list.files(inputdir, pattern = "\\.csv$")
  for (fname in flist) {
    print(fname)
    y <- simulateSingleFile(paste0(inputdir, fname))
    if (nrow(y) > 0) {
      write.csv(y, file = paste0(outputdir, fname))
    } else {
      print("skipping write csv")
    }
  }
}
loopResults <- function() {
  ## For every simulation-result CSV, render four diagnostic plots
  ## (wins, eCpm, fill, revenue) and save them as one PNG on a 2x2 grid.
  inputdir <- "results/"
  outputdir <- "graphs/"
  ## NOTE(review): pattern is a regex, so "*.csv" matches more loosely than
  ## a glob; kept as-is to preserve the original matching behaviour.
  flist <- list.files(inputdir, pattern = "*.csv")
  for (fname in flist) {
    df <- read.csv(paste0(inputdir, fname))
    p1 <- ggplot(data = df, aes(y = wins, x = floorPrice, colour = floorPriceType, size = floorPriceType)) +
      geom_line() + geom_point() + scale_size_manual(values = c(4, 0.5))
    p2 <- ggplot(data = df, aes(y = eCpm, x = fill, colour = floorPriceType, size = floorPriceType)) +
      geom_line() + geom_point() + scale_y_log10() + scale_size_manual(values = c(4, 0.5))
    p3 <- ggplot(data = df, aes(y = fill, x = floorPrice, colour = floorPriceType, size = floorPriceType)) +
      geom_line() + geom_point() + scale_size_manual(values = c(4, 0.5))
    p4 <- ggplot(data = df, aes(y = revenue, x = fill, colour = floorPriceType, size = floorPriceType)) +
      geom_line() + geom_point() + scale_size_manual(values = c(4, 0.5))
    ## Strip ".csv" and build the output path.
    fname <- paste0(outputdir, substr(fname, 1, nchar(fname) - 4), ".png")
    print(paste("file: ", fname, ", rows:", nrow(df)))
    ## Four graphs on a 2x2 grid in one PNG.
    png(fname, width = 960, height = 960)
    grid.arrange(p1, p2, p3, p4, nrow = 2, ncol = 2)
    dev.off()
  }
}
|
48f263506907767333551642aae615a0f13cadfe | c617ac82efd11fbf1e0b04a78d1fe8c341dd3bb5 | /Assign1/complete.R | f52e16f704b3d9fb451c87da9c19f65877cca4d5 | [] | no_license | kartikeyakirar/R_coursera | b94bea052e57c0d015ffa43099b0d96f61a8e318 | 5bc7fc371f565a8fa0b29b93d6a7585eef4af139 | refs/heads/master | 2020-12-31T07:09:24.525313 | 2016-05-20T18:47:24 | 2016-05-20T18:47:24 | 58,585,431 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 273 | r | complete.R | complete <- function(directory, id = 1:332) {
listoffiles<-list.files(directory,full.names = T)
da<-data.frame()
for(t in id)
{
y<-read.csv(listoffiles[t])
x<-sum(complete.cases(y))
da<-rbind(da,c(t,x))
}
colnames(da)<-c("id","nobs")
da
} |
cd5a3646aad3d56aea24773b9a0d14ea78671579 | 02409b4d40b182cf5d044ec8d571075430300908 | /man/tblStrings-package.Rd | 24b9204b4ae7a8c7e506fc0f3d799d2fe672bcaf | [
"MIT"
] | permissive | bcjaeger/tblStrings | f556cb8e070c1b7d19cfcef322b659db9d26def8 | 0ec83e28e20915e16f455992c618aa6d0b9a1c94 | refs/heads/master | 2021-01-01T02:00:42.547564 | 2020-08-13T12:27:04 | 2020-08-13T12:27:04 | 239,131,540 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 771 | rd | tblStrings-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tblStrings-package.R
\docType{package}
\name{tblStrings-package}
\alias{tblStrings}
\alias{tblStrings-package}
\title{tblStrings: Format Numeric Data as Journal-Ready Character Strings}
\description{
tblStrings helps translate double and integer valued data into
character values formatted with rules that are generally consistent with
the requested formats from academic journals.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/bcjaeger/tblStrings}
\item Report bugs at \url{https://github.com/bcjaeger/tblStrings/issues}
}
}
\author{
\strong{Maintainer}: Byron Jaeger \email{bcjaeger@uab.edu} (\href{https://orcid.org/0000-0001-7399-2299}{ORCID})
}
\keyword{internal}
|
be11f5ad29de3d47007d6521a1b91c9c687ffca7 | 285541e8ae77482ac7eeb5b51ce06edeb96ef246 | /man/crop_curves.Rd | 46c0cc7164c0a77db7b495b2ad8b4718456a0252 | [] | no_license | myllym/GET | 2033c4f590da7cce114b588e7e39b243b543dcdf | 72988291d9c56b468c5dddfb5bc2c23f519b6dca | refs/heads/master | 2023-08-24T23:23:14.364346 | 2023-08-15T21:33:51 | 2023-08-15T21:33:51 | 68,914,145 | 12 | 5 | null | 2022-11-16T07:55:16 | 2016-09-22T11:20:34 | R | UTF-8 | R | false | true | 1,394 | rd | crop_curves.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crop.r
\name{crop_curves}
\alias{crop_curves}
\title{Crop the curves}
\usage{
crop_curves(curve_set, allfinite = TRUE, r_min = NULL, r_max = NULL)
}
\arguments{
\item{curve_set}{A curve_set (see \code{\link{create_curve_set}}) or
an \code{envelope} object of \pkg{spatstat}. If an envelope object is given,
it must contain the summary functions from the simulated patterns which can be
achieved by setting savefuns = TRUE when calling the \code{envelope} function.}
\item{allfinite}{Logical. TRUE means that the argument values where any of the
curves have missing or infinite values are removed. FALSE means that only
\code{r_min} and \code{r_max} apply.}
\item{r_min}{The minimum radius to include.}
\item{r_max}{The maximum radius to include.}
}
\value{
A curve_set object containing the cropped summary functions and
the cropped radius vector.
}
\description{
Crop the curves to a certain interval, or crop missing and infinite argument
values from the curves
}
\details{
The curves can be cropped to a certain interval defined by the arguments r_min and r_max.
Also the argument values of the sets of curves which have missing or infinite
values for any of the curves can be removed from the set (\code{allfinite = TRUE}).
The interval should generally be chosen carefully for classical deviation tests.
}
|
22f769beb1da36fdd2569e11c9088cfc67f9529d | 729528857dc75f00814c3aeb612544a123cdbc4b | /R_scripts/icpsr-cbp1980.R | eb7415655adc2867b309c238e004d1ae1d3640e7 | [] | no_license | Reese565/robots_polar | f52545e8c6330700a4b07f77ce4362c887cd2e05 | 0f79a8202162c5c80ba20ebe67e5451294efa7d8 | refs/heads/main | 2023-04-29T22:26:51.794970 | 2021-05-11T16:44:05 | 2021-05-11T16:44:05 | 366,448,139 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,619 | r | icpsr-cbp1980.R | # Title: ICPSR-CBP 1980
# Description: takes the raw ASCII files for the 1980 County Business Patterns
# survey and transforms them into a dataframe. The raw ASCI files are split
# for the counties by each of the nine Census Divisions.

library(stringr)
library(foreign)

# Fixed-width field widths and column names for the 1980 CBP record layout.
widths <- c(2, 3, 1, 4, 1, 1, 12, 12, 12,
            6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
            6, 6, 6, 6, 1, 2, 3, 1, 2, 3, 6)

col_names <- c("STATE2", "COUNTY2", "FILL1", "SICCODE2", "FILL2", "FLAG",
               "TEMPMM", "TPAYQ1", "TANPAY", "TESTAB", "CTYEMP1", "CTYEMP2",
               "CTYEMP3", "CTYEMP4", "CTYEMP5", "CTYEMP6", "CTYEMP7",
               "CTYEMP8", "CTYEMP9", "CTYEMP10", "CTYEMP11", "CTYEMP12",
               "CTYEMP13", "FILL3", "SSASTAT2", "SSACTY2", "FILL4", "FIPSTATE",
               "FIPSCTY2", "FILL5")

cbp80_dir_path <- "../data/icpsr/cbp1980"
cbp80_dirs <- list.dirs(cbp80_dir_path)

# ICPSR ships one "DS..." subdirectory per Census Division.
subdirs_boolean <- str_detect(cbp80_dirs, "DS")
cbp80_subdirs <- cbp80_dirs[subdirs_boolean]
# NOTE(review): this assumes exactly one data file per subdirectory — verify.
cbp80_files <- paste0(cbp80_subdirs, "/", sapply(cbp80_subdirs, list.files))

cbp80_dfs <- lapply(cbp80_files, read.fwf,
                    widths = widths,
                    col.names = tolower(col_names))

# BUG FIX: the original did rbind(cbp80_dfs[[2]], cbp80_dfs[[2]]) and then
# looped from 3, which dropped Division 1 entirely and duplicated Division 2.
# Binding the whole list at once includes every division exactly once, and
# also avoids growing a data.frame inside a loop.
cbp80_df <- do.call(rbind, cbp80_dfs)

# Diagnostic: list any characters in siccode2 other than digits, "/" or "-".
# (The result is discarded when this script is sourced non-interactively.)
unlist(str_extract_all(cbp80_df$siccode2, "[^0-9/-]+"))
# NOTE(review): codes containing whitespace are recoded to "07--" —
# presumably a blank-padded agriculture rollup; confirm against the codebook.
cbp80_df[str_detect(cbp80_df$siccode2, "\\s+"),"siccode2"] <- "07--"
cbp80_df$flag <- as.character(cbp80_df$flag)

write.dta(cbp80_df, "../data/icpsr/cbp1980.dta")

# NOTE(review): unfinished stub; currently unused by this script.
read_all <- function(directory, type = "csv",...){}
|
3cb7ca1a612fec9d922cdc756d4131e64b39d06a | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/multivariance/man/coins.Rd | 2d881dacf3ffa4ba43e2bac1980822558d2634a1 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,038 | rd | coins.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{coins}
\alias{coins}
\title{dependence example: k-independent coin sampling}
\usage{
coins(N = 1000, k = 2, type = "even")
}
\arguments{
\item{N}{number of samples}
\item{k}{each k-tuple will be independent}
\item{type}{one of \code{"even"} or \code{"odd"}}
}
\value{
It returns the samples as rows of an \code{N} by \code{k+1} matrix. The columns are dependent but k-independent.
}
\description{
This function creates samples which are dependent but k-independent.
}
\details{
Throw \code{k} independent fair coins. Now consider
the k+1 events: The first shows head, the second shows head,... the \code{k}-th shows head,
there is an \code{even} (or \code{odd} as selected via \code{type}) number of heads. Each row
contains the state of these k+1 events.
}
\examples{
coins(200,4)
}
\references{
For the theoretic background see the reference [3] given on the main help page of this package: \link{multivariance-package}.
}
|
5dab93941f1c37fffb7bc9918a8f099bd2811c31 | 4951e7c534f334c22d498bbc7035c5e93c5b928d | /developers/LuthigerJungwirth.R | ae391b1abe56a11332db41ecff078e0a698c0cfd | [] | no_license | Derek-Jones/ESEUR-code-data | 140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1 | 2f42f3fb6e46d273a3803db21e7e70eed2c8c09c | refs/heads/master | 2023-04-04T21:32:13.160607 | 2023-03-20T19:19:51 | 2023-03-20T19:19:51 | 49,327,508 | 420 | 50 | null | null | null | null | UTF-8 | R | false | false | 6,545 | r | LuthigerJungwirth.R | #
# LuthigerJungwirth.R, 21 Sep 18
# Data from:
# Pervasive Fun
# Benno Luthiger and Carola Jungwirth
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG developer_fun
# Fit cumulative-link (ordinal regression) models to the FASD survey data to
# relate the share of spare time spent on open source (q42, an ordered factor
# binned in 10% steps) to the "fun/flow" questionnaire items q1..q35.
# ESEUR_config.r defines ESEUR_dir (data root) used below.
source("ESEUR_config.r")

library("ordinal")

# One colour per 10% response bin of q42.
pal_col=rainbow(10)

# See LuthigerJungwirth.txt for information on columns
fasd=read.csv(paste0(ESEUR_dir, "developers/LuthigerJungwirth.csv.xz"), as.is=TRUE)
# -1 is the survey's missing-response marker; recode to NA.
fasd[fasd == -1]=NA

# Response variable must be a factor.
# How much (on average, in percent) of your spare time do you spend on
# activities concerning open source projects?
# Response binned into 10% increments
fasd$q42=as.factor(fasd$q42)

# Exploratory full model over all questionnaire items (result is immediately
# replaced by the reduced model below; kept to show the selection path).
fun_mod=clm(q42 ~ q1+q2+q3+q4+q5+q6+q7+q8+q9+
		q11+q12+q13+q14+q15+q16+q17+q18+q19+
		q21+q22+q23+q24+q25+q26+q27+q28+q29+
		q31+q32+q33+q34+q35,
		data=fasd)

# Reduced model; the commented-out terms record earlier candidate subsets.
fun_mod=clm(q42 ~ q5+
#			q12+q13+ q16+q17+
#			q24+q25+ q29+
			q29+
#			q31+ q35,
			q31 ,
			data=fasd)

# summary(fun_mod)
# drop1(fun_mod, test = "Chi")
# confint(fun_mod, type = "Wald")

# round(exp(fun_mod$beta), 1)
# round(exp(confint(fun_mod, type = "Wald")), 1)

# Difference between thresholds
# diff(fun_mod$alpha)

# Which link function gives the best fit?
# Highest (i.e., towards +infinity) is best.
# links = c("logit", "probit", "cloglog", "loglog", "cauchit")
# sapply(links, function(link)
# 	{
# 	clm(q42 ~ q5+ q29+ q31,
# 		data=fasd, link=link)$logLik
# 	})

# Fitted so a plot can be shown.
# Single-predictor model on q31 ("With one more hour in the day, I would
# program open source software"), used only for the plot below.
f_mod=clm(q42 ~ q31, data=fasd)
# summary(f_mod)

# Predicted probability of each q42 bin for every possible q31 answer (1..6).
pred=predict(f_mod, newdata=data.frame(q31=1:6))

# Empty canvas; the ten per-bin probability curves are drawn on top.
plot(-1, type="n",
	xaxs="i", yaxs="i",
	xlim=c(1, 6), ylim=c(0, 0.6),
	xlab="Answer given to q31", ylab="Probability\n")

# One line per q42 bin; sapply is used for its side effect only.
dummy=sapply(1:10, function(X)
		lines(1:6, pred$fit[ ,X], col=pal_col[X]))

legend(x="topright", legend=paste0(seq(0, 90, 10), "-", seq(10, 100, 10), "%"), bty="n", fill=pal_col, cex=1.2)
# Data and questionaire kindly provided by Luthiger
#
# How do you feel about being an open source developer?
# How often do the following statements apply to
#
# 1 I lose my sense of time.
# 2 I cannot say how long I’ve been with programming.
# 3 I am in a state of flow when I’m working.
# 4 I forget all my worries when I’m working.
# 5 It’s easy for me to concentrate.
# 6 I’m all wrapped up in the action.
# 7 I am absolutely focused on what I’m programming.
# 8 The requirements of my work are clear to me.
# 9 I hardly think of the past or the future.
# 10 I know exactly what is required of me.
# 11 There are many things I would prefer doing.
# 12 I feel that I can cope well with the demands of the situation.
# 13 My work is solely motivated by the fact that it will pay for me.
# 14 I always know exactly what I have to do.
# 15 I’m very absent-minded.
# 16 I don’t have to muse over other things.
# 17 I know how to set about it.
# 18 I’m completely focused.
# 19 I feel able to handle the problem.
# 20 I am extremely concentrated.
# 21 I’m looking forward to my programming work.
# 22 I enjoy my work.
# 23 I feel the demands upon me are excessive.
# 24 Things just seem to fall into place.
# 25 I forget everything around me.
# 26 I accomplish my work for its own sake.
# 27 I completely concentrate on my programming work.
# 28 I am easily distracted by other things.
# 29 I’m looking forward to further development activities for open source software.
# 30 I’m prepared to increase my future commitment in the development of open source software.
# 31 With one more hour in the day, I would program open source software.
# How do you feel about the open source projects?
# 32 How often are the open source projects you work on based on a definite project vision?
# 33 How often is there a deadline for your open source projects?
# 34 How important is the vision behind an open source project for you to participate in the project?
# 35 How important is the professional competence of the project leader for your commitment in an open source project?
# 36 As a project member: What are your reasons for participating in open source projects?
# projects?
# a It was important for my work to have a certain functionality; that’s why I joined the open source project and got involved.
# b My employer asked me to participate in the open source project because he needed its functionality.
# c Because I wanted to do something for the open source community.
# d The project promised to be fun.
# e My colleagues motivated me to participate in the open source project.
# f By participating in the open source project you could become famous.
# g Because I wanted to learn and develop new skills.
# 37 As a project leader: For what reasons did you initiate your open source project(s)?
# a I needed a certain functionality for my work, and I wanted to make this functionality available as an open source application.
# b My employer asked me to start the open source project because he needed the functionality.
# c My employer asked me to start the open source projectbecause he could earn money with the application.
# d Because I wanted to do something for the open sourcecommunity.
# e One open source project in the past didn’t develop asdesired, therefore I had to start my own.
# f The project promised to be fun.
# g I needed assistants to complete a software project.
# h With an open source project you could become famous.
# How do you organize your time and commitment in theopen source area?
# 38 How many patches have you developed for open source software?
# 39 How many classes/modules/files etc. have you developed for open source software?
# 40 Please estimate the time you spend for the development of open source software (average hours per week).
# 41 Of the total time spent for the development of open source software, how much in percent is part of your spare time?
# 42 How much (on average, in percent) of your spare time do you spend on activities concerning open source projects?
# 43 Please specify the most important position you hold or held in an open source project.
# Demographic data:
# 44 Please state the year in which you started to develop open source software.
# 45 How old were you when you started to develop open source software?
# 46 Do you have children?
# 47 Are there other adults living in the same household with you?
#
|
1cef64f2c076ebb2b0698f6d891d60a3092ea888 | eaa91909789a4e5dd8e01f35d6164c53b2a90225 | /cachematrix.R | 0b2583cc9621c7d09296c7573e2a872ba8350d14 | [] | no_license | gxdlarson/ProgrammingAssignment2 | 7ed69c3292d9e2ec6fd6bb8e441aca176756d853 | 5e811f5f8ffe81acdb7eb1b43b502f4381f9181c | refs/heads/master | 2020-12-27T08:41:25.177204 | 2014-04-21T21:33:16 | 2014-04-21T21:33:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,296 | r | cachematrix.R | ## the purpose of the following two functions is to create a
## special object that stores a numeric matrix and cache's its
## inverse matrix. the inverse matrix is cached in the sense
## that it can be computed once and then stored for further use.
## the makeCacheMatrix() function will create
## a list object with four (4) functions. these
## functions consist of setters and getters for
## the matrix 'x' and its inverse 'm'.
## note: both 'x' and 'm' are defined in
## the makeCacheMatrix() environment and are
## not local to any of the setters and/or getters.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix in an object that can memoize its inverse.
  ## Returns a list of four closures sharing this function's environment:
  ## set/get for the matrix itself, setinverse/getinverse for the cache.
  ## The cache starts empty (NULL) and is reset whenever the matrix changes.
  cached_inverse <- NULL
  replace_matrix <- function(new_matrix) {
    ## Swap in a new matrix; any previously cached inverse is now stale.
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  fetch_matrix <- function() {
    x
  }
  store_inverse <- function(inverse) {
    ## Called by cacheSolve() after it computes the inverse.
    cached_inverse <<- inverse
  }
  fetch_inverse <- function() {
    cached_inverse
  }
  list(
    set = replace_matrix,
    get = fetch_matrix,
    setinverse = store_inverse,
    getinverse = fetch_inverse
  )
}
## the cacheSolve() function returns the inverse matrix 'm'
## of a matrix 'x' created with the makeCacheMatrix() function.
## if the inverse matrix 'm' is null, then this function will
## compute the inverse matrix by calling the solve() function
## with matrix 'x'.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the cache-matrix object `x` (from
  ## makeCacheMatrix). The inverse is computed with solve() only on a cache
  ## miss; subsequent calls reuse the stored result. `...` is forwarded to
  ## solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the answer.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
4238a69512c30092771212f84457bc7afbd936a8 | 411b3724bd36e8ff2592ad4d7a28c080b9386303 | /R/mini_capture_analysis.R | f645d782ffa4cb8516770bc00abcb3041e0f1d5f | [] | no_license | NathanWhitmore/GAOSrmark | 1a290e5bb992244a4091828f70fec7f19a88c6f9 | 7f592ea3eac41d7a6888f778f01b155507a82333 | refs/heads/main | 2023-01-04T13:23:22.476354 | 2020-10-19T00:09:34 | 2020-10-19T00:09:34 | 301,903,632 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,019 | r | mini_capture_analysis.R | # closed capture Huggins analysis
# closed capture Huggins analysis
mini_capture_analysis <- function (path, site_species, year) {
  # Fit a small candidate set of closed-capture Huggins models (Mo and Mt
  # only) via the RMark `mark()` interface to the capture histories in the
  # global `encounter.his`, rank them by AICc, and write the AICc table and
  # the top model's derived abundance estimates to CSV.
  #
  # path:         project sub-directory (under here()) containing a Results folder
  # site_species: label used to name models and output files
  # year:         survey year attached to the returned estimates
  #
  # Returns the top model's derived "N Population Size" estimates, annotated
  # with individuals, year, analysis type and site_species.
  #
  # NOTE(review): this function changes the working directory twice and never
  # restores the caller's directory, and it writes the globals Mt1, AICctable,
  # ModList and top.estimates with `<<-`; downstream code appears to rely on
  # these side effects, so they are documented here rather than removed.
  suppressMessages(conflict_prefer("filter", "dplyr"))
  suppressMessages(conflict_prefer("here", "here"))
  # setwd to save
  setwd(paste0(here(), "/", path, "/Results"))
  # change to character
  ch <- data.frame(ch = as.character(encounter.his$ch))
  ch$ch <- as.character(ch$ch)
  # read the site_species.inp file
  # Number of encounter histories, exported for later use.
  Mt1 <<- length(ch$ch)
  # Huggins models
  # Detection-probability specifications. Only pdotshared and ptimeshared are
  # fitted below; the remaining three are unused in this Mo/Mt-only run.
  pdotshared=list(formula=~1,share=TRUE)
  ptimeshared=list(formula=~time,share=TRUE)
  ptime.c=list(formula=~time+c,share=TRUE)
  ptimemixtureshared=list(formula=~time+mixture,share=TRUE)
  pmixture=list(formula=~mixture)
  message("Modelling beginning")
  # Candidate model set
  # Mo
  # Mo: constant detection probability.
  Mo <- mark(ch, model="Huggins", model.name=paste0(site_species, ".Mo"),
             model.parameters=list(p=pdotshared))
  # Mt
  # Mt: time-varying detection probability.
  Mt <- mark(ch, model="Huggins", model.name=paste0(site_species, ".Mt"),
             model.parameters=list(p=ptimeshared),adjust=TRUE)
  message("Modelling complete: Mo and Mt only \n")
  message("Estimates:")
  # collect.models() gathers every mark model in the calling frame.
  updated <- suppressWarnings(collect.models())
  # updated AICctable
  AICctable <<- suppressWarnings(model.table(updated))
  ModList <<- suppressWarnings(updated)
  # parsing top model
  # Strip the site_species prefix to recover the variable name ("Mo"/"Mt"),
  # then eval(parse()) to get the fitted object back. NOTE(review): fragile —
  # breaks if model.name conventions change.
  name <- str_replace_all(AICctable[1,3], paste0(site_species,"."), "")
  mod <- eval(parse(text=name))
  # get derived estimates
  data <- mod$results$derived$`N Population Size`
  data$individuals <- length(ch$ch)
  data$year <- year
  top.estimates <<- data
  # data analysis and estimator type
  data$analysis <- "closed capture"
  data$estimator <- "top model"
  data$site_species <- site_species
  # saving path getwd()
  setwd(paste0(here(), "/", path))
  write.csv(AICctable[,-c(1,2)], paste0(site_species, "_AICctable_closed", ".csv"), row.names = FALSE)
  write.csv(data, paste0(site_species, "_estimates_closed", ".csv"), row.names = FALSE)
  return(data)
}
|
b71a32496af751e739cab0a155bf1592ae4f54dd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/allanvar/examples/gyroz.Rd.R | ece1890b2b179519b784c387966a5dc68eb92af4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 203 | r | gyroz.Rd.R | library(allanvar)
### Name: gyroz
### Title: Gyro sensor output for one-axis simulated gyroscope
### Aliases: gyroz
### Keywords: datasets
### ** Examples
# Load the simulated one-axis gyroscope series shipped with 'allanvar'.
data(gyroz)
# Report the sampling frequency recorded in the ts object.
frequency(gyroz)
# Quick look at the raw sensor output over time.
plot(gyroz)
|
fd4e176b26708367a0a98fd91a0f927c7f39478d | 760239720b2efdf04c2e7d8c241c481e9da1627d | /cachematrix.R | a339e51dd9536010bc8866c3a89c50327ab231ba | [] | no_license | sergbelich/ProgrammingAssignment2 | c7945482930f6b4736816a482973190505032c4a | 1604376b76b7d239801976d0a2e8e8d3ce6bdcb8 | refs/heads/master | 2021-01-16T19:34:54.327960 | 2016-02-22T05:02:48 | 2016-02-22T05:02:48 | 52,245,914 | 0 | 0 | null | 2016-02-22T04:00:49 | 2016-02-22T04:00:48 | null | UTF-8 | R | false | false | 2,331 | r | cachematrix.R | # Assignment: Caching the Inverse of a Matrix
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly (there are
# also alternatives to matrix inversion that we will not discuss here).
# The assignment is to write a pair of functions that cache the inverse of a matrix.
# makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
        # Pair a matrix with a slot that caches its inverse. cacheSolve()
        # below fills and reads that slot through the returned closures.
        inverse_cache <- NULL
        set <- function(value) {
                x <<- value             # Replace the wrapped matrix
                inverse_cache <<- NULL  # Drop the now-stale inverse
        }
        get <- function() x             # Get Matrix
        setInverse <- function(solve) inverse_cache <<- solve  # Set Inverse Matrix
        getInverse <- function() inverse_cache                 # Get Inverse Matrix
        # Expose the four closures by name
        list(set = set,
             get = get,
             setInverse = setInverse,
             getInverse = getInverse
        )
}
# The cacheSolve function computes the inverse of the special "matrix" returned
# by makeCacheMatrix above. If the inverse has already been calculated (and the
# matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
        # Inverse of the special "matrix" x (from makeCacheMatrix): reuse the
        # cached inverse when one exists, otherwise compute it with solve()
        # (extra arguments forwarded) and store it for next time.
        known <- x$getInverse()
        if (!is.null(known)) {
                message("getting cached data")
                return(known)
        }
        fresh <- solve(x$get(), ...)
        x$setInverse(fresh)
        fresh
}
# a<-diag(4,4)
# a
# [,1] [,2] [,3] [,4]
# [1,] 4 0 0 0
# [2,] 0 4 0 0
# [3,] 0 0 4 0
# [4,] 0 0 0 4
#
# Matrix_a <- makeCacheMatrix(a)
#
# cacheSolve(Matrix_a)
# [,1] [,2] [,3] [,4]
# [1,] 0.25 0.00 0.00 0.00
# [2,] 0.00 0.25 0.00 0.00
# [3,] 0.00 0.00 0.25 0.00
# [4,] 0.00 0.00 0.00 0.25
|
076bbab55d812b2c479712ab440e1c76a7bddd4f | 5bf8bc5fc2c3ce33658c8ff80f7047aa59aa2920 | /cachematrix.R | 759878697255f3d80bcd564bb309c63733206426 | [] | no_license | HHStarling/ProgrammingAssignment2 | 27e9459c2238190afa73c8830a4cb75c2fce62aa | 480fd99d1ec5bfcc5095bb24ad6d7ce6e747fd8a | refs/heads/master | 2021-01-17T22:43:49.589149 | 2015-09-14T17:40:50 | 2015-09-14T17:40:50 | 42,458,057 | 0 | 0 | null | 2015-09-14T15:40:39 | 2015-09-14T15:40:37 | null | UTF-8 | R | false | false | 1,776 | r | cachematrix.R | ## Functions for Programming assignment 2 - R Programming
## makeCacheMatrix and cacheSolve are used to calculate and store/cache
## the inverse of a matrix.
## programmer: HHStarling 20150914
## TO USE THE FUNCTIONS:
## 1.source this file in working directory
## 2.create matrix and assign to list of functions in makeCacheMatrix
## my_matrix <- makeCacheMatrix (matrix(1:4, nrow=2, ncol=2))
## 3.then solve for inverse matrix using cacheSolve
## cacheSolve(my_matrix)
## *****Functions below *****
## makeCacheMatrix creates a special "matrix" object that can cache its inverse matrix
## last updated: 20150914
makeCacheMatrix <- function(x = matrix()) {
    ## Container pairing a matrix with a cached copy of its inverse.
    ## The four closures returned in the list all share this call's
    ## environment, so `<<-` assignments persist between calls.
    inv <- NULL
    list(
        set = function(y) {
            ## Store a new matrix and invalidate any cached inverse.
            x <<- y
            inv <<- NULL
        },
        ## Retrieve the stored matrix.
        get = function() x,
        ## Record / retrieve the cached inverse (NULL until computed).
        setinverse = function(solve) inv <<- solve,
        getinverse = function() inv
    )
}
## cacheSolve computes the inverse of the special "matrix" returned by
## the makeCacheMatrix function above. If the inverse has already been
## calculated, then function returns inverse from the cache.
cacheSolve <- function(x, ...) {
    ## Return the inverse of 'x', a makeCacheMatrix object.
    ## A cached result short-circuits the computation entirely.
    hit <- x$getinverse()
    if (!is.null(hit)) {
        message("getting cached data")
        return(hit)
    }
    ## Cache miss: invert, remember for next time, and return.
    result <- solve(x$get(), ...)
    x$setinverse(result)
    result
}
|
433911629200f60af32f53b996cc5777e820a7a6 | f6c95bb6158e98b2d3d5a2023825cd9ebb9a76bb | /man/leaf_recode.Rd | 1a4ecd096ec37a5dfe4fd3e9da09badb4f765495 | [
"MIT"
] | permissive | rossellhayes/leafpeepr | 81bd5a2e5d9f0ad7ccd5fa79b26bf4111e3e1f41 | 70a5e54b94047fbe597d2095f7fd2ea7f7f7bbfb | refs/heads/main | 2022-06-14T20:07:27.380834 | 2020-02-28T20:50:35 | 2020-02-28T20:50:35 | 243,159,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,900 | rd | leaf_recode.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/leaf_recode.R
\name{leaf_recode}
\alias{leaf_recode}
\title{Recode columns of a data frame}
\usage{
leaf_recode(tbl, code_tbl)
}
\arguments{
\item{tbl}{Data frame to modify}
\item{code_tbl}{A data frame of recoding values, either:
\itemize{
\item A data frame with three columns, \code{variable}, \code{code}, and \code{value}.
\code{variable} matches the column name to recode in \code{tbl}. \code{code} matches the
encoded values in \code{tbl}. \code{value} contains the values that each code
should be replaced with. (For example, see \code{leafpeepr::acs_codes_long}.)
\item A data frame with two adjacent columns for each column to be recoded in
\code{tbl}. One column contains encoded values and can have any name. The
other contains recoded values and has the same name as the column to
recode in \code{tbl}. (For example, see \code{leafpeepr::acs_codes}.)
}
Each code column can contain either literal codes or one-sided formulas.
(For example, compare \code{leafpeepr::acs_race_codes} and
\code{leafpeepr::acs_age_codes}.)}
}
\value{
A data frame with selected columns recoded
}
\description{
Recodes values in a data frame by joining with a data frame of recoded
values.
}
\examples{
# A data frame of encoded data
acs_nh
# A data frame of recode values
acs_sex_codes
#Recode
leaf_recode(acs_nh, acs_sex_codes)
# You can also specify recoding using formulas
acs_bpl_codes
leaf_recode(acs_nh, acs_bpl_codes)
# Or a mix of values and formulas
acs_educ_codes
leaf_recode(acs_nh, acs_educ_codes)
# You can also use a data frame with recode values for multiple columns
# Either a wide data frame
acs_codes
leaf_recode(acs_nh, acs_codes)
# Or a long data frame
acs_codes_long
leaf_recode(acs_nh, acs_codes_long)
}
\seealso{
\code{\link[dplyr:case_when]{dplyr::case_when()}} to recode data using formulas
}
|
4c96346aa690983a9c8627e59870485613bd5905 | d458a443c28455a13e204fcffd2c3ebe053f5179 | /download-data.r | 2f30312fcd50ce8bead0d11c36fc57128984ac0b | [] | no_license | AndriiGnap/murders | 2fa1502eb0ed7e26d46ca83b3a4785147a93a6cb | a31284bb099a6faa1e914df4945bf35f29deab28 | refs/heads/master | 2023-02-03T22:01:25.738218 | 2020-12-25T15:47:47 | 2020-12-25T15:47:47 | 324,388,965 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7 | r | download-data.r | url <- |
4b7e3d74a2de61dfb298f78d9b00730a43339d6c | d0d4b8f8cd10f31cac78780429d47ef1496faaf5 | /run_analysis.R | 78e83cf6f8d1b38687e0a9cc78d024c76fe0af0e | [] | no_license | abeyg/coursera-data-science-data-cleaning | 41006ceed879653626c6bf8f1382c455fc3bdd4f | deeb6146c13e7516795a24205e56cc637de51fef | refs/heads/master | 2021-01-10T14:43:49.719511 | 2015-11-22T16:18:51 | 2015-11-22T16:18:51 | 46,595,275 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,663 | r | run_analysis.R | #### Creates tidy data from the input by applying various filtering and selection as described in the
#### problem definition of the project (which is copied below).
#### Creates an R script run_analysis.R that does the following:
####
#### 1. Merges the training and the test sets to create one data set.
#### 2. Extracts only the measurements on the mean and standard deviation for each measurement.
#### 3. Uses descriptive activity names to name the activities in the data set
#### 4. Appropriately labels the data set with descriptive variable names.
#### 5. From the data set in step 4, creates a second, independent tidy data set
#### with the average of each variable for each activity and each subject.
####
#### In order to eliminate processing large amounts of unwanted data,
#### merging the data is done after filtering out unwanted data,
#### which is done early for performance reasons.
####
#### Dependency: The script uses data.table and dplyr packages.
#### Define all functions first. the entry point of the script starts at line 56.
#### Read and apply the specified processing on the given dataset.
#### Called with 'test' for test data and 'train' for training data.
read_and_process_data <- function (type) {
  # Load one partition ("test" or "train") of the UCI HAR dataset and return
  # a single data frame of Subject, Activity and the mean/std features.
  # Relies on script-level globals defined below: generate_path(),
  # mean_or_std_features, features, activity_labels, activityLabel,
  # subjectLabel.
  # read all relevant data.
  X <- read.table(generate_path(type, "X"), sep = "", header = F, stringsAsFactors = F)
  y <- read.table(generate_path(type, "y"), sep = "", header = F, stringsAsFactors = F)
  sub <- read.table(generate_path(type, "subject"), sep = "", header = F, stringsAsFactors = F)
  # subset only mean and standard deviation for each measurement.
  # Filtering early keeps the merge below small.
  X <- X[, mean_or_std_features]
  # Assign column names for the features
  colnames(X) <- features
  # replace activity ids with labels
  # activity ids are 1-based, so they index activity_labels directly.
  y[,1] <- activity_labels[y[,1]]
  # Assign column names for activity and subject
  colnames(y) <- activityLabel
  colnames(sub) <- subjectLabel
  # Bind subject, x & y data
  cbind(sub, y, X)
}
#### Generate path based on type
generate_path <- function(type, name) {
  # Build "<dataPath>/<type>/<name>_<type>.txt", e.g.
  # "./UCI HAR Dataset/test/X_test.txt". Relies on the script-level
  # `dataPath` constant defined in the entry-point section below.
  file_name <- paste0(name, "_", type, ".txt")
  paste(dataPath, type, file_name, sep = "/")
}
#### End of functions
####
#### Entry point.
####
# Install missing dependencies on first run, then load them.
# NOTE(review): require() inside if() for loading is fragile (it returns
# FALSE instead of erroring); library() after install is the safer idiom.
if (!require("data.table")) {
  install.packages("data.table")
}
if (!require("dplyr")) {
  install.packages("dplyr")
}
require("data.table")
require("dplyr")

#### Global constants: filenames, parameter names etc.
dataPath <- "./UCI HAR Dataset"
activityLabelsFilename <- "activity_labels.txt"
featureFilename <- "features.txt"
outputFilename <- "./tidy_data.txt"
activityLabel <- "Activity"
subjectLabel <- "Subject"

#### Read the feature names
features <- read.table(paste0(dataPath, "/", featureFilename), sep = "", header = F, stringsAsFactors = F)
#### We only need the fetaure names. drop the ids.
features <- features[,2]
#### Only extract mean and standard deviation for each measurement.
# Logical mask, also used inside read_and_process_data() to subset columns.
mean_or_std_features <- grepl("mean|std", features)
features <- features[mean_or_std_features]

#### Read the activity labels
activity_labels <- read.table(paste0(dataPath, "/", activityLabelsFilename), sep = "", header = F, stringsAsFactors = F)
#### We only need the activity names. drop the ids.
activity_labels <- activity_labels [,2]

#### Read and process test and train data
test_data <- read_and_process_data("test")
train_data <- read_and_process_data("train")

#### Merge training and test
merged_data <- rbind(test_data, train_data)

#### Generate tidy data as the final output
# Mean of every feature per (Subject, Activity) pair.
# NOTE(review): summarise_each()/funs() are deprecated in dplyr >= 0.7;
# summarise(across(everything(), mean)) is the modern equivalent.
tidy_data <- merged_data %>% group_by(Subject, Activity) %>% summarise_each(funs(mean))

#### Finally save the tidy data
write.table(tidy_data, file = outputFilename, row.names = F, sep=",")

#### End script
|
1714469c0fcaab1a77edb656b53f0c3413ffac52 | 90bc0268ab54edfeb1eb2231e3d40c074b1fc784 | /man/list_named.Rd | d2797e345cc426c767c4de1b8d549dfa280336a0 | [] | no_license | jackwasey/jwutil | e920952f8f42ef609c6019f7107c4256836fb4a9 | d149051dc750a56412c8c7d7d07c1d3619d4f4b2 | refs/heads/master | 2021-01-17T09:26:51.710521 | 2020-01-18T19:58:17 | 2020-01-18T19:58:17 | 24,302,789 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 505 | rd | list_named.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list.r
\name{list_named}
\alias{list_named}
\title{Make a list using input argument names as names}
\usage{
list_named(...)
}
\arguments{
\item{...}{arguments whose names become list item names, and whose values
become the values in the list}
}
\description{
Make a list using input argument names as names
}
\examples{
a <- c(1, 2)
b <- c("c", "d")
stopifnot(
identical(
list_named(a, b),
list(a = a, b = b)
)
)
}
|
0e6f6b4a24c67634b0bc76083c9adccf5c8167ac | ca1366f48b604a67cf37023c71ba69ccaf9fe556 | /R/match_font.R | 00c24d944f6592ffdb3624608059a1f9fc87bfb4 | [
"MIT"
] | permissive | jimhester/systemfonts | 89ce3c025f3956dac0cb06fc0ad2dd53e1cc61dc | e3c5febec9fa879e5524b135edfa00bd68b2464f | refs/heads/master | 2021-07-07T20:45:44.535258 | 2019-06-04T08:42:15 | 2019-06-04T08:42:15 | 190,199,514 | 0 | 0 | NOASSERTION | 2019-06-04T12:45:06 | 2019-06-04T12:45:06 | null | UTF-8 | R | false | false | 411 | r | match_font.R | #' Find a system font by name and style
#'
#' This function locates the font file best matching a name and optional style
#' (italic/bold).
#'
#' @return A path locating the font file
#'
#' @export
#'
match_font <- function(family, italic = FALSE, bold = FALSE) {
if (!is.character(family)) stop("family must be a string", call. = FALSE)
.Call("match_font_c", family, as.logical(italic), as.logical(bold))
} |
1d3dbc59c91f3d61dde9ea1c8e3df7884b6bf915 | 4161c9d898a85ddc9ddcf5efbd5e9806580e7d45 | /cachematrix.R | 5d62c923ecf8f63570b19fe6b289636101ae9709 | [] | no_license | gpodolan/ProgrammingAssignment2 | 73d65974cbf2376163d52667936d5ab10e30f792 | d53891385ac0ac052e42d1cc036c606c461a85a8 | refs/heads/master | 2020-04-01T04:17:59.876453 | 2018-10-13T10:49:48 | 2018-10-13T10:49:48 | 152,857,627 | 0 | 0 | null | 2018-10-13T09:46:30 | 2018-10-13T09:46:30 | null | UTF-8 | R | false | false | 1,425 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  # Create a "cache matrix": a list of closures around a matrix `x` and a
  # memoized inverse `inv`, consumed by cacheSolve() below.
  inv <- NULL #sets inverse to NULL
  set <- function(y){
    # Replace the stored matrix and invalidate the cached inverse
    x <<- y
    inv <<- NULL
  }
  get <- function() x #returns the stored matrix
  # BUG FIX: the original used the local assignment `inv <- inverse`, which
  # only bound a variable inside this closure's call frame, so the shared
  # cache was never written and cacheSolve() recomputed the inverse on every
  # call. `<<-` assigns into the enclosing environment shared by all four
  # closures, making the cache actually persist.
  setinverse <- function(inverse) inv <<- inverse #sets the inverse matrix
  getinverse <- function() inv #returns the inverse matrix
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) #lists the available functions
}
## cacheSolve: return the inverse of the matrix held in a makeCacheMatrix()
## cache object `x`, computing (and storing) it only when no cached copy
## exists yet. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and stash it for later calls
    data <- x$get()
    cached <- solve(data, ...)
    x$setinverse(cached)
  } else {
    # Cache hit: skip the computation entirely
    message("Getting cached inverse")
  }
  cached
}
|
30cb691ab418a0bf3e87574f5d5e99757a41c906 | a54619ef3136e960a8174cd8c31672f680265bca | /S24 Chunker Dataflow Folder/3-R Script Source/1 - Exploring UK Buildings - Functionised.R | efda257b190a57103a21b726aa175fc3e280406b | [] | no_license | tayoso2/pdas_mapping | 47f36791bd88fe0b3ca8fb60c9cd8faf4e0a9844 | 831895fc2ffc28f933a26e4da30bfbc23ca1ed98 | refs/heads/main | 2023-05-07T02:25:07.045575 | 2021-06-02T12:35:51 | 2021-06-02T12:35:51 | 373,146,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,334 | r | 1 - Exploring UK Buildings - Functionised.R | # All packages are loaded as part of "3 - Chunker.R"
# Data load ---------------------------------------------------------------
loadin <- function(buildingpath, n24path, crs = 27700) {
  # Read the building-footprint and N24 pipe shapefiles, drop any Z
  # coordinate, assign the given CRS (default: British National Grid,
  # EPSG:27700), and compute building centroids.
  #
  # Returns a named list: Buildings (polygons), Centroids (points), n24
  # (pipe lines).
  read_layer <- function(path) {
    layer <- st_read(path, stringsAsFactors = FALSE) %>%
      st_zm() # discard the Z dimension
    st_crs(layer) <- crs
    layer
  }
  footprints <- read_layer(buildingpath)
  pipes <- read_layer(n24path)
  list(Buildings = footprints,
       Centroids = st_centroid(footprints), # one centroid per building polygon
       n24 = pipes)
}
# Base Functions ----------------------------------------------------------
findnearestneighbourandgetline <- function(centroids, n24pipes, crs = 27700) {
  # For every building centroid, find the nearest N24 pipe and replace the
  # centroid's POINT geometry with the shortest connecting LINESTRING to
  # that pipe.
  #
  # centroids: sf POINT layer of building centroids.
  # n24pipes:  sf line layer of N24 pipes (attributes such as Unbnd_Y are
  #            joined later via getattributesandlength()).
  # crs:       kept for interface consistency with loadin(); not used in
  #            the body itself.
  #
  # Returns `centroids` with two bookkeeping columns added (nearest_n24 =
  # row index of the matched pipe, centroid_index = the centroid's own row
  # number) and its geometry swapped for the connecting line.
  fake_pdas_indices <- st_nearest_feature(centroids, n24pipes)
  # Record each centroid's matched-pipe index and its own row number
  centroids <- centroids %>% 
    mutate(nearest_n24 = fake_pdas_indices,
           centroid_index = 1:dim(centroids)[1])
  # Build, pairwise, the shortest line from each centroid to its matched
  # pipe: the index vector is re-expanded into an sf object so that row i
  # of the right-hand side is the pipe nearest to centroid i.
  # NOTE(review): the left_join() relies on an implicit join key ("value" =
  # pipe row number); confirm n24pipes has no clashing "value" column.
  return(centroids %>%
           mutate(geometry = 
                    st_nearest_points(centroids,
                                      fake_pdas_indices %>% 
                                        as_tibble() %>% 
                                        left_join(n24pipes %>%
                                                    mutate(value = row_number())) %>% 
                                        st_as_sf,
                                      pairwise = T)
           ) %>% 
           st_as_sf)
  # return(centroids)
}
# Attribute Grabbing ------------------------------------------------------
getattributesandlength <- function(s24pdaslines, n24pipes, nearestindexcol = "nearest_n24") {
  # Attach the matched N24 pipe's attributes (year, material, diameter,
  # tag, road flag, type) to each S24/PDaS line via its nearest-pipe index.
  # NOTE(review): `nearestindexcol` is currently unused; the join key is
  # hard-coded to "nearest_n24".
  pipe_attrs <- n24pipes %>%
    st_drop_geometry() %>%
    # Rename while selecting: ConNgh* = "connected neighbour" attributes
    select(ConNghYea = Unbnd_Y,
           ConNghMat = Unbnd_M,
           ConNghDia = Unbnd_D,
           ConNghTag = Tag,
           road_flg = road_flag,
           ConNghTyp = Type,
           index)
  s24pdaslines %>%
    left_join(pipe_attrs, by = c("nearest_n24" = "index"))
}
# Basic Line Drawing ------------------------------------------------------
extractcoordinates <- function(spatialdf, pointnames = c("1","2"), textcol = FALSE, extractcol = "tempgeometry") {
  # Pull numeric coordinates out of an sf object's WKT representation.
  #
  # For LINESTRING/MULTILINESTRING geometry the regex captures the first two
  # coordinate pairs into columns "x-<name1>", "y-<name1>", "x-<name2>",
  # "y-<name2>"; for POINT/MULTIPOINT it captures a single pair. Any other
  # geometry type prints a message and returns NULL.
  #
  # spatialdf:  an sf data frame (or, with textcol = TRUE, a data frame
  #             that already carries WKT text in `extractcol`).
  # pointnames: suffixes used to label the extracted coordinate columns.
  # textcol:    TRUE when `extractcol` already holds WKT text, skipping the
  #             geometry-to-text conversion step.
  # extractcol: name of the column holding (or to hold) the WKT text.
  #
  # NOTE(review): the patterns only match non-negative coordinates, which
  # is fine for projected EPSG:27700 eastings/northings but would fail for
  # e.g. lon/lat values west of Greenwich.
  if (st_geometry_type(spatialdf)[1] == "LINESTRING" |
      st_geometry_type(spatialdf)[1] == "MULTILINESTRING") {
    print("Detected LINESTRING geometry")
    regexvector <- "(?:\\()(\\d+\\.\\d+|\\d+) (\\d+\\.\\d+|\\d+), (\\d+\\.\\d+|\\d+) (\\d+\\.\\d+|\\d+)"
    cols <- c(paste0("x-",pointnames[1]),
              paste0("y-",pointnames[1]),
              paste0("x-",pointnames[2]),
              paste0("y-",pointnames[2]))
  } else if (st_geometry_type(spatialdf)[1] == "POINT" |
             st_geometry_type(spatialdf)[1] == "MULTIPOINT") {
    print("Detected POINT geometry")
    regexvector <- "(?:\\()(\\d+\\.\\d+|\\d+) (\\d+\\.\\d+|\\d+)"
    cols <- c(paste0("x-",pointnames[1]),
              paste0("y-",pointnames[1]))
  } else {
    print("Can't proceed as the geometry is not linestring or point")
    return(NULL)
  }
  if (isFALSE(textcol)) {
    # Serialize the geometry column to WKT under the requested column name,
    # then drop the sf geometry so the object behaves as a plain tibble.
    spatialdf$tempgeometry <- st_as_text(spatialdf$geometry)
    spatialdf <- spatialdf %>%
      rename(!!sym(extractcol) := tempgeometry) %>%
      select(-geometry)
  }
  spatialdf %>%
    as_tibble() %>% # must be a plain tibble before tidyr::extract()
    extract(!!sym(extractcol),
            into = cols,
            regex = regexvector) %>% # split WKT into coordinate columns
    mutate_at(vars(cols), as.numeric)
}
extractcoordinatesnongeom <- function(df, pointnames = c("1","2"), extractcol = "tempgeometry") {
  # Companion to extractcoordinates() for plain data frames whose
  # `extractcol` column already contains WKT text (no sf geometry column).
  # The geometry type is sniffed from the first row's WKT string.
  #
  # df:         data frame with WKT text in `extractcol`.
  # pointnames: suffixes for the output coordinate columns.
  # extractcol: name of the WKT text column.
  #
  # NOTE(review): as in extractcoordinates(), the regexes only match
  # non-negative coordinates (fine for EPSG:27700 eastings/northings).
  if (isTRUE(str_detect(df[1, extractcol], "LINESTRING"))) {
    print("Detected LINESTRING geometry")
    regexvector <- "(?:\\()(\\d+\\.\\d+|\\d+) (\\d+\\.\\d+|\\d+), (\\d+\\.\\d+|\\d+) (\\d+\\.\\d+|\\d+)"
    cols <- c(paste0("x-",pointnames[1]),
              paste0("y-",pointnames[1]),
              paste0("x-",pointnames[2]),
              paste0("y-",pointnames[2]))
  } else if (isTRUE(str_detect(df[1, extractcol], "POINT"))) {
    print("Detected POINT geometry")
    regexvector <- "(?:\\()(\\d+\\.\\d+|\\d+) (\\d+\\.\\d+|\\d+)"
    cols <- c(paste0("x-",pointnames[1]),
              paste0("y-",pointnames[1]))
  } else {
    print("Can't proceed as the geometry is not linestring or point")
    return(NULL)
  }
  df %>%
    as_tibble() %>% # plain tibble required by tidyr::extract()
    extract(!!sym(extractcol),
            into = cols,
            regex = regexvector) %>% # split WKT into coordinate columns
    mutate_at(vars(cols), as.numeric)
}
createnewlinecoords <- function(df, gradcoords = c("x-1","x-2","y-1","y-2"), invgrad = F, centroidcoords = c("x-1", "y-1"), newcoordsname = "newcoords", length = 100) {
  # For each row, compute the gradient between two coordinate pairs and
  # place a new endpoint `length` units away from the centroid coordinates
  # along that gradient (or perpendicular to it, with invgrad = TRUE).
  #
  # gradcoords:     column names (x1, x2, y1, y2) used for the slope.
  # invgrad:        TRUE = use the negative reciprocal (perpendicular).
  # centroidcoords: column names of the point the new line starts from.
  # newcoordsname:  suffix for the output columns "x-<name>"/"y-<name>".
  # length:         distance to the new endpoint, in CRS units (metres
  #                 under EPSG:27700).
  #
  # NOTE(review): changex is a square root and therefore always >= 0, so
  # the new point always extends in the +x direction; the sign branch
  # flagged below was never implemented.
  # NOTE(review): a vertical segment yields gradient = Inf, making
  # changex = 0 and changey = NaN -- callers should guard against this.
  df <- df %>%
    rowwise %>% 
    mutate_at(c(gradcoords), as.numeric) %>%
    mutate(gradient = (!!sym(gradcoords[4]) - !!sym(gradcoords[3])) / (!!sym(gradcoords[2]) - !!sym(gradcoords[1]))) %>%
    mutate(gradient = ifelse(invgrad == T, -1/gradient, gradient)) %>% 
    mutate(changex = sqrt((length^2)/((gradient^2)+1)), # x-displacement giving total distance `length` along the slope
           changey = changex * gradient) %>% 
    ungroup
  # If this is gonna be positive or negativeeeeee GOES HERE
  # Create the new coordinates for the end of the line
  df %>% 
    mutate(!!sym(paste0("x-", newcoordsname)) := !!sym(centroidcoords[1]) + changex,
           !!sym(paste0("y-", newcoordsname)) := !!sym(centroidcoords[2]) + changey)
}
createline <- function(df, crs = 27700, ...) {
  # Build one sfc LINESTRING per row of `df` from paired coordinate
  # columns.
  #
  # df:  data frame holding columns named "x-<name>" and "y-<name>".
  # crs: CRS assigned to the resulting sfc (default EPSG:27700).
  # ...: point-name suffixes in drawing order (e.g. "1", "3", "4"); each
  #      contributes one vertex per row.
  makeline3 <- function(...) {
    # The dots arrive as all x-coordinates followed by all y-coordinates,
    # so their count must be even; fold them into an n x 2 vertex matrix.
    dots <- unlist(list(...))
    st_linestring(matrix(dots, length(dots) / 2, 2))
  }
  # Collect the requested point names and derive the columns to pull
  dots <- unlist(list(...))
  #print(dots)
  cols <- c(paste0("x-",dots), paste0("y-", dots))
  #print(cols)
  # Keep only the coordinate columns, x-block first then y-block
  reduceddf <- df %>%
    select(cols)
  # Assemble a "..1,..2,...,..k" placeholder string so pmap() forwards
  # every column of reduceddf positionally into makeline3().
  # NOTE(review): this builds code as a string and evaluates it with the
  # deprecated {lazyeval} package -- fragile, but left untouched here.
  repeats <- paste0("..", seq(1,length(dots)*2 -1, 1), ",", collapse = "")
  repeats2 <- paste0(repeats,"..",length(dots)*2)
  f <- paste0("pmap(reduceddf, ~makeline3(",repeats2,"))", collapse = "")
  t <- as.lazy(f, environment())
  lazy_eval(t) %>% 
    st_as_sfc(crs = crs)
}
findoverlap <- function(linesdf, buildingsdf, crs = 27700) {
  # For each line, return its intersection with the positionally-matching
  # building outline (row 1 of linesdf pairs with row 1 of buildingsdf).
  # Lines that never cross their building are returned unchanged.
  #
  # linesdf:     sfc/sf collection of lines.
  # buildingsdf: sf polygon layer; cast to MULTILINESTRING so the
  #              intersection yields boundary crossing points rather than
  #              clipped sub-lines.
  # crs:         CRS assigned to the result (new parameter; defaults to
  #              27700, matching the previously hard-coded value).
  intersects <- function(geom1, geom2) {
    if (st_intersects(geom1, geom2, sparse = FALSE)) {
      st_intersection(geom1, geom2)
    } else {
      geom1 # no crossing: keep the original line
    }
  }
  intersectionpoints <- pmap(.l = list(linesdf, st_cast(buildingsdf, "MULTILINESTRING")$geometry),
                             .f = intersects) %>%
    st_as_sfc(crs = crs)
  # is.na(st_dimension(intersectionpoints))
  intersectionpoints
}
#
# shapeinfo <- loadin(buildingpath = "H:/Projects/PDaS/PipeAges/AssetShape/Belgrave St Barts Academy Buildings.shp",
# n24path = "H:/Projects/PDaS/PipeAges/AssetShape/Belgrave St Barts Academy N24.shp",
# crs = 27700)
#
#
# # Get centroid to sewer line
# centroidtosewer <- findnearestneighbourandgetline(centroids = shapeinfo[["Centroids"]],
# n24pipes = shapeinfo[["n24"]],
# crs = 27700)
#
#
# centroidtosewercoords <- extractcoordinates(spatialdf = centroidtosewer)
#
# # First intercept with polygon
# newlinecoords <- createnewlinecoords(df = centroidtosewercoords)
# newlinecoords$buildingcross <- findoverlap(linesdf = createline(newlinecoords, crs = 27700, "1", "2"), buildingsdf = shapeinfo[["Buildings"]])
#
# newlinecoords$buildingcross <- st_as_text(newlinecoords$buildingcross)
# maindf <- extractcoordinatesnongeom(df = newlinecoords, pointnames = c("buildcross-1"), extractcol = "buildingcross")
#
# # Second intercept with polygon
#
# newlinecoords2 <- createnewlinecoords(df = centroidtosewercoords, invgrad = T, newcoordsname = "newcoords2")
# newlinecoords2$buildingcross <- findoverlap(linesdf =
# createline(newlinecoords2, 27700, "1", "newcoords2"),
# buildingsdf = shapeinfo[["Buildings"]])
# newlinecoords2$buildingcross <- st_as_text(newlinecoords2$buildingcross)
# maindf <- maindf %>%
# bind_cols(extractcoordinatesnongeom(df = newlinecoords2, pointnames = c("buildcross-2"), extractcol = "buildingcross") %>%
# select_at(vars(contains("buildcross-2"))))
#
# # Draw newa dogs leg
#
# # Need to be able to add 2 on to the length (crossover + 2)
#
# maindf <- createnewlinecoords(df = maindf, gradcoords = c("x-1","x-2","y-1","y-2"), invgrad = F, centroidcoords = c("x-buildcross-1", "y-buildcross-1"), newcoordsname = "3", length = 2)
# maindf <- createnewlinecoords(df = maindf, gradcoords = c("x-1","x-2","y-1","y-2"), invgrad = T, centroidcoords = c("x-buildcross-2", "y-buildcross-2"), newcoordsname = "perp", length = 2)
#
# maindf <- maindf %>%
# mutate(`x-4` = `x-3` + (`x-3` - `x-perp`),
# `y-4` = `y-3` + (`y-3` - `y-perp`))
#
#
#
#
#
#
#
#
# linescalculated <- createline(df = maindf, crs = 27700, "1", "3", "4")
#
#
#
# ## PLOTTING CHECKS
#
#
# st_crs(linescalculated) <- 27700
#
# #st_write(linescalculated, "wtfisgoingon.shp")
#
# # Check by doing a colour coded plot
# #plot(linescalculated[1:dim(linescalculated)[1], ])
#
# # This allows tmap to work
# tmap_mode('view')
# # This plots it onto tmap with the correct basemap
# qtm(st_cast(shapeinfo[["Buildings"]][272,], "MULTILINESTRING"),
# basemaps = "Esri.WorldStreetMap")
#
# qtm(linescalculated,
# basemaps = "Esri.WorldStreetMap")
|
b16a07da791f1df9de341a3cc6dc03831127b795 | 71ad5585118ca60ad690eb3ede66562a01cd4b16 | /lab-scripts/ordinal-logistic-regression-lab.R | dcdc3079efe20de2b5a6ff9ce7a7992796b4d46a | [
"MIT"
] | permissive | depocen/post8000 | 3c1983a7718c218cd6029d5ff82a5d810127db22 | 56f4d557d98ac10e9191758933debb86366d9db3 | refs/heads/master | 2023-04-09T16:59:36.514644 | 2021-04-20T20:54:59 | 2021-04-20T20:54:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,215 | r | ordinal-logistic-regression-lab.R | #' ---
#' title: "Ordinal Logistic Regression"
#' author: Steven V. Miller, [svmiller.com](http://svmiller.com)
#' date: 1 April 2021
#' abstract: "This is a lab script for [POST 8000](http://post8000.svmiller.com), a graduate-level quantitative methods for public policy class that I teach at Clemson University. It will not be the most sophisticated R-related write-up of mine---check [my blog](http://svmiller.com/blog) for those---but it should be useful for discussion around the associated R script for the week's 'lab' session."
#' output:
#' html_document:
#' css: lab-script.css
#' toc: TRUE
#' toc_float:
#' collapsed: false
#' smooth_scroll: false
#' highlight: zenburn
#' ---
#'
#' # R Packages/Data for This Session
#'
#' I'm pretty sure we've yet to install the `{ordinal}` package, which I think is the best R package for handling ordinal models of all walks. The `{MASS}` package has a
#' `polr()` function, and lots of other goodies, but a few things in it conflict with my preferred workflow. Plus, I think `{ordinal}` just has more goodies for ordinal models.
#' I've already installed it, but let's wrap it in a simple function that will install it on your end if you've yet to install it. As always, `{tidyverse}` has our main
#' workflow functions and `{stevedata}` has our data.
if_not_install <- function(packages) {
  # Install any members of `packages` that are missing from the local
  # library; already-installed packages are left alone.
  #
  # packages: character vector of package names.
  # Returns invisible NULL (called for its side effect).
  missing_pkgs <- packages[!(packages %in% rownames(installed.packages()))]
  # Explicit length check instead of relying on integer truthiness
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs)
  }
  invisible(NULL)
}
if_not_install(c("ordinal", "tidyverse", "stevedata"))
#' Let's load the packages now.
library(tidyverse, quietly = TRUE)
library(stevedata)
library(ordinal) # Notice the function conflict here. The price of doing business
#' # Attitudes About Spending in the GSS
#'
#' Let's revisit an old data frame from earlier in the semester. You've seen these before. If you don't remember, they're attitudes recorded in the 2018
#' General Social Survey about attitudes toward various spending programs in the United States. The bulk of the spending items are coded such that -1 = the
#' respondent thinks we're spending too much on this particular topic, 0 = the respondent thinks the U.S. is spending "about (the) right" amount, and 1 = the
#' responding thinks the country is spending too little on the topic. Conceptually, I think of these items as communicating attitudes about support for more
#' spending on an ordered categorical scale. Higher values = a respondent implicitly thinking the U.S. should spend more on these topics.
gss_spending
?gss_spending
#' # Estimating an Ordinal Logistic Regression Model
#'
#'
#' Last year's script, in the interest of full disclosure, did this kind of stream of consciousness. So, I already know what kind of weirdness I can
#' expect from these models. No matter, let's plow forward. First, I want to do some recoding here. Let's create a simple dummy variable that
#' recodes the `degree` variable into a new one for those with a four-year college diploma. Let's make sure we drop the other party supporters
#' from the partisanship (`pid7`) variable. Finally, and this is important, let's declare some of these spending prompts to be ordered-categorical
#' variables with the `ordered()` function that comes in base R. We'll focus on two prompts---attitudes
#' toward spending on welfare (`natfare`) and attitudes about spending on social security (`natsoc`)---and 1) coerce both to be ordered-categorical
#' variables and 2) add them together and coerce *that* to be an ordered-categorical variable. Basically, the `{ordinal}` functions require a
#' dependent variable that is explicitly declared beforehand as an ordered factor. If you don't do this, the errors you get from the `{ordinal}`
#' function won't quite point you in this direction.
# Recode: create a college dummy, drop "other party" identifiers, build an
# additive welfare + social security index, and declare the spending items
# as ordered factors (required by {ordinal}'s clm()).
gss_spending <- gss_spending %>%
  mutate(collegeed = ifelse(degree >= 3, 1, 0),   # 1 = four-year degree or more
         pid7 = ifelse(partyid == 7, NA, partyid), # drop "other party" supporters
         natfaresoc = natsoc + natfare,            # welfare + social security index
         natfare_f = ordered(natfare),
         natsoc_f = ordered(natsoc),
         natfaresoc_f = ordered(natfaresoc))
#' Let's assume we want to model attitudes toward welfare spending among white people as a function of these things:
#' age, sex (whether respondent is a woman), college education, income, partisanship (D to R), and ideology (L to C). You'd
#' do this with the `clm()` function in the `{ordinal}` package even as the syntax you see looks like every other regression
#' model you'd estimate in R.
M1 <- clm(natfare_f ~ age + sex + collegeed + rincom16 +
pid7 + polviews, data=subset(gss_spending, race == 1))
summary(M1)
#' Remember: interpreting coefficients in your "first step" is functionally identical to what you'd do from an OLS model. That is,
#' you're looking for sign of coefficients and statistical significance. Here would be the preliminary takeaways, none of which are
#' particularly surprising beyond the gender effect. In these data, the statistically significant effects are for women, ideology,
#' and partisanship and all three are negative. Women are less likely than me to think about spending more on welfare. The ideology
#' and partisanship effects are unsurprising if you have at least a cursory understanding on American politics.
#'
#' I tend to use very general language on coefficient interpretation for ordinal models, but if you want something more exact, here it is.
#' Observe the coefficient for `polviews` is ~-.269, which, you'll recall, is a logit.
#' Thus, the natural logged odds of observing a 1 versus a 0 or -1 decreases by about -.269 for a unit increase in the `polviews` variable.
#' Related: the natural logged odds of observing a 0 versus a -1 decreases by about -.269 for a unit increase in the `polviews` variable.
#'
#' ## A Love/Hate Comment on Thresholds in Ordinal Models
#'
#' I'm generally loathe to talk about these things. They're not typically parameters of interest for how you're probably using an ordinal model.
#' However, you'll want to provide them anyway. These thresholds or "cut points" are natural logged odds between two variables.
#' So, in this case: the "coefficient" reading -1|0 is the natural logged odds of being a -1 versus a 0 or 1.
#' The "coefficient" reading 0|1 is the natural logged odds of being a -1 or 0 versus a 1.
#' The "|" is kind of misleading, especially if you're used to it as a strict logical operator.
#' In this case, the "|" marks a cumulative cut point between adjacent sets of response categories.
#'
#' Let's talk a bit about what's happening here. We call ordinal logistic regression an extension of (binary) logistic regression because:
#'
#' 1. it's in spirit *multiple* (binary) logistic regressions of
#' 2. the natural logged odds of appearing in a category or below it.
#'
#' However, we are assuming the lines are in parallel to each other, separated by the thresholds. So, in this case, think of this model
#' as kind of like two logistic regressions, each with identical betas. `logit(p(y == -1)) = -2.87 + B*X` and `logit(p(y <= 0)) = -1.4221 + B*X`.
#'
#' ## Assessing the Proportional Odds Assumption
#'
#' You should, at least in spirit, care about the proportional odds assumption that the slopes are the same at every level.
#' There are any number of ways of testing this and I *really* wish there was a Brant test add-on for the `{ordinal}` package. There isn't
#' (i.e. it's there for the `polr()` function in `{MASS}`, which I eschewed here).
#'
#' Instead, you can do a nominal test, which is the `{ordinal}` package's way of saying "likelihood ratio test."
#' Think of this as a test of the hypothesis that relaxing the proportional odds (PO) assumption of parallel lines across all levels
#' of the response provides a better model fit. If the p < .05, you reject the hypothesis that relaxing the PO assumption does not improve model fit.
#' In other words, one or more of the covariates may have non-constant effects at all levels.
nominal_test(M1)
#' You can interpret the above output in a few ways:
#'
#' 1. You can use this as a call for a multinomial model. This might even be advisable in this context. Basically, while my brain sees these variables as
#' three-item ordered factors communicating implicit support for more government spending on this topic, the truth is there aren't many categories in the response.
#' Thus, it might be advisable to fit a multinomial logit model (i.e. the GLM for nominal dependent variables) because this is really an unstructured response with just three
#' categories awkwardly given to the respondent. We won't discuss the multinomial logit model
#' in class---the truth is I rarely see it in published work---but the model estimates the natural logged odds of being in one category versus some other
#' "baseline" response. Maybe it makes sense, in this context, to have the "about rights" as the baseline and assess the natural logged odds of being a
#' "too little" versus an "about right" or a "too much" versus an "about right."
#' 2. Alternatively, you can allow the effects of those coefficients that the nominal test flagged to vary at all levels. You can do this
#' by specifying a nominal call in the `clm()` function. Here, we'll do it just for age and sex.
M2 <- clm(natfare_f ~ collegeed + rincom16 + pid7 + polviews, nominal = ~ age + sex, data=subset(gss_spending, race == 1))
summary(M2) # Notice there's no single coefficient for age and sex. It's in the intercepts/thresholds.
nominal_test(M2)
#' Now, however, the nominal test is complaining about the college education variable. At this point, I'm probably just going to throw up my hands and say
#' "multinomial model" for this variable. But, as I mentioned at the top of the script, I wrote this kind of stream of consciousness and I suspect
#' it's the few response categories we have that's causing this issue. So, let's take out that `natfare_f` variable and add in the prompt that sums both
#' `natfare` and `natsoc` together. This creates a five-item variable where -2 = those who think we're spending too much on both welfare and social security
#' and 2 = those who think we're spending too little on both. -1, 0, and 1 are also both possible.
#'
#' Here'd be the breakdown for those.
gss_spending %>%
filter(race == 1) %>%
distinct(natfaresoc, natfare, natsoc) %>%
na.omit %>%
arrange(natfaresoc)
#' Now let's try this again with this new dependent variable that amounts to an index sentiment on whether the respondent thinks the U.S. needs to spend
#' more on social welfare programs, here gauged by prompts on welfare and social security.
# Let's try this again
M3 <- clm(natfaresoc_f ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1))
summary(M3)
#' We do see there's no longer a significant gender difference, but the college education variable emerges as negative and statistically significant.
#' The ideology and partisanship variables are the same, basically. More values in the dependent variable mean
#' there are more thresholds through we must sift. However, we're here for the nominal test. Did we pass?
nominal_test(M3)
#' Much betta.
#'
#' 
#'
#' ## Imposing Your Will on the Ordinal Model
#'
#' `r emo::ji("fire")` #take coming up: I'm of the mentality you should always run an ordinal logistic regression if that's the DV you're handed.
#' I will throw something at you if you try running an OLS on a five-item Likert because that's just not the data you have.
#' But I kind of hate them, and I would forgive you for hating them too, because communicating them is a chore.
#' OLS has a straightforward interpretation. Binary DVs are really straightforward as well.
#' However, the PO assumption can be restrictive and there are a lot of moving pieces from the model output. Your audience may not have the appetite for it.
#'
#' In other words, be prepared to communicate your statistical model graphically and impose your will on a somewhat unruly model accordingly.
#'
#' In the `{ordinal}` package, you can do this with the `predict()` function and think about using it with hypothetical data. For example,
#' let's create a simple data frame that has all our right-hand side values at their typical values. But, we'll allow partisanship to
#' vary across three types. These will be the strong Democrats (`pid7 == 0`), the pure independents who say they don't lean one way or the
#' other (`pid7 == 3`), and the strong Republicans (`pid7 == 6`).
# Hypothetical respondents: a typical-aged man of average income, without a
# college degree, ideologically moderate -- identical except partisanship.
newdat <- tibble(age = median(gss_spending$age, na.rm = TRUE),
                 collegeed = 0,
                 sex = 0,
                 pid7 = c(0, 3, 6), # strong Democrat, independent, strong Republican
                 polviews = median(gss_spending$polviews, na.rm = TRUE),
                 rincom16 = median(gss_spending$rincom16, na.rm = TRUE))
newdat # inspect the hypothetical data
#' Thus, think of three types of people: a typical-aged man of average income, without a college diploma, and who says they're ideologically moderate. They're identical,
#' but for their partisanship. One is a strong Democrat, another an independent, and the last a Republican. We want to know what the effect of increasing
#' partisanship "looks like" for these three people across the handful of different responses recorded in the dependent variable. For simplicity's sake,
#' we're going to focus on that first model that looked at just attitudes about welfare, even acknowledging the model wasn't a super great fit for the data.
#'
#' You've been warned: this code is convoluted as hell. It's why I prefer Bayes for ordinal models, but Bayes is in two weeks.
# Oh god, here we go...
predict(M1, newdata = newdat, se.fit=T) %>% # get predictions with standard errors.
# This is a list of two matrices
# Let's coerce it to two data frames while also begrudging that I have to do this.
map(~as.data.frame(.)) %>% # god purrr is awesome
# There's a hiden rowname in here. It's going to somewhat coincide with the values of pid7
# Let's extract it
map(~rownames_to_column(.)) %>%
# Now let's make these two data frames into one data frame.
# Importantly, obj is going to tell me whether it's a prediction or a standard error around the prediction
map2_df(names(.), ~mutate(.x,obj=.y)) %>%
# alrightie... okay. See that rowname variable? I know that's the pid7 values of 0, 3, and 6.
# However, the clm predict doesn't save those. Let's tell them for what they are.
rename(pid7 = rowname) %>%
# It also delightfully thinks it's a character. So, let's humor it and overwrite it.
mutate(pid7 = rep(c("Strong Democrat", "Independent", "Strong Republican"), 2),
# Make it a factor in order it appears. You'll thank me later for this.
pid7 = forcats::fct_inorder(pid7)) %>%
# okay, tidyr::gather() is going to have to do some heavy lifting here.
gather(var, val, -pid7, -obj) %>%
# Importantly, I needed this longer because I want my -1, 0, and 1s (as responses) to be "long."
# so, now this made it "longer" while still giving me a glimpse as to what's my fit and what's my se.fit
# See that's in the obj column? Let's group_split and bind_cols to get them next to each other
group_split(obj) %>%
bind_cols() %>%
# voila! I have everything I need now
# however, I'll need to rename things and focus on just what I want
rename(pid7 = `pid7...1`,
natfare = `var...3`,
fit = `val...4`,
se = `val...8`) %>%
select(pid7, natfare, fit, se) %>%
# Now, let's have some fun and create a column called upr and lwr creating bounds around the estimate
mutate(upr = fit + 1.96*se,
lwr = fit - 1.96*se) %>%
ggplot(.,aes(pid7, fit, ymax=upr, ymin=lwr)) +
geom_pointrange() +
# Oh god help me I never do anything the easy way...
facet_wrap(~natfare, labeller=labeller(natfare = c("-1" = "Spends Too Much",
"0" = "Spending About Right",
"1" = "Spending Too Little"))) +
labs(title = "Attitudes Toward Spending on Welfare, by Partisanship",
x = "Partisanship", y = "Predicted Probability of the Response (with 95% Intervals)",
caption = "Source: General Social Survey, 2018. Note: for pedagogical use in my grad methods class. Stay out of my mentions.",
subtitle = "Increasing GOP partisanship increases the likelihood of the spend too much or spend about right response, but decreases the likelihood of the\nspend too little response. You knew this.")
#' ^ Consider this a preview for the quantities of interest week, that's coming up next. Basically: regression modeling is story-telling as well, in a way.
#' You, the story-teller, just have more work to do with ordinal models, even as the ordinal model may faithfully capture the underlying distribution of the DV.
#'
#' # When Can You Jettison the Ordinal Model for OLS?
#'
#'
#' I want to give you an "out", of a kind. The truth is OLS models are a better fit on ordered-categorical data than they are on dummy variables. What follows
#' will touch on some of the readings you had this week (and even earlier in the semester) on whether you can treat your ordinal DV as continuous. Here's
#' my rule of thumb:
#'
#' - **3-4**: basically, no. Don't do it. You have so few responses that the OLS model just isn't going to return a quantity of interest that I or the audience
#' should care to know.
#' - **5-7**: others do this. I don't, but I would say to use the OLS as a "first cut" to assess if there's a "there there", then finish with the ordinal model. Think of the
#' kind of data you have in, say, a five-item ordered categorical variable. Think of a Likert, for example. The ordinal model can tell you, with some work,
#' the probability of being a "strongly disagree", a "neither agree nor disagree", and a "strongly agree." Those are quantities of interest that kind of present themselves
#' in these applications. The ordinal model can help you with those. The OLS model really can't. The sign and significance may be unchanged, but that's also not the point.
#' - **8+**: f*ck it, just go for it, provided there's no natural clumping of responses on some extreme in the distribution. Here'd be the more thorough interpretation. With
#' more values on a still finite scale, you can start to think of the differences as "equally spaced out" where the observed responses rest on a continuum that makes a bit
#' more sense. The OLS model is still informative, if technically wrong. In our lecture, I showed how it performed okay with simulated data, even if it was discernibly
#' off the true parameters (and that was for a five-item response variable). No one is going to give you too much grief and I won't either,
#' but you may want to consider some form of robust standard error correction to be safe.
#'
#' ^ On the above point in the distribution of responses on a granular ordinal scale. Remember the bribe-taking prompt from the World Values Survey?
#' This was the justifiability of taking a bribe on a 1-10 scale. It has 10 responses, but almost all of them are at 1. In other words, don't treat that as interval below:
wvs_justifbribe %>%
group_by(f117) %>%
count() %>%
na.omit %>%
ggplot(.,aes(as.factor(f117), n)) +
geom_bar(stat="identity", alpha=0.8, color="black") +
scale_x_discrete(labels=c("Never Justifiable", "2", "3", "4",
"5", "6", "7", "8", "9", "Always Justifiable")) +
scale_y_continuous(labels = scales::comma) +
geom_text(aes(label=n), vjust=-.5, colour="black",
position=position_dodge(.9), size=4) +
labs(y = "Number of Observations in Particular Response",
x = "",
title = "The Justifiability of Taking a Bribe in the World Values Survey, 1981-2016",
caption = "Data: World Values Survey (1981-2016), via ?wvs_justifbribe in {stevedata}",
subtitle = "There are just 10 different responses in this variable with a huge right skew.")
#' You may not even want to think of it as ordinal. With noisy as hell data like this, as I mentioned in that session, you'll probably just want to embrace
#' the noisiness and estimate it as a binary DV of 1 versus not 1.
#'
#' What about our dependent variable from model 3?
summary(M3)
summary(M4 <- lm(natfaresoc ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1)))
broom::tidy(M3) %>%
filter(coef.type != "intercept") %>%
select(-coef.type) %>%
mutate(model = "Ordinal Logistic Regression") %>%
bind_rows(., broom::tidy(M4) %>% mutate(model = "OLS")) %>%
arrange(term)
#' ^ off, technically wrong, but not the worst I've ever seen. In fact, those *t*/*z* statistics look very similar even as the underlying coefficients are
#' being communicated on different scales. I'd still say to jettison OLS for the ordinal logistic regression here. Do it for the reasons I hinted
#' at above. If you have just five responses in the DV, I'm probably going to want to know about the extremes and the middle. There are a lot of moving pieces in an ordinal
#' model, but you can focus on just those responses that almost naturally present themselves in this setup. Those who advocate slapping an OLS sticker on all types
#' insist it does well enough being BLUE. My retort to that is 1) I'm not convinced it's doing that and 2) with so few response categories, OLS is going to
#' struggle in an obvious way providing reasonable fitted values. Ordinal logistic regression is tailored for communicating probabilities (albeit with
#' some work) for finite values. OLS can't do that.
#'
#' What about something bigger, like the `sumnatsoc` variable in the `gss_spending` data? Whereas Model 3 adds just the two prompts together, this sums
#' all responses toward various "social" prompts about the environment, health, dealing with drug addiction, education, improving racial equality, welfare,
#' roads, mass transit, parks, social security, and child care.
#'
#' Here's what this variable would look like, all 22 possible responses of it. There's a bit of a left tail for the anti-government-doing-anything folk, but this has
#' a nice juicy center for a variable with just 22 different responses.
# Distribution of the 22-point sumnatsoc index among white respondents.
gss_spending %>%
  filter(race == 1) %>%
  count(sumnatsoc) %>%
  ggplot(aes(x = ordered(sumnatsoc), y = n)) +
  geom_col()
#' Now, let's compare the ordinal model with the OLS model.
# Same comparison on the 22-point sumnatsoc index: cumulative link model (clm)
# treating it as ordered vs OLS treating it as interval, white respondents only.
M5 <- clm(ordered(sumnatsoc) ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1))
M6 <- lm(sumnatsoc ~ age + sex + collegeed + rincom16 + pid7 + polviews, data=subset(gss_spending, race == 1))
# Side-by-side tidy coefficients; clm threshold ("intercept") rows dropped.
broom::tidy(M5) %>%
  filter(coef.type != "intercept") %>%
  select(-coef.type) %>%
  mutate(model = "Ordinal Logistic Regression") %>%
  bind_rows(., broom::tidy(M6) %>% mutate(model = "OLS")) %>%
  arrange(term)
#' Similar performance. No one is going to yell too much at you for doing an OLS on a technically ordinal item that has like 22 different values.
#' But, maybe consider some kind of robust standard error correction.
|
3189e08ba1a76aaa19b60a3c099bec57639fb884 | 143fa3e56d6564fdccfa6b731ebe428241ee09dd | /tests/testthat/test-data-survey.R | 222a9a7d059074ad28b0f2722d5b62cbf6471ef6 | [
"MIT"
] | permissive | mrc-ide/naomi1 | 3b5484b84008cda992bab6473fdac2e895b164ca | 2cb981b9370f50641d3a8c4bc6d338e56e43877b | refs/heads/master | 2023-01-09T03:51:02.965022 | 2020-11-06T13:24:01 | 2020-11-06T13:24:01 | 310,593,762 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,185 | r | test-data-survey.R | context("test-data-survey")
# cmc_date(): Century Month Code — (1987 - 1900) * 12 + 2 = 1046 for Feb 1987.
test_that("cmc_date() returns correct value", {
  expect_equal(cmc_date(as.Date("1987-02-11", format = "%Y-%m-%d")), 1046)
})
# Non-Date inputs must be rejected, including date-like strings and integers.
test_that("cmc_date() returns an error if provided non-Date argument", {
  expect_error(cmc_date("foo"))
  expect_error(cmc_date(1046))
  expect_error(cmc_date("1987-02-11"))
})
# Midpoint of a fieldwork period, expressed as a "CYyyyyQq" calendar quarter.
test_that("get_mid_calendar_quarter() returns correct value", {
  start <- c("2005-04-01", "2010-12-15", "2016-01-01")
  end <-c("2005-08-01", "2011-05-15", "2016-06-01")
  expect_equal(get_mid_calendar_quarter(start, end),
               c("CY2005Q2", "CY2011Q1", "CY2016Q1"))
})
# Argument validation: NA endpoints, unparseable strings, reversed date order.
test_that("get_mid_calendar_quarter() returns error if arguments not Date", {
  expect_error(get_mid_calendar_quarter("2016-01-01", NA),
               "!is.na\\(end_date\\) is not TRUE")
  expect_error(get_mid_calendar_quarter("2016-01-01", "jibberish"),
               "character string is not in a standard unambiguous format")
  expect_error(get_mid_calendar_quarter(NA, "2016-01-01"),
               "!is.na\\(start_date\\) is not TRUE")
  expect_error(get_mid_calendar_quarter("2016-01-01", "2015-12-01"),
               "start_date <= end_date is not TRUE")
})
|
50e84ded84cd0a1d0840607663784d11013bf468 | 514ef1a621fba60bf920a884b2f37d3be945ba4f | /combing_night_files.R | 799571d23cf64715b40fd0d6984fa407fe0d82e0 | [] | no_license | aurielfournier/R_in_ecology | 478666737b012d8bacdac657d2239accb6a7ccbf | 05c8a96664f5bbaa4848af4f5fd1f703f8157fbd | refs/heads/master | 2021-01-10T14:02:17.148510 | 2016-02-02T00:49:10 | 2016-02-02T00:49:10 | 50,205,895 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,015 | r | combing_night_files.R |
# Combine the per-night rail survey CSVs into one master data frame.
# Each file name encodes its metadata:
#   year_month_day_obs_round_region_area_impound_treat_night.csv
file_names <- list.files(path="./R_in_ecology/night_files/",pattern=".csv")
# these are highly subseted files from my dissertation, I realize that latitude typically has longitude with it, but, this is all I'm using for this example.
library(tidyr)
library(dplyr)
library(auriel)
nights <- list()
# FIX: seq_along() instead of 1:length(file_names) — the latter iterates over
# c(1, 0) and errors when no files match the pattern.
for(i in seq_along(file_names)){
  # Parse the metadata fields out of the file name itself.
  dat <- as.data.frame(file_names[i])
  colnames(dat) <- "name"
  names <- dat %>% separate(name, into=c("year","month","day","obs","round","region","area","impound","treat","night"),sep="_")
  # Negative sep splits counting from the end of the string (peels the suffix off).
  names <- names %>% separate(night, into=c("night","file"), sep=-5)
  int <- read.csv(paste0("./R_in_ecology/night_files/",file_names[i]))
  lesscol <- int[,c("lat","name")]
  lesscol$name <- as.character(lesscol$name)
  # Pad 7-character codes with a trailing "N" (flush/walk not recorded).
  lesscol$name <- ifelse(nchar(lesscol$name)==7,paste0(lesscol$name,"N"),lesscol$name)
  lesscol <- lesscol %>% separate(name, into=c("name","distance"),sep=5) %>% separate(distance, into=c("species","distance"), sep=1) %>% separate(distance, into=c("distance","flush_walk"), sep=-2)
  lesscol$distance <- as.numeric(lesscol$distance)
  lesscol$species <- tolower(lesscol$species)
  # Attach the file-name metadata to every observation row of this night.
  lesscol$year <- as.numeric(names$year)
  lesscol$month <- as.numeric(names$month)
  lesscol$day <- as.numeric(names$day)
  lesscol$obs <- names$obs
  lesscol$round <- names$round
  lesscol$region <- names$region
  lesscol$area <- names$area
  lesscol$impound <- names$impound
  lesscol$treat <- names$treat
  lesscol$night <- names$night
  lesscol$odat <- ordinal_date_con(lesscol[,c("month","day","year")])
  nights[[i]] <- lesscol
}
masterdat <- do.call(rbind, nights)
# how many rails have we seen now?
nrow(masterdat)
# how about broken down by me and my tech, because I am competitive
table(masterdat$obs, masterdat$species)
# how about by round
table(masterdat$round)
# and region
table(masterdat$region)
# how about how many are flushing vs walking when observed (N means not recorded, which is uninteresting)
table(masterdat[masterdat$flush_walk!="N",]$flush_walk)
|
ee2e5e9da3e570d75a862d02f713e1f17eec08fd | a539a580b2cfc5a19002e1ce94c8746c82e16efe | /tests/testthat/test-column.R | acef394e7ce0cd254cdcbbd7c192c83458125fdf | [] | no_license | craiggrabowski/filterr | 4bb2ed03383f7d02579d4bd89362c4997ed70c5a | 61bc35c7b44afbe61241271027d8eda272698026 | refs/heads/master | 2021-09-07T02:46:27.221736 | 2017-11-08T04:43:57 | 2017-11-08T04:43:57 | 108,204,558 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 818 | r | test-column.R | library(filterr)
context("column")
# Constructor output is recognized by is_column().
test_that("column returns a column object", {
  f <- function(x) expect_true(is_column(column(x)))
  xx <- list(
    "x",
    "y"
  )
  lapply(xx, f)
})
# as.character() on a column yields the underlying field name.
test_that("column converts to character as field name", {
  f <- function(x) expect_equal(
    as.character(column(x)),
    x
  )
  xx <- list(
    "x"
  )
  lapply(xx, f)
})
# print() output contains the field name.
test_that("column prints as character", {
  f <- function(x) expect_output(print(column(x)), x)
  xx <- list(
    "x"
  )
  lapply(xx, f)
})
# as.column() coerces plain values through the column() constructor...
test_that("as.column is column for non-columns", {
  f <- function(x) expect_equal(as.column(x), column(x))
  xx <- list(
    "x"
  )
  lapply(xx, f)
})
# ...and is a no-op on values that are already columns.
test_that("as.column is identity for columns", {
  f <- function(x) expect_equal(as.column(x), x)
  xx <- lapply("x", column)
  lapply(xx, f)
})
|
6eac3dfc1d81807773bcde9ead5d90be1af88323 | ee1b86317331e288b06e0d409a2f6eb35201d7ee | /server.R | bd9658f22dbc1942e0ba7b9ae1022fc028d26e1e | [] | no_license | usilva12/distribucion-viajes-linea2-trolebus-merida | b4bb27fef2f818aeb361bd9e87f5876155ae6f0d | d03ccf9c90b104c200de5d01c960e2d626a536aa | refs/heads/master | 2021-06-17T16:37:36.590219 | 2017-05-27T21:34:17 | 2017-05-27T21:34:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,765 | r | server.R | library(readr)
library(igraph)
# Helper scripts: trip-matrix construction and slider-range conversion.
source('matriz_viajes.R', local = TRUE)
source('calculo_rangos.R', local = TRUE)
# Observed trips and bus counts per time interval (columns paired per node).
viajesData <- read_csv("~/Documentos/odiseo/r-shiny/time-range-slider/viajes.csv")
conteoBuses <- read_csv("~/Documentos/odiseo/r-shiny/time-range-slider/conteo_buses.csv")
# Presumably defines numNodos, used throughout the server — TODO confirm.
source('inicializar_nodos.R', local = TRUE)
flujo <- NULL  # never read in this file; likely a leftover placeholder
# Shiny server for the Línea 2 trolleybus trip-distribution app: builds an
# origin/destination trip matrix for the user-selected time window, renders
# directional flow graphs, per-node bus counts, and summary statistics.
function(input, output, session) {
  # Populate the node selector with "Nodo 1" ... "Nodo numNodos" at startup.
  observe({
    opcionesNodos = c()
    for (i in 1:numNodos) {
      opcion = paste("Nodo ", i)
      opcionesNodos = c(opcionesNodos, opcion)
    }
    updateSelectInput(session, "seleccionNodos", choices = opcionesNodos)
  })
  # TRUE when the two slider endpoints differ, i.e. a non-empty time window.
  esRango <- reactive({
    desde <-as.character.POSIXt(input$rangoTiempo[1], format = "%I:%M")
    hasta <- as.character.POSIXt(input$rangoTiempo[2], format = "%I:%M")
    return(desde != hasta)
  })
  # Origin/destination totals per node for the window, scaled by the coverage
  # and ridership-growth inputs, plus the resulting trip matrix (raw and with
  # row/column names for display).
  viajesInfo <- reactive({
    if (esRango()) {
      rango <- calcularRango(input)
      origenes <- c()
      destinos <- c()
      # viajesData layout: column 2*i holds origins and 2*i + 1 destinations
      # for node i (column 1 presumably the interval label — TODO confirm).
      for (i in 1:numNodos){
        origenes[i] <- sum(viajesData[rango$desde:rango$hasta, i*2])
        destinos[i] <- sum(viajesData[rango$desde:rango$hasta, i*2 + 1])
      }
      porcentajeCobertura <- input$coberturaTransportePublico/100
      porcentajeAmento <- input$aumentoUsoTransporte/100
      factorAumento <- porcentajeCobertura * (1 + porcentajeAmento)
      origenes <- ceiling(origenes * factorAumento)
      destinos <- ceiling(destinos * factorAumento)
      viajes <- matrizViajes(origenes, destinos)
      viajesMarco <- viajes
      nombres = c()
      for (i in 1:numNodos) {
        nombre = paste("Nodo ", i)
        nombres = c(nombres, nombre)
      }
      colnames(viajesMarco) <- nombres
      rownames(viajesMarco) <- nombres
      return(list(
        viajes = viajes,
        origenes = origenes,
        destinos = destinos,
        viajesMarco = viajesMarco
      ))
    }
  })
  # HTML caption for the trip matrix, showing the selected time window.
  output$leyendaMatriz <- renderText({
    if (esRango()) {
      paste("<b><font size=\"3\">Matriz de Viajes ",
            as.character.POSIXt(input$rangoTiempo[1],
                                format = "%I:%M%p"),
            " - ",
            as.character.POSIXt(input$rangoTiempo[2],
                                format = "%I:%M%p"),
            "</font></b>")
    }
  })
  output$matrizViajes <- renderTable({
    viajesInfo()$viajesMarco
  }, rownames = TRUE, colnames = TRUE,
  digits = 0, width = "100%")
  # HTML caption for the node-pair flow section.
  output$tituloFlujos <- renderText({
    if (esRango()) {
      paste("<b><font size=\"3\">Flujo entre pares consecutivos de nodos ",
            as.character.POSIXt(input$rangoTiempo[1],
                                format = "%I:%M%p"),
            " - ",
            as.character.POSIXt(input$rangoTiempo[2],
                                format = "%I:%M%p"),
            "</font></b>")
    }
  })
  # Defines grafoCompleto() used below.
  source('dibujar_grafo_total.R', local = TRUE)
  # Circular layout of the full network; edge width scaled by total flow.
  output$grafoCompleto <- renderPlot({
    if (esRango()) {
      plot(grafoCompleto(),
           edge.label = paste(E(grafoCompleto())$suma),
           vertex.label.dist=0,
           vertex.label.cex=2,
           vertex.size = 20,
           edge.width = E(grafoCompleto())$suma*0.0018,
           edge.curved= T,
           edge.loop.angle=pi/4,
           edge.loop.angle2=pi/4,
           edge.arrow.size = 1.5,
           edge.label.cex = 1.2,
           layout=layout.circle)
      title(main = "Red de Flujo Linea 2")
    }
  }, height = 700, width = 700)
  # Defines grafos() used below (per-direction graphs).
  source('dibujo_grafos.R', local = TRUE)
  # Upward direction (Centro -> La Hechicera); edge width normalized by the
  # first quartile of the direction's flows.
  output$grafoSubida <- renderPlot({
    if (esRango()) {
      grafo <- grafos()$grafoSubiendo
      plot.igraph(grafo,
                  edge.label = paste(E(grafo)$FlujoS),
                  vertex.label.dist = 0,
                  vertex.label.cex = 2,
                  vertex.size = 35,
                  vertex.color = 'dodgerblue',
                  edge.width = E(grafo)$FlujoS/quantile(E(grafo)$FlujoS)[2],
                  edge.curved = 0,
                  edge.arrow.size = 2,
                  edge.label.cex = 1.2
      )
      title(main = "Sentido Centro - La Hechicera")
    }
  })
  output$grafoSubidaRedim <- renderUI({
    plotOutput("grafoSubida", height = 700, width = 700)
  })
  # Downward direction (La Hechicera -> Centro).
  output$grafoBajada <- renderPlot({
    if (esRango()) {
      grafo <- grafos()$grafoBajando
      plot.igraph(grafo,
                  edge.label = paste(E(grafo)$FlujoB),
                  vertex.label.dist = 0,
                  vertex.label.cex = 2,
                  vertex.size = 35,
                  vertex.color = 'dodgerblue',
                  edge.width = E(grafo)$FlujoB/quantile(E(grafo)$FlujoB)[2],
                  edge.curved = 0,
                  edge.arrow.size = 2,
                  edge.label.cex = 1.2
      )
      title(main = "Sentido La Hechicera - Centro")
    }
  })
  output$grafoBajadaRedim <- renderUI({
    plotOutput("grafoBajada", height = 700, width = 700)
  })
  # Bus counts for the selected node over the selected interval range.
  # NOTE(review): after the search loop the code indexes with the leftover
  # loop variable `i` (equal to `nodo`); `if (i != 8)` presumably means the
  # terminal node has no return-direction column — confirm against numNodos.
  buses <- reactive({
    seleccion <- input$seleccionNodos
    nodo <- c()
    for (i in 1:numNodos) {
      if (seleccion == paste("Nodo ", i)) {
        nodo <- i
        break
      }
    }
    rango <- calcularRango(input)
    buses <- c()
    busesSubida <- conteoBuses[rango$desde:rango$hasta, i*2]
    busesSubida <- as.vector(as.matrix(busesSubida))
    busesBajada <- NULL
    if (i != 8) {
      busesBajada <- conteoBuses[rango$desde:rango$hasta, i*2 + 1]
      busesBajada <- as.vector(as.matrix(busesBajada))
    }
    intervalos <- conteoBuses[rango$desde:rango$hasta, 1]
    return(list(
      busesSubida = busesSubida,
      busesBajada = busesBajada,
      intervalos = as.vector(as.matrix(intervalos)),
      rango = rango
    ))
  })
  # Line plot of upward bus counts, x axis labeled with interval names.
  output$busesSubida <- renderPlot({
    if (esRango()) {
      ejeX <- buses()$rango$desde:buses()$rango$hasta
      plot(ejeX,
           buses()$busesSubida,
           type = "o",
           xlab = "",
           ylab = "Número de buses",
           axes = FALSE)
      axis(2)
      axis(1,
           at = ejeX,
           labels = buses()$intervalos,
           las = 2)
      box()
      title(main = "Conteo de buses en sentido Centro - La Hechicera")
    }
  })
  # Same plot for the downward direction; skipped when that data is absent.
  output$busesBajada <- renderPlot({
    if (esRango() & !is.null(buses()$busesBajada)) {
      ejeX <- buses()$rango$desde:buses()$rango$hasta
      plot(ejeX,
           buses()$busesBajada,
           type = "o",
           xlab = "",
           ylab = "Número de buses",
           axes = FALSE)
      axis(2)
      axis(1,
           at = ejeX,
           labels = buses()$intervalos,
           las = 2)
      box()
      title(main = "Conteo de buses en sentido La Hechicera - Centro")
    }
  })
  # Per-node summary stats (total/min/max/mean/var/sd) of raw trip counts in
  # the window, separately for origins and destinations. Columns 2..2N+1 of
  # viajesData are sliced first, so 2*i-1 / 2*i index node i's origin /
  # destination within the slice.
  estadisticas <- reactive({
    if (esRango()) {
      rango <- calcularRango(input)
      viajes <- viajesData[rango$desde:rango$hasta, 2:(2*numNodos+1)]
      estadisticasOrigenes <- NULL
      estadisticasDestinos <- NULL
      for (i in 1:numNodos) {
        origenes <- as.vector(as.matrix(viajes[, 2*i-1]))
        destinos <- as.vector(as.matrix(viajes[, 2*i]))
        estadisticasOrigenes <- rbind(estadisticasOrigenes, c(paste(i),
                                                              sum(origenes),
                                                              min(origenes),
                                                              max(origenes),
                                                              round(mean(origenes), 2),
                                                              round(var(origenes), 2),
                                                              round(sd(origenes), 2)
        ))
        estadisticasDestinos <- rbind(estadisticasDestinos, c(paste(i),
                                                              sum(destinos),
                                                              min(destinos),
                                                              max(destinos),
                                                              round(mean(destinos), 2),
                                                              round(var(destinos), 2),
                                                              round(sd(destinos), 2)
        ))
      }
      nombresColumnas <- c("Nodo",
                           "Total",
                           "Mínimo",
                           "Máximo",
                           "Media",
                           "Varianza",
                           "Desviación Estándar")
      colnames(estadisticasOrigenes) <- nombresColumnas
      colnames(estadisticasDestinos) <- nombresColumnas
      return(list(
        origenes = estadisticasOrigenes,
        destinos = estadisticasDestinos
      ))
    }
  })
  # Captions and tables for the statistics section.
  output$tituloEstOrigenes <- renderText({
    if (esRango()) {
      paste("<b><font size=\"3\">Estadísticas sobre viajes de origen ",
            as.character.POSIXt(input$rangoTiempo[1],
                                format = "%I:%M%p"),
            " - ",
            as.character.POSIXt(input$rangoTiempo[2],
                                format = "%I:%M%p"),
            "</font></b>")
    }
  })
  output$estadisticasOrigenes <- renderTable({
    estadisticas()$origenes
  }, width = "100%")
  output$tituloEstDestinos <- renderText({
    if (esRango()) {
      paste("<b><font size=\"3\">Estadísticas sobre viajes de destino ",
            as.character.POSIXt(input$rangoTiempo[1],
                                format = "%I:%M%p"),
            " - ",
            as.character.POSIXt(input$rangoTiempo[2],
                                format = "%I:%M%p"),
            "</font></b>")
    }
  })
  output$estadisticasDestinos <- renderTable({
    estadisticas()$destinos
  }, width = "100%")
}
|
34d3922d34687b924fb07d8631aed08680d3c1cf | f5224269ceced4aaeb094a2a16096794c9ce2761 | /SARS-CoV-2_NCATS_ACTIV_Combo/scripts/3.1_fit_drc_dose_response.R | de2d0fc31ce135cc44fe4a61172efa61f5a2e8f9 | [
"MIT"
] | permissive | jilimcaoco/MPProjects | 2842e7c3c358aa1c4a5d3f0a734bb51046016058 | 5b930ce2fdf5def49444f1953457745af964efe9 | refs/heads/main | 2023-06-15T04:00:46.546689 | 2021-06-29T02:57:46 | 2021-06-29T02:57:46 | 376,943,636 | 0 | 0 | MIT | 2021-06-29T02:57:47 | 2021-06-14T20:08:32 | null | UTF-8 | R | false | false | 5,663 | r | 3.1_fit_drc_dose_response.R |
library(plyr)
library(tidyverse)
library(MPStats)
well_scores <- readr::read_tsv("intermediate_data/well_scores.tsv")
# Single-agent wells pulled out of the combination plate: wells where the
# partner compound is DMSO, taken from either orientation, stacked into one
# table with a unified `compound`/`log_dose` pair.
single_agent_well_scores <- dplyr::bind_rows(
  well_scores %>%
    dplyr::filter(sample_label_2 == "DMSO") %>%
    dplyr::mutate(
      compound = sample_label_1,
      log_dose = log10(dose1) - 9),
  well_scores %>%
    dplyr::filter(sample_label_1 == "DMSO") %>%
    dplyr::mutate(
      compound = sample_label_2,
      log_dose = log10(dose2) - 9)) %>%
  # NOTE(review): log10(dose) - 9 suggests doses stored in nM converted to
  # log10(M); the plot axis below says (uM) — confirm the intended unit.
  dplyr::mutate(
    is_control = FALSE,
    prob_positive = n_positive / count,
    cell_count = count) %>%
  dplyr::filter(compound != "DMSO")
# Fit a 4-parameter logistic (drc::L.4) curve of score (prob_positive) vs
# log_dose, separately per compound. Returns one row per predicted point
# (100 points spanning each compound's observed dose range) carrying the
# fitted coefficients and the chi-squared no-effect test. Compounds whose fit
# fails are dropped with a console message instead of aborting the run.
fit_drc_score_by_dose <- function(well_scores){
  fits <- well_scores %>%
    plyr::ddply(c("compound"), function(curve_data){
      tryCatch({
        # weights <- 1/MPStats:::binomial_variance(curve_data$n_positive, curve_data$cell_count, prior_positive=10, prior_negative=10)
        # weights <- weights * nrow(curve_data)/sum(weights)
        fit <- drc::drm(
          formula=prob_positive ~ log_dose,
          # weights=weights,
          data=curve_data,
          fct=drc::L.4(fixed=c(NA, NA, NA, NA)))  # all four parameters free
        # Dense grid over the observed dose range for a smooth plotted curve.
        log_dose <- seq(min(curve_data$log_dose), max(curve_data$log_dose), length.out=100)
        pred_value <- predict(fit, expand.grid(log_dose, 1))
        # No-effect test: chi-squared statistic, degrees of freedom, p-value.
        test_no_fit <- drc::noEffect(fit) %>% data.frame
        # NOTE(review): drc::L.4 orders coefficients b (slope), c (lower),
        # d (upper), e (ED50); bottom is hard-coded to 0 and top/ic50 index
        # coefficients 2 and 3 — confirm these pick the intended entries.
        data.frame(log_dose, pred_value) %>%
          dplyr::mutate(
            slope=fit$coefficients[1],
            bottom=0,
            top=fit$coefficients[2],
            ic50=fit$coefficients[3],
            chi_squared_test = test_no_fit$.[1],
            degrees_of_freedom = test_no_fit$.[2],
            p_value = test_no_fit$.[3] %>% signif(2)) %>%
          return()
      }, error=function(e){
        cat("ERROR: Failed to fit curve for compound: ", curve_data$compound[1], "\n", sep="")
        cat("ERROR: ", e$message, "\n", sep="")
        return(data.frame())  # empty frame -> ddply drops this compound
      })
    })
}
# One fitted dose-response curve per compound for the single-agent wells.
fits <- single_agent_well_scores %>%
  fit_drc_score_by_dose()
# Posterior quantile for a Poisson rate given counts: with a flat prior the
# rate posterior is Gamma(shape = sum(count), rate = number of observations).
poisson_quantile <- function(count, p) {
  total_events <- sum(count)
  n_obs <- length(count)
  qgamma(p, shape = total_events, rate = n_obs)
}
# Faceted (one panel per compound) dose-response figure: blue fitted score
# curve with binomial 95% interval points, overlaid with green cell counts
# rescaled onto a sqrt-scale secondary axis, plus a fit p-value indicator.
# Returns the ggplot object (invisibly, via the final assignment).
plot_drc_score_by_dose <- function(well_scores, fits, subtitle=NULL){
  # Pooled positive counts and binomial 95% intervals per compound x dose.
  compound_dose_scores <- well_scores %>%
    dplyr::filter(!is_control) %>%
    dplyr::group_by(log_dose, compound) %>%
    dplyr::summarize(
      n_positive = sum(n_positive),
      cell_count = sum(cell_count),
      prob_positive = n_positive/cell_count,
      prob_positive_low = MPStats::binomial_quantile(n_positive, cell_count, .025),
      prob_positive_high = MPStats::binomial_quantile(n_positive, cell_count, .975)) %>%
    dplyr::ungroup()
  # mean and 95% credible intervals for cell count by compound dose on the sqrt scale
  compound_cell_count <- well_scores %>%
    dplyr::filter(!is_control) %>%
    dplyr::group_by(log_dose, compound) %>%
    dplyr::summarize(
      mean = cell_count %>% mean %>% sqrt,
      low = poisson_quantile(cell_count, .025) %>% sqrt,
      high = poisson_quantile(cell_count, .975) %>% sqrt) %>%
    dplyr::ungroup()
  # Map the sqrt cell counts into the score axis range [0, .13].
  compound_cell_count_scale_factor <- compound_cell_count$high %>% max * 1/.13
  compound_cell_count <- compound_cell_count %>%
    dplyr::mutate(
      scaled_mean = mean / compound_cell_count_scale_factor,
      scaled_low = low / compound_cell_count_scale_factor,
      scaled_high = high / compound_cell_count_scale_factor)
  p <- ggplot2::ggplot() +
    ggplot2::theme_bw() +
    # cell counts (green, secondary axis)
    ggplot2::geom_smooth(
      data=compound_cell_count,
      mapping=ggplot2::aes(
        x=log_dose,
        y=scaled_mean),
      color="green",
      size=1.5,
      method="loess",
      se=FALSE) +
    ggplot2::geom_errorbar(
      data=compound_cell_count,
      mapping=ggplot2::aes(
        x=log_dose,
        ymin=scaled_low,
        ymax=scaled_high),
      color="darkgreen") +
    ggplot2::geom_point(
      data=compound_cell_count,
      mapping=ggplot2::aes(
        x=log_dose,
        y=scaled_mean),
      color="darkgreen") +
    # scores (blue, primary axis)
    ggplot2::geom_line(
      data=fits,
      mapping=ggplot2::aes(
        x=log_dose,
        y=pred_value),
      color="blue",
      size=1.5) +
    ggplot2::geom_errorbar(
      data=compound_dose_scores,
      mapping=ggplot2::aes(
        x=log_dose,
        ymin=prob_positive_low,
        ymax=prob_positive_high),
      color="darkblue") +
    ggplot2::geom_point(
      data=compound_dose_scores,
      mapping=ggplot2::aes(
        x=log_dose,
        y=prob_positive),
      color="darkblue") +
    # per-facet fit p-value indicator (geom_indicator from MPStats)
    geom_indicator(
      data=fits %>% dplyr::distinct(compound, p_value),
      mapping=ggplot2::aes(
        indicator=paste0("fit: ", p_value)),
      xpos="left",
      ypos="top",
      group=1) +
    ggplot2::ggtitle(
      label="Score by log dose",
      subtitle=subtitle) +
    ggplot2::scale_x_continuous(
      "log[Compound dose] (uM)") +
    # Secondary axis breaks are sqrt-scale positions labeled with raw counts
    # (labels are the squares of breaks*scale_factor: 0, 100, 400, ...).
    ggplot2::scale_y_continuous(
      "Score",
      limits=c(0,.13),
      labels=scales::percent_format(),
      sec.axis = ggplot2::dup_axis(
        name = "Cell Count",
        breaks=c(0,10,20,30,40,50,60,70,80)/(compound_cell_count_scale_factor),
        labels=c(0,100,400,900,1600,2500,3600,4900,6400))) +
    ggplot2::facet_wrap(~compound, scales="free_x")
}
# Build the faceted single-agent dose-response figure and save it as PDF + PNG.
plot <- single_agent_well_scores %>%
  plot_drc_score_by_dose(
    fits = fits)
# FIX: pass the plot explicitly instead of relying on ggplot2::last_plot(),
# and spell out `height` — the original `heigh` only worked through partial
# argument matching.
ggplot2::ggsave(
  filename = "product/figures/single_agent_dose_response_20201120.pdf",
  plot = plot,
  width = 6,
  height = 4)
ggplot2::ggsave(
  filename = "product/figures/single_agent_dose_response_20201120.png",
  plot = plot,
  width = 6,
  height = 4)
|
624623ed91c43c4e7ea337e0907bea8a041cc69d | 5292d2709b99226212ab2194fcd4a60bfaaea93f | /RAnalysis/Scripts/Resp_rate_calculation.R | 1d8a7ce8eeb35dd0b5082108132cb2538449bdee | [] | no_license | SamGurr/Juvenile_geoduck_OA | 307b233c5c616c532da9fe998d5ad96ae864a5f7 | 7e48e401fbc1277d3b7c106906e952fb19955c39 | refs/heads/master | 2020-04-13T09:46:30.041264 | 2020-01-24T14:52:16 | 2020-01-24T14:52:16 | 163,120,436 | 1 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 7,224 | r | Resp_rate_calculation.R | # Project: Juvenile_Geoduck_OA
# Title: Resp_rate_calculation
# Supported by: FFAR
# Author: Sam Gurr
# Date Updated: 20190410
# Contact: samuel_gurr@uri.edu
# NOTE(review): rm(list=ls()) wipes the caller's workspace — acceptable for a
# stand-alone script, hostile when sourced from another session.
rm(list=ls())
# Install packages if not already in your library
if ("plyr" %in% rownames(installed.packages()) == 'FALSE') install.packages('plyr')
# Load packages and package version/date/import/depends info
library(plyr) # Version 1.8.4, Packaged: 2016-06-07, Depends: R (>= 3.1.0) Imports: Rcpp (>= 0.11.0)
# NOTE(review): machine-specific absolute path; breaks on any other machine.
setwd("C:/Users/samjg/Documents/My_Projects/Juvenile_geoduck_OA/RAnalysis/Data/SDR_data/All_data_csv") #set working directory
main<-getwd()
# path used in the loop to each file name indicated in Table_files (matrix of filenames created below)
path<-"Cumulative_output/" #the location of all your respiration files
# make a table of the ten LoLin output filenames (reference + 3 alphas x 3 windows) to iterate over
table_o_files <- data.frame(matrix(nrow = 10))
table_o_files$rownumber <- c(1,2,3,4,5,6,7,8,9,10)
table_o_files$name <- c("Cumulative_resp_reference_all.csv",
                        "Cumulative_resp_alpha0.2_all.csv", "Cumulative_resp_alpha0.2_min10-20.csv", "Cumulative_resp_alpha0.2_min10-25.csv",
                        "Cumulative_resp_alpha0.4_all.csv", "Cumulative_resp_alpha0.4_min10-20.csv", "Cumulative_resp_alpha0.4_min10-25.csv",
                        "Cumulative_resp_alpha0.6_all.csv", "Cumulative_resp_alpha0.6_min10-20.csv", "Cumulative_resp_alpha0.6_min10-25.csv")
table_o_files$matrix.nrow...10. <- NULL # delete the initial row
# make a dataframe to rbind the output
cumulative_respCALC <- data.frame(matrix(nrow = 384, ncol = 16)) # 384 is the size of each file (16*24)
# composed of cumulative LoLin regression analysis of 16 files with
# 24 measurements (rows) each ( 24-well SDR plate)
# names here are the same order as the imported files in table_o_filenames$name
colnames(cumulative_respCALC) <- c("Resp_individually_all.csv",
                                   "LpcResp_alpha0.2_all.csv", "LpcResp_alpha0.2_min10-20.csv", "LpcResp_alpha0.2_min10-25.csv",
                                   "LpcResp_alpha0.4_all.csv", "LpcResp_alpha0.4_min10-20.csv", "LpcResp_alpha0.4_min10-25.csv",
                                   "LpcResp_alpha0.6_all.csv", "LpcResp_alpha0.6_min10-20.csv", "LpcResp_alpha0.6_min10-25.csv", # now has 10 columns for each file output
                                   "Date", "SDR_pos", "RUN", "tank", "Day_trial", "Treat1_Treat2")
# OUTSIDE FOR LOOP
# About this loop: The following script is used to calculate and standardize respriration rate data as µg_O2_hr-1_mm.shell.length-1
# calls each target file in the data.frame created above "cumulative_respCALC"
for(i in seq_len(nrow(table_o_files))){ # this for loop sorts and integrates average blank values for each datafile...
  resp<-read.csv(file.path(path, (table_o_files[i,2])), header=T, sep=",", na.string="NA", as.is=T)
  resp_sorted <- resp[
    with(resp, order(resp$Date, resp$RUN)), # order the data by Date and Run
    ]
  # name a new column that will can call common blanks, calculate a mean, and integrate back into a the dataset
  resp_sorted$newname <- paste(
    resp_sorted$Date, (substr(resp_sorted$tank, 1, 4)), (substr(resp_sorted$ID, 6, 10)), sep="_") # combine date with tray and vial IDs as "newname"
  dataMEANS <- ddply(resp_sorted, "newname", summarise, mean = mean(abs(b1.Lpc))) # summarise by "newname" to get mean values of the raw output O2 data in umol min-1
  MEANSblanks <- dataMEANS[(which(nchar(dataMEANS$newname) == 19)),] # Blank "newname" values are 19 characters - can call just blanks in this line
  resp_sorted_2 <- merge(resp_sorted,MEANSblanks, by="newname", all = TRUE, sort = T) # new resp data file with triplicate mean blank values added
  resp_sorted_2$row <- seq.int(nrow(resp_sorted_2)) # number the rows 1 - the end
  # NOTE: now that the mean blank values are in "resp_sorted_2", we can correct the raw umol min-1 data to the blanks
  # each respiration "run" had three vials for each tray (10 animals per vial, n = 30 total animals)
  # and three blank seawater samples from that corresponding tray - therefore we can correct just by calling +3 rows ahead of each animal resp rate (below)
  # FIX: the former inner for-loop over rows re-ran these two fully vectorized
  # assignments once per row (accidental O(n^2)); executing them once yields
  # the identical result. Indexing past the end (row+3 beyond nrow) gives NA,
  # which is what flags the blank rows themselves.
  # correct for the mean blank value always three rows ahead
  resp_sorted_2$resprate_CORRECTED <- ((abs(resp_sorted_2$b1.Lpc)) - (resp_sorted_2$mean[(resp_sorted_2$row+3)])) # correct to blank
  # standardize by the volume of the SDR vial (4 ml) and convert to ug hr-1 mm shell size-1 (blanks become NA in "resprate_CALC")
  resp_sorted_2$resprate_CALC <- ((((resp_sorted_2$resprate_CORRECTED/(1000/4))*(60))*31.998)/(resp_sorted_2$number_indivs*resp_sorted_2$mean_size)) # convert and standardize
  # This small function eliminates a dataframe's rows based on NA of certain columns
  # In this case, the NA are in resprate_CALC
  desiredCols <- resp_sorted_2[,c(3,4,5,7,10,11,12,16,19)]
  completeFun <- function(resp_sorted_2, desiredCols) {
    completeVec <- complete.cases(resp_sorted_2[, desiredCols])
    return(resp_sorted_2[completeVec, ])
  }
  # run completeFun
  # NOTE(review): this filtered result is immediately overwritten below, so
  # the blank rows are retained here — presumably intentional, since
  # cumulative_respCALC expects the full 384 rows per file. Confirm before
  # removing this dead assignment.
  resp_FINAL<-completeFun(resp_sorted_2, "resprate_CALC")
  # order the dataframe
  resp_FINAL <- resp_sorted_2[
    with(resp_sorted_2, order(resp_sorted_2$Date, resp_sorted_2$RUN, resp_sorted_2$SDR_position)),
    ]
  # start a cumulative data sheet for each outer for loop of resp values for each dataset
  # each filename in the outside for loop is a new column in "cumulative_respCALC"
  cumulative_respCALC[,i] <- resp_FINAL$resprate_CALC
  print(cumulative_respCALC) # view progress of the loops in console
  # write the output
  cumulative_respCALC[,11] <- resp_sorted$Date # add date
  cumulative_respCALC[,12] <- resp_sorted$SDR_position # add sdr position ID
  cumulative_respCALC[,13] <- resp_sorted$RUN # add run number
  cumulative_respCALC[,14] <- resp_sorted$tank # add tray ID
  cumulative_respCALC[,15] <- resp_sorted$Day_Trial # add the day and run
  cumulative_respCALC[,16] <- resp_sorted$Treat1_Treat2 # add treatments
  cumulative_respCALC[,c(11,12,13,14,15,16,1,2,3,4,5,6,7,8,9,10)] # re organize for clarity
} # closed OUTSIDE for loop
# This small function eliminates a data frame's rows based on NA in given columns
# In this case, the NA are in resprate_CALC
desiredCols2 <- cumulative_respCALC[,c(11,12,13,14,15,16,1,2,3,4,5,6,7,8,9,10)] # we want all of the columns
# Keep only rows that are complete (non-NA) in the named column(s).
completeFun <- function(cumulative_respCALC, desiredCols2) {
  completeVec <- complete.cases(cumulative_respCALC[, desiredCols2])
  return(cumulative_respCALC[completeVec, ])
}
# eliminates all rows with NA in "LpcResp_alpha0.6_min10-25.csv"
# (presumably any rate column would do, since blanks are NA across rate
# columns — confirm)
cumulative_respCALC_FINAL <- resp_FINAL<-completeFun(cumulative_respCALC, "LpcResp_alpha0.6_min10-25.csv") # choose an example of a data file
print(cumulative_respCALC_FINAL)
write.table(cumulative_respCALC_FINAL,"All_resp_calc_and_standardized.csv",sep=",", row.names=FALSE) # write final table
|
e2aea512ac374c964a5d855b30e20b5cd631ab9a | 0b7d31819adf568850f5a9c7f3d454c39343b7ef | /ex8.R | 7c2de8957e488d5fa8ef51822975634353afc0ce | [] | no_license | cccccx/DA | e79c9d6a6d3f2127fcb41e6f27511ed53e67db19 | 47d2444c020aba5a1da80835e6144b83a3dc5e7d | refs/heads/master | 2020-05-07T22:16:16.968607 | 2015-04-23T12:37:27 | 2015-04-23T12:37:27 | 30,365,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,509 | r | ex8.R | library("NLP")
library("openNLP")
library("openNLPmodels.en")
library("tm")
library("tau")
library("koRpus")
library("SnowballC", lib.loc="/Library/Frameworks/R.framework/Versions/3.1/Resources/library")
library("topicmodels", lib.loc="/Library/Frameworks/R.framework/Versions/3.1/Resources/library")
# Load the labeled news-article data set exported as r2.csv.
`r2` <- read.csv("~/Desktop/dm/r2.csv")
data<-r2
#Task 1:Explore the data and undertake any cleaning/pre-processing that you deem necessary for the data to be analysed.
# Drop documents flagged as unused for the train/test split.
used.data<-data[-which(data$purpose == "not-used"),]
# Build a corpus from the raw article text (column 123) and normalize encoding.
data.text<-VectorSource(used.data[,123])
text<-Corpus(data.text)
text <- tm_map(text,content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte')),mc.cores=1)
# Strip whitespace
text <- tm_map(text, stripWhitespace)
# Remove numbers
text <- tm_map(text, removeNumbers)
# Remove punctuation
text <- tm_map(text, removePunctuation)
# Lower-case all words.
# NOTE(review): bare tolower (instead of content_transformer(tolower)) can
# strip the tm document class in newer tm versions; the PlainTextDocument
# mapping below appears to restore it — confirm on the installed tm version.
text <- tm_map(text, tolower)
# Remove English stopwords
text <- tm_map(text, removeWords, stopwords("english"))
# Stem words in each document using Porter's stemming algorithm
text <- tm_map(text, stemDocument)
text <- tm_map(text, PlainTextDocument)
#Task 2:Obtain feature representations of the documents/news articles as discussed in the text mining lectures.
# Document-term matrix; separate out documents that became empty after cleaning.
dtm <- DocumentTermMatrix(text)
rowTotals <- apply(dtm , 1, sum) #Find the sum of words in each Document
dtm111.new <- dtm[rowTotals> 0, ] #remove all docs without words
dtm.del<-dtm[rowTotals==0,]
dtmdel<-as.data.frame(inspect(dtm.del))
# Drop sparse terms: keep only terms present in at least ~8% of documents.
dtm2 <- removeSparseTerms(dtm111.new,sparse=0.92)
data.dtm2<-as.data.frame(inspect(dtm2))
#Keep all the terms from DTM, and make DTM binary weight
# Binarize the reduced DTM: any nonzero term count becomes 1 (term presence).
# FIX: replaces a hard-coded 9849 x 64 element-wise double loop with a single
# vectorized logical-matrix assignment over the actual dimensions.
data.dtm2[data.dtm2 != 0] <- 1
write.csv(data.dtm2, "DTM_Feature_fin.csv", row.names = F)
#LDA
# Fit a 10-topic LDA (VEM, alpha = 0.1) on the full, non-sparse-pruned DTM.
data.dtm1<-as.data.frame(inspect(dtm))
rowTotals <- apply(data.dtm1 , 1, sum) #Find the sum of words in each Document
dtm.new <- data.dtm1[rowTotals> 0, ] #remove all docs without words
lda <- LDA(dtm.new, control = list(alpha = 0.1), k = 10, method = "VEM")
terms(lda,10)
term<- terms(lda,10)
# Top-10 terms of every topic, flattened then de-duplicated: the LDA vocabulary.
lda.terms <- c(term[,1], term[,2],term[,3], term[,4], term[,5], term[,6], term[,7], term[,8], term[,9], term[,10])
unique.terms <- unique(lda.terms)
topics <- topics(lda)
length.topics <- length(topics(lda))
col.name <- unique.terms
lda.features <- as.data.frame(matrix(NA, length.topics, length(col.name)))
colnames(lda.features) <- col.name
# One row per document: 1 for each top term of the document's assigned topic,
# 0 elsewhere.
for(i in 1:length(topics)){
  txt<-match(term[,topics[i]], unique.terms)
  lda.features[i,]<-0
  lda.features[i,txt]<-1
}
write.csv(lda.features, "LDA_Feature_fin.csv", row.names = F)
#Combine DTM features with LDA features
# Union of the two vocabularies, duplicate feature names removed.
fin.data.col.name<-c(colnames(lda.features),colnames(data.dtm2))
uniqcol.name<-unique(fin.data.col.name)
fin.features.data<-as.data.frame(matrix(0,9849,length(uniqcol.name)))
colnames(fin.features.data)<-uniqcol.name
fincolname<-colnames(fin.features.data)
#Get the final features data
# For each document, set 1 wherever either feature source flagged the term.
# NOTE(review): the row count 9849 is hard-coded — confirm it matches
# nrow(data.dtm2) if the corpus or filtering changes.
for( i in 1:9849){
  matchDTM <- match(fincolname, colnames(data.dtm2)[which(data.dtm2[i,] == 1)], nomatch = 0) != 0
  matchLDA<- match(fincolname, colnames(lda.features)[which(lda.features[i,] == 1)], nomatch = 0) != 0
  fin.features.data[i,matchDTM]<-1
  fin.features.data[i,matchLDA]<-1
}
write.csv(fin.features.data, "finfeatures.csv", row.names = F)
#Task3: Build classifiers, using R libraries to predict the TOPICS tags for documents.
# Recover the cleaned text from the tm corpus `text` (defined upstream) and
# attach it as column 124 of a copy of the raw data.
used.data2<-used.data
dataframe<-data.frame(text=unlist(sapply(text, `[`, "content")),
stringsAsFactors=F)
used.data2[,124]<-NA
used.data2[,124]<-dataframe
# Drop documents whose processed text is empty or missing.
row.del<-c(which(used.data2[,124] == ""),which(is.na(used.data2[,124])))
data.final<-used.data2[-row.del,]
# The ten Reuters topic labels that will be predicted.
ten.top<-c("topic.earn", "topic.acq",
"topic.money.fx", "topic.grain",
"topic.crude", "topic.trade",
"topic.interest", "topic.ship",
"topic.wheat", "topic.corn")
# Column positions of the ten topic indicator columns in data.final.
col.data<-c()
for(i in 1:10){
col.data<-c(col.data, which(colnames(data.final) == ten.top[i]))
}
# Link purpose (column 3) and the topic indicators with the selected features.
data.for.cla<-cbind(data.final[,c(3,col.data)],fin.features.data)
# Since some docs belong to more than one topic, duplicate each multi-topic
# row once per topic so every resulting row carries exactly one class label.
data.for.cla[,"class"]<-0
cla<-ncol(data.for.cla)
for(i in 1:9849){
if(sum(data.for.cla[i,2:11]) > 1){
for(j in 2:11){
if(data.for.cla[i,j] == 1){
newr<-data.for.cla[i,]
newr[,2:11]<-0
newr[cla]<-colnames(data.for.cla)[j]
data.for.cla<-rbind(data.for.cla,newr)
}
}
}
if(sum(data.for.cla[i,2:11]) == 1){
# NOTE(review): this scans columns 1:11 while the topic columns are 2:11;
# it works only because column 1 (`purpose`) is never equal to 1 -- 2:11
# would be clearer and safer.
data.for.cla[i,cla]<-colnames(data.for.cla)[which(data.for.cla[i,1:11] == 1)]
}
}
# Clean data: drop rows that never received a class label, then remove the
# now-redundant raw topic indicator columns.
data.for.cla1<-data.for.cla[-which(data.for.cla[,cla] == 0),]
drops<-c("topic.earn", "topic.acq",
"topic.money.fx", "topic.grain",
"topic.crude", "topic.trade",
"topic.interest", "topic.ship",
"topic.wheat", "topic.corn")
data.df<-data.for.cla1[,!(names(data.for.cla1) %in% drops)]
# Convert every remaining column to a factor for the classifiers below.
for(i in 1:length(data.df)){
data.df[,i]<-as.factor(data.df[,i])
}
# Split on the `purpose` column into training and test sets.
train<-data.df[which(data.df$purpose == "train"),]
test<-data.df[which(data.df$purpose == "test"),]
library("e1071")
library("randomForest")
########FOR TRAIN DATA#########
######## Naive Bayes: fit and evaluate on the TRAINING data #######
naivebayes.model<-naiveBayes(class ~ ., data = train)
predict.nb<-predict(naivebayes.model, newdata = train)
# Confusion table: rows = predicted class, columns = true class.
nb.table<-table(predict.nb, train[,length(train)])
# nb.data is computed but never used downstream.
nb.data<-as.data.frame(nb.table)
coln<-c("TP", "FN", "FP", "Recall", "Precision", "Accuracy","F-measure")
NB.parameters = matrix(0, 10, 7, dimnames=list(ten.top,coln))
# Per-class TP / FN / FP and the derived recall, precision, accuracy and F1.
for(i in 1:10){
NB.parameters[i,1] = nb.table[i,i];
NB.parameters[i,2] = sum(nb.table[-i,i]);
NB.parameters[i,3] = sum(nb.table[i,-i]);
NB.parameters[i,4] = NB.parameters[i,1]/(NB.parameters[i,1]+NB.parameters[i,2]);
NB.parameters[i,5] = NB.parameters[i,1]/(NB.parameters[i,1]+NB.parameters[i,3]);
NB.parameters[i,6] = NB.parameters[i,1]/sum(nb.table);
NB.parameters[i,7]=(2*NB.parameters[i,5]*NB.parameters[i,4])/(NB.parameters[i,5]+NB.parameters[i,4])
}
NB.train.parameters<-NB.parameters
# Overall accuracy plus macro- and micro-averaged recall/precision.
nb.overall.acc<-sum(NB.parameters[,6])
nb.marco.recall<-sum(NB.parameters[,4])/10
nb.marco.precision<-sum(NB.parameters[,5])/10
nb.micro.recall<-sum(NB.parameters[,1])/(sum(NB.parameters[,1])+sum(NB.parameters[,2]))
nb.micro.precision<-sum(NB.parameters[,1])/(sum(NB.parameters[,1])+sum(NB.parameters[,3]))
### Random forest on the TRAINING data ###
rf.model <- randomForest(class~., data = train)
rf.predict <- predict(rf.model, newdata = train)
# NOTE(review): RF.table is built with observed classes as ROWS, the
# opposite orientation of nb.table above.  With this orientation
# sum(RF.table[-i,i]) counts false POSITIVES, not false negatives, so the
# "FN"/"FP" (and hence Recall/Precision) columns below are swapped relative
# to the Naive Bayes block.  The same applies to the SVM block.
RF.table <- table(observed=train[,length(train)], predicted = rf.predict)
RF.parameters = matrix(0, 10, 7, dimnames=list(ten.top,coln))
for(i in 1:10){
RF.parameters[i,1] = RF.table[i,i];
RF.parameters[i,2] = sum(RF.table[-i,i]);
RF.parameters[i,3] = sum(RF.table[i,-i]);
RF.parameters[i,4] = RF.parameters[i,1]/(RF.parameters[i,1]+RF.parameters[i,2]);
RF.parameters[i,5] = RF.parameters[i,1]/(RF.parameters[i,1]+RF.parameters[i,3]);
RF.parameters[i,6] = RF.parameters[i,1]/sum(RF.table);
RF.parameters[i,7]=(2*RF.parameters[i,5]*RF.parameters[i,4])/(RF.parameters[i,5]+RF.parameters[i,4])
}
RF.train.parameters<-RF.parameters
RF.overall.acc<-sum(RF.parameters[,6])
RF.marco.recall<-sum(RF.parameters[,4])/10
RF.marco.precision<-sum(RF.parameters[,5])/10
RF.micro.recall<-sum(RF.parameters[,1])/(sum(RF.parameters[,1])+sum(RF.parameters[,2]))
RF.micro.precision<-sum(RF.parameters[,1])/(sum(RF.parameters[,1])+sum(RF.parameters[,3]))
#### Linear-kernel SVM on the TRAINING data #####
svm.model <- svm(class~., data = train,kernel="linear" )
svm.predict <- predict(svm.model, newdata = train)
SVM.table <- table(observed=train[,length(train)], predicted = svm.predict)
SVM.parameters = matrix(0, 10, 7, dimnames=list(ten.top,coln))
for(i in 1:10){
SVM.parameters[i,1] = SVM.table[i,i];
SVM.parameters[i,2] = sum(SVM.table[-i,i]);
SVM.parameters[i,3] = sum(SVM.table[i,-i]);
SVM.parameters[i,4] = SVM.parameters[i,1]/(SVM.parameters[i,1]+SVM.parameters[i,2]);
SVM.parameters[i,5] = SVM.parameters[i,1]/(SVM.parameters[i,1]+SVM.parameters[i,3]);
SVM.parameters[i,6] = SVM.parameters[i,1]/sum(SVM.table);
SVM.parameters[i,7]=(2*SVM.parameters[i,5]*SVM.parameters[i,4])/(SVM.parameters[i,5]+SVM.parameters[i,4])
}
SVM.train.parameters<-SVM.parameters
# NOTE(review): rows 2 and 10 are zeroed manually -- presumably to replace
# NaN recalls (0/0) for classes the SVM never predicted; confirm intent.
SVM.parameters[2,4]<-0
SVM.parameters[10,4]<-0
SVM.parameters
SVM.overall.acc<-sum(SVM.parameters[,6])
SVM.marco.recall<-sum(SVM.parameters[,4])/10
SVM.marco.precision<-sum(SVM.parameters[,5])/10
SVM.micro.recall<-sum(SVM.parameters[,1])/(sum(SVM.parameters[,1])+sum(SVM.parameters[,2]))
SVM.micro.precision<-sum(SVM.parameters[,1])/(sum(SVM.parameters[,1])+sum(SVM.parameters[,3]))
#### Use these classifiers for the held-out TEST data ##
## Naive Bayes: refit on train, predict the test split.
naivebayes.model<-naiveBayes(class ~ ., data = train)
predict.nb<-predict(naivebayes.model, newdata = test)
# Rows = predicted class, columns = true class (same orientation as training).
nb.test.table<-table(predict.nb, test[,length(test)])
coln<-c("TP", "FN", "FP", "Recall", "Precision", "Accuracy")
NB.parameters1 = matrix(0, 10, 6, dimnames=list(ten.top,coln))
for(i in 1:10){
NB.parameters1[i,1] = nb.test.table[i,i];
NB.parameters1[i,2] = sum(nb.test.table[-i,i]);
NB.parameters1[i,3] = sum(nb.test.table[i,-i]);
NB.parameters1[i,4] = NB.parameters1[i,1]/(NB.parameters1[i,1]+NB.parameters1[i,2]);
NB.parameters1[i,5] = NB.parameters1[i,1]/(NB.parameters1[i,1]+NB.parameters1[i,3]);
NB.parameters1[i,6] = NB.parameters1[i,1]/sum(nb.test.table);
}
nb.accuracy<-sum(NB.parameters1[,6])
##### Random forest on the TEST data ###
rf.model1 <- randomForest(class~., data = train)
rf.predict1 <- predict(rf.model1, newdata = test)
# NOTE(review): observed classes are rows here, so the "FN"/"FP" columns
# below are swapped relative to the Naive Bayes table (see training block).
RF.table1 <- table(observed=test[,length(test)], predicted = rf.predict1)
coln<-c("TP", "FN", "FP", "Recall", "Precision", "Accuracy")
RF.parameters1 = matrix(0, 10, 6, dimnames=list(ten.top,coln))
for(i in 1:10){
RF.parameters1[i,1] = RF.table1[i,i];
RF.parameters1[i,2] = sum(RF.table1[-i,i]);
RF.parameters1[i,3] = sum(RF.table1[i,-i]);
RF.parameters1[i,4] = RF.parameters1[i,1]/(RF.parameters1[i,1]+RF.parameters1[i,2]);
RF.parameters1[i,5] = RF.parameters1[i,1]/(RF.parameters1[i,1]+RF.parameters1[i,3]);
RF.parameters1[i,6] = RF.parameters1[i,1]/sum(RF.table1);
}
# NOTE(review): manual zeroing of rows 2 and 10 -- presumably replacing
# NaN recalls (0/0) for classes never predicted; confirm intent.
RF.parameters1[2,4]<-0
RF.parameters1[10,4]<-0
RF.accuracy<-sum(RF.parameters1[,6])
RF.precision<-sum(RF.parameters1[,5])/10
RF.recall<-sum(RF.parameters1[,4])/10
##### Linear-kernel SVM on the TEST data ###
svm.model1 <- svm(class~., data = train,kernel="linear" )
svm.predict1 <- predict(svm.model1, newdata = test)
SVM.table1 <- table(observed=test[,length(test)], predicted = svm.predict1)
SVM.parameters1 = matrix(0, 10, 6, dimnames=list(ten.top,coln))
for(i in 1:10){
SVM.parameters1[i,1] = SVM.table1[i,i];
SVM.parameters1[i,2] = sum(SVM.table1[-i,i]);
SVM.parameters1[i,3] = sum(SVM.table1[i,-i]);
SVM.parameters1[i,4] = SVM.parameters1[i,1]/(SVM.parameters1[i,1]+SVM.parameters1[i,2]);
SVM.parameters1[i,5] = SVM.parameters1[i,1]/(SVM.parameters1[i,1]+SVM.parameters1[i,3]);
SVM.parameters1[i,6] = SVM.parameters1[i,1]/sum(SVM.table1);
}
SVM.accuracy<-sum(SVM.parameters1[,6])
### Task4: clustering ####
### k-means ###
# Remove the purpose and class columns so only the binary features remain.
drops123 <- c("purpose","class")
data.cluster<-data.df[,!(names(data.df) %in% drops123)]
library("cluster")
# Determine number of clusters via the elbow method: total within-group
# sum of squares for k = 1..15.
# NOTE(review): data.df columns were converted to factors above; kmeans()
# and var() require numeric input, so data.cluster must be numeric here.
wss <- (nrow(data.cluster)-1)*sum(apply(data.cluster,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(data.cluster, centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",ylab="Within groups sum of squares")
fit <- kmeans(data.cluster, 10)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
clusplot(data.cluster, fit$cluster, color=TRUE, shade=TRUE,
labels=2, lines=0)
# Centroid Plot against 1st 2 discriminant functions
library(fpc)
plotcluster(data.cluster, fit$cluster)
# (removed `plot(fit)`: there is no plot method for "kmeans" objects, so
# that call raised an error)
#### Model Based Clustering ###
library(mclust)
fit1 <- Mclust(data.cluster)
plot(fit1) # plot results
summary(fit1) # display the best model
#### Ward Hierarchical Clustering ####
d <- dist(data.cluster, method = "euclidean") # distance matrix
# "ward" is no longer an accepted hclust method name; "ward.D" is what the
# old "ward" option implemented.
fit <- hclust(d, method="ward.D")
plot(fit) # display dendrogram
groups <- cutree(fit, k=10) # cut tree into 10 clusters
# draw dendrogram with red borders around the 10 clusters
rect.hclust(fit, k=10, border="red")
|
cb4375bafd0603b39be4c00308f42989c2c58d74 | 58fd3b96113e90860cc683c1dd29250c2f965e9a | /R/svy_freq.R | 284f7828e694cb10d01bad7cfd8b2b5a4dc3ae77 | [] | no_license | dacarras/r4sda | d0a75ef2f6425dc263ff8638345e0191d58f56df | 702465d57f125551eb71250a1b2c4ba106a9cca9 | refs/heads/master | 2023-01-13T07:30:45.381142 | 2022-12-29T14:23:57 | 2022-12-29T14:23:57 | 167,874,962 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,743 | r | svy_freq.R | #' svy_freq() estimates proportion for observed values of a categorical variable, from a survey object.
#'
#' @param data a survey object, or survey data frame (i.e. TSL, JKN, BRR or else)
#' @param variable selected variable from the survey object
#'
#' @return a tibble with the estimated proportion, and their 95% confidence interval
#'
#' @examples
#'
#'
#' data_svy %>%
#' svy_freq(
#' data = .,
#' variable = selected_var)
#'
#' @export
svy_freq <- function(data, variable){
require(dplyr)
require(rlang)
svy_data <- data
# Capture the bare column name; col_name/reg_name hold it as a plain string
# for use inside the formula built below.  (var_name itself is unused.)
var_name <- enquo(variable)
col_name <- quo_name(enquo(variable))
reg_name <- tibble::tibble(variable = !!col_name) %>%
.$variable %>%
as.character()
# Distinct observed values of the variable, stripped of haven labels and
# formats, sorted, and coerced to a plain numeric vector.
list_of_values <- svy_data$variables %>%
dplyr::select(one_of(reg_name)) %>%
dplyr::distinct() %>%
r4sda::remove_labels() %>%
haven::zap_formats() %>%
dplyr::arrange(across(one_of(reg_name))) %>%
as.list() %>%
unlist() %>%
as.character() %>%
as.numeric()
# Design-based proportion (with logit-based 95% CI) for one observed value,
# via survey::svyciprop on an indicator formula, reshaped into one tibble
# row with columns response / est / ll / ul.
single_value <- function(value){
model_formula <- as.formula(paste0('~I(',reg_name,' %in% ',value,')'))
survey::svyciprop(model_formula, svy_data, method="lo", df=degf(svy_data), level = .95) %>%
c(attr(.,"ci")) %>%
tibble::as_tibble() %>%
mutate(term = c('est','ll','ul')) %>%
tidyr::gather(key = 'response', value = 'estimates', -term) %>%
tidyr::spread(key = 'term', value = 'estimates') %>%
mutate(response = as.character(value))
}
# One row per observed value, stacked into a single result tibble.
table_freq <- list_of_values %>%
purrr::map(single_value) %>%
purrr::reduce(dplyr::bind_rows)
return(table_freq)
# Note: based on IRTM approach:
# https://stackoverflow.com/questions/40461753/finding-proportions-for-categorical-data-in-a-survey
}
|
584d1de74ca5b9d2f9ce6e17fc9fe0aa4700045b | 935c46d25701ca565492a61506921b144c032c8b | /group_lasso/group_lasso_old.R | 921fa3eb1c0e16bbac7943ed49ed27128b56d248 | [] | no_license | GTEx-etc-jusuE404/tissue-specific-expression | 34bdc3adcceb71616deb7583d77f142cd05d83a8 | 518f99072004afdf2791154037d78a38e27ae1e7 | refs/heads/master | 2022-05-06T15:09:21.041325 | 2016-12-10T08:41:16 | 2016-12-10T08:41:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,749 | r | group_lasso_old.R | rm(list=ls())
# load libraries
library(grplasso)
library(AUC)
library(ggplot2)
library(caret)
cv.grplasso <- function(x, y, index, nfolds = 5, nlamdas = 20, plot.error = FALSE) {
  # K-fold cross-validation for group-lasso logistic regression (grplasso),
  # selecting the penalty lambda that maximises the mean held-out AUC.
  #
  # x          design matrix (rows = samples; should include an intercept column)
  # y          binary response vector
  # index      group index per column of x (NA marks unpenalised columns)
  # nfolds     number of CV folds (must be at least 3)
  # nlamdas    number of lambda values on the geometric grid
  # plot.error if TRUE, plot mean CV AUC +/- one sd against lambda
  #
  # Returns list(lambda = best lambda, error = its mean CV AUC).
  # Geometric lambda grid from lambdamax down to lambdamax / 32.
  lambda <- lambdamax(x, y = y, index = index, penscale = sqrt,
                      model = LogReg()) * 0.5^seq(0, 5, len = nlamdas)
  y <- drop(y)
  if (nfolds < 3)
    stop("nfolds must be at least 3; nfolds = 5 recommended")
  aucs <- matrix(, nrow = nfolds, ncol = length(lambda))
  flds <- createFolds(y, nfolds, list = TRUE, returnTrain = FALSE)
  for (i in seq_len(nfolds)) {
    test.index <- flds[[i]]
    y_test <- y[test.index]
    x_test <- x[test.index, , drop = FALSE]
    y_train <- y[-test.index]
    x_train <- x[-test.index, , drop = FALSE]
    # Fit the whole lambda path on the training fold in one call.
    fit <- grplasso(x = x_train, y = y_train, index = index, lambda = lambda,
                    model = LogReg(), penscale = sqrt,
                    control = grpl.control(update.hess = "lambda", trace = 0))
    pred.resp <- predict(fit, x_test, type = "response")
    # One AUC per lambda value on this fold's held-out data.
    for (j in seq_along(lambda)) {
      aucs[i, j] <- auc(accuracy(pred.resp[, j], as.factor(y_test)))
    }
  }
  error.mean <- apply(aucs, 2, mean)
  error.sd <- apply(aucs, 2, sd)
  # Optionally plot the CV curve with +/- one-sd error bars, x axis reversed
  # so the penalty decreases from left to right.
  if (plot.error) {
    error.high <- error.mean + error.sd
    error.low <- error.mean - error.sd
    plot(lambda, error.mean,
         xlab = "Lambda (log)", ylab = "CV AUC Score",
         log = "x", xlim = rev(range(lambda)), ylim = c(0, 1))
    arrows(lambda, error.high, lambda, error.low,
           col = 2, angle = 90, length = 0.05, code = 3)
  }
  # Keep the lambda with the highest mean CV AUC.
  max.idx <- which.max(error.mean)
  return(list(lambda = lambda[max.idx], error = error.mean[max.idx]))
}
split_data <- function(gene_features, labels, train_set_size = 0.67) {
  # Stratified train/test split of a feature matrix.
  #
  # gene_features  matrix of features (rows = samples)
  # labels         response vector, one entry per row of gene_features
  # train_set_size fraction of samples assigned to the training set
  #
  # Returns list(train = list(x, y), test = list(x, y)).
  #
  # NOTE: the seed is fixed deliberately so the split is reproducible
  # across runs of the experiment pipeline.
  set.seed(1)
  train.index <- createDataPartition(labels, p = train_set_size, list = FALSE)
  train <- list(x = gene_features[train.index, ], y = labels[train.index])
  test <- list(x = gene_features[-train.index, ], y = labels[-train.index])
  return(list(train = train, test = test))
}
group.to.int <- function(group2col, specific = TRUE) {
  # Map each sample's group label to an integer index (in order of first
  # appearance) and count how many samples fall in each group.
  #
  # group2col  two-column table: column 1 = tissue type, column 2 = tissue
  #            specific type
  # specific   if TRUE use column 2, otherwise column 1
  #
  # Returns list(types = named counts per group, idx = integer group index
  # per row of group2col).
  labels <- if (specific) group2col[, 2] else group2col[, 1]
  level.names <- unique(labels)
  idx <- match(labels, level.names)
  counts <- tabulate(idx, nbins = length(level.names))
  names(counts) <- level.names
  list(types = counts, idx = idx)
}
load.pos.neg.sets <- function(pos.name, neg.name, grp.name, specific = TRUE, transform = FALSE) {
  # Load positive/negative gene sets and the sample-to-tissue map from
  # tab-separated files, stack them, and optionally log-transform and
  # standardise every feature column.
  #
  # pos.name, neg.name  feature tables (first column = row names, 2 header
  #                     lines skipped); rows = genes, columns = samples
  # grp.name            sample-to-tissue map (1 header line skipped):
  #                     column 1 = tissue type, column 2 = tissue specific type
  # specific            passed through to group.to.int()
  # transform           if TRUE, apply log10(x + 1), centre, and scale each
  #                     column (scaling skipped for near-constant columns)
  #
  # Returns list(x = feature table, y = 1/0 labels, group = integer group
  # index per column, types = named group counts).
  positives <- read.table(pos.name, sep = '\t', row.names = 1, skip = 2)
  negatives <- read.table(neg.name, sep = '\t', row.names = 1, skip = 2)
  group <- read.table(grp.name, sep = '\t', row.names = 1, skip = 1)
  features <- rbind(positives, negatives)
  labels <- rep(c(1, 0), c(nrow(positives), nrow(negatives)))
  grouping <- group.to.int(group, specific)
  if (transform) {
    for (j in seq_len(ncol(features))) {
      v <- log10(features[, j] + 1)
      v <- v - mean(v)                 # centre
      if (sd(v) > 1e-10) {             # scale unless (near-)constant
        v <- v / sd(v)
      }
      features[, j] <- v
    }
  }
  list(x = features,
       y = labels,
       group = grouping$idx,
       types = grouping$types)
}
reduce.features <- function(grouped.data, ndim = 3) {
  # Project each tissue group's feature columns onto its first `ndim`
  # principal components.  Groups with fewer than `ndim` columns are dropped
  # (with a console warning), as are (near-)constant columns within a group.
  #
  # grouped.data  list(x, y, group, types) as produced by load.pos.neg.sets()
  # ndim          number of principal components kept per group
  #
  # Returns list(x = reduced matrix, y, group = group index per output
  # column, types = group names, where types[k] names group index k).
  type.counts <- grouped.data$types
  x <- grouped.data$x
  if (dim(x)[1] < ndim) {
    stop('Error: # of samples is less than # of subtype dimensions: ', dim(x)[1], '<', ndim)
  }
  # Count usable groups and warn about the ones being dropped.
  nusable <- 0
  for (i in 1:length(type.counts)) {
    if (type.counts[i] < ndim) {
      cat('Warning: ', names(type.counts)[i], ' has too few dimensions and not included in the model\n')
    } else {
      nusable <- nusable + 1
    }
  }
  new.x <- matrix(nrow = dim(x)[1], ncol = ndim * nusable)
  new.groups <- numeric(ndim * nusable)
  new.types <- character(0)
  for (i in 1:length(type.counts)) {
    if (type.counts[i] < ndim) {
      next
    }
    # BUG FIX: append the group name (the original prepended it, which
    # reversed `types` relative to the group indices written into
    # `new.groups`, mislabelling tissues downstream).
    new.types <- c(new.types, names(type.counts)[i])
    i.type <- length(new.types)
    sel <- which(grouped.data$group == i)
    # Drop (near-)constant columns, which prcomp cannot scale.
    remove <- numeric(0)
    for (j in 1:length(sel)) {
      if (sd(x[, sel[j]]) < 0.001) {
        remove <- c(remove, j)
      }
    }
    if (length(remove) > 0) {
      sel <- sel[-remove]
    }
    pca <- prcomp(x[, sel], center = TRUE, scale. = TRUE)
    new.x[, ((i.type - 1) * ndim + 1):(i.type * ndim)] <- pca$x[, 1:ndim]
    new.groups[((i.type - 1) * ndim + 1):(i.type * ndim)] <- rep(i.type, ndim)
  }
  rownames(new.x) <- rownames(grouped.data$x)
  return(list(x = new.x,
              y = grouped.data$y,
              group = new.groups,
              types = new.types))
}
plot.coefficient.path <- function(x, y, index) {
  # Plot the group-lasso logistic-regression coefficient path over a
  # 10-point geometric lambda grid (lambdamax down to lambdamax / 32),
  # with a log-scaled x axis.
  lambda.max <- lambdamax(x, y = y, index = index, penscale = sqrt, model = LogReg())
  lambda.grid <- lambda.max * 0.5^seq(0, 5, len = 10)
  path.fit <- grplasso(x = x, y = y, index = index, lambda = lambda.grid,
                       model = LogReg(), penscale = sqrt,
                       control = grpl.control(update.hess = "lambda", trace = 0))
  plot(path.fit, log = 'x')
}
## Experiment driver: for each GO term, load its positive/negative gene
## sets, reduce features per tissue, tune a group-lasso classifier by CV,
## and write coefficients + test predictions to a result file.
main <- '/Users/jasonzhu/Documents/CS341_Code/'
dir.name <- paste(main,'data/experiment_inputs/',sep='')
grp.name <- paste(main,'data/samples_to_tissues_map.txt',sep='')
all.go.name <- paste(main,'data/GO_terms_final_gene_counts.txt',sep='')
go.names <- read.table(all.go.name)$V1
# Number of principal components kept per tissue.
ndim <- 5
# NOTE(review): the loop starts at GO term 260 -- presumably resuming a
# partially completed run; confirm before re-running from scratch.
for (go.idx in 260:length(go.names)){
# Reset the seed so each GO term's train/test split is reproducible.
set.seed(1)
go.term <- as.character(go.names[go.idx])
# go.term <- 'GO:0000578'
neg_idx <- 0
# Input/output file name stems for this GO term.
neg_pfx <- paste(paste('_neg_',neg_idx,sep=''),'.txt.txt',sep='')
out_pfx <- paste(paste(go.term,'_',sep=''),neg_idx,sep='')
out.name <- paste(paste(paste(main,'data/grplasso_results/grplasso_',sep=''),out_pfx,sep=''),'.txt',sep='')
## load all data
cat('----------------------------------------\n')
cat('Loading raw data from',go.term,'...\n')
pos.name <- paste(dir.name,paste(go.term,'_pos.txt.txt',sep=''),sep='') # filename for positive set
neg.name <- paste(dir.name,paste(go.term,neg_pfx,sep=''),sep='') # filename for negative set
full.data <- load.pos.neg.sets(pos.name,neg.name,grp.name,specific=TRUE,transform=TRUE)
## feature extraction for each tissue type
# cat('----------------------------------------\n')
cat('Reducing dimension of group features...\n')
dim.red <- reduce.features(full.data,ndim=ndim)
# NA marks the (unpenalised) intercept column added below.
index <- c(NA, dim.red$group)
full.x <- cbind(1, dim.red$x) # add intercept
full.y <- dim.red$y
## fit the data with coefficient path
# plot.coefficient.path(full.x,full.y,index)
## split data
# cat('----------------------------------------\n')
data <- split_data(full.x,full.y)
cat('dimensionality of data: ',dim(data$train$x)[2],'\n')
cat('# of training samples: ',length(data$train$y),'\n')
cat('# of test samples: ',length(data$test$y),'\n')
## apply cv for grplasso
# cat('----------------------------------------\n')
cat('Training group lasso classifier...')
x <- data$train$x
y <- data$train$y
cv.result <- cv.grplasso(x,y,index,nfolds=3,plot.error=FALSE)
## Re-fit the model on the full training split with the best tuning
## parameter from cross-validation.
fit <- grplasso(x, y = y, index = index, lambda = cv.result$lambda, model = LogReg(),
penscale = sqrt,control = grpl.control(update.hess = "lambda", trace = 0))
cat('done\n')
cat('cross-validaiton error:',cv.result$error,'\n')
cat('parameter (lambda) tuned as:',cv.result$lambda,'\n')
## compute the test error (ROC AUC on the held-out split)
# cat('----------------------------------------\n')
prediction <- predict(fit, data$test$x, type = "response")
auc.val <- auc(accuracy(prediction,as.factor(data$test$y)))
cat('Test Error:',auc.val,'\n')
## store the coefficients of the fit: redirect console output to out.name.
## NOTE(review): sink() is not protected by on.exit(), so an error inside
## this section would leave output redirected.
sink(out.name)
model <- 'group_lasso'
cat('# Prediction results for:\t',go.term,'\n')
cat('# Model used:\t',model,'\n')
cat('# ROC AUC score:\t',auc.val,'\n')
cat('# Dimension per tissue:\t',ndim,'\n')
if (length(full.data$types) > length(dim.red$types)) {
cat('# Tissues used: ',length(dim.red$types),'\n')
} else {
cat('# All tissues were included\n')
}
cat('# Best penalty parameter (CV):', cv.result$lambda,'\n')
grplasso.ceoff <- fit$coefficients
# Tissue index -> tissue name mapping.
for (i in 1:length(dim.red$types)) {
cat('# tissue\t',i,'\t', dim.red$types[i],'\n')
}
# One coefficient per (tissue, component); offset by 1 to skip the intercept.
cat('# Coefficients:\n')
for (i in 1:length(dim.red$group)) {
cat(dim.red$group[i],'\t',grplasso.ceoff[i+1],'\n')
}
# Per-gene test-set predictions.
cat('# Gene ID\tLabel\tPrediction\n')
for (i in 1:length(prediction)) {
cat(rownames(data$test$x)[i],'\t',data$test$y[i],'\t',prediction[i],'\n')
}
sink()
# file.show(out.name)
cat('saved result to output\n')
# cat('----------------------------------------\n')
}
|
84da040645b4b46f523dce272a125baf87e3f9e5 | 9dab19a3e8a1f11bb5de7666bf90427dcda71380 | /data-raw/MyRestaurants.R | bbbe1649c73b801449dbda01c8fedb38abe7ac2b | [] | no_license | srkwon/mdsr | 4a6aa4f2a400b069c73b538b2b964dc080ec48c3 | 389915038707052f92585d73eaf55bd13d4eb1cf | refs/heads/master | 2023-03-29T17:17:01.418014 | 2021-03-29T20:05:33 | 2021-03-29T20:05:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,159 | r | MyRestaurants.R | # MyRestaurants
# df <- read.socrata("https://data.cityofnewyork.us/resource/fhrw-4uyv.csv?$where=date_trunc_ymd(created_date)='2015-03-18'")
url <- "https://data.cityofnewyork.us/api/views/xx67-kt59/rows.csv?accessType=DOWNLOAD"
download.file(url, "data-raw/dohmh_nyc_violations.csv")
require(readr)
require(dplyr)
violations <- readr::read_csv("data-raw/dohmh_nyc_violations.csv")
# problems
violations %>%
problems() %>%
group_by(col) %>%
summarize(N = n())
# fix variable names
names(violations) <- names(violations) %>%
tolower() %>%
gsub(" ", "_", x = .)
# non-standard spellings?
violations <- violations %>%
mutate(cuisine_description = ifelse(grepl("Coffee/Tea", cuisine_description), "Cafe/Coffee/Tea", cuisine_description))
# Cafe/Coffee/Tea
# set encoding for non-ASCII character
tools::showNonASCII(violations$cuisine_description)
# x <- as.character(violations$cuisine_description)
# Encoding(x) <- "latin1"
# y <- iconv(x, from = "latin1", to = "ASCII", sub = "e")
# violations$cuisine_description <- x
# note that other columns have UTF characters
bad <- apply(violations, MARGIN = 2, tools::showNonASCII)
str(bad)
# Lookup table for violations
ViolationCodes <- violations %>%
group_by(violation_code) %>%
summarize(critical_flag = first(critical_flag)
, violation_description = first(violation_description))
# Lookup table for Cuisines?
violations <- violations %>%
mutate(cuisine_code = as.factor(cuisine_description))
Cuisines <- as.tbl(data.frame(cuisine_code = 1:length(levels(violations$cuisine_code))
, cuisine_description = levels(violations$cuisine_code)))
library(lubridate)
Violations <- violations %>%
mutate(cuisine_code = as.numeric(cuisine_code)) %>%
select(-cuisine_description, -violation_description, -critical_flag) %>%
mutate(inspection_date = mdy(inspection_date)) %>%
mutate(grade_date = mdy(grade_date)) %>%
mutate(record_date = mdy(record_date))
save(Cuisines, file = "data/Cuisines.rda", compress = "xz")
save(ViolationCodes, file = "data/ViolationCodes.rda", compress = "xz")
save(Violations, file = "data/Violations.rda", compress = "xz")
|
46f691e11691a84456d3a9658b948a9c90eace21 | 1a5eca06828772da7009e6d160e164d223e05514 | /r_shiny/prevendo_qualidade_de_veiculos/ModeloCarros.R | ab2369b61cf2c771d98b210fec15dae9b9b21739 | [] | no_license | murilo-cremon/R-Lang | 2c4bc8ea214f2fda25c7e785e6cf96f8bdab32cb | 2d77d10bbf096c17aa03e87570d7612229f04910 | refs/heads/master | 2020-12-28T12:48:42.731279 | 2020-02-05T01:10:26 | 2020-02-05T01:10:26 | 238,333,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 181 | r | ModeloCarros.R | library(e1071)
carros <- read.csv(file = "car.data", sep = ",", header = TRUE)
modelo <- naiveBayes(class ~ ., data = carros)
saveRDS(modelo, file = "./RDS/naiveBayesCarros.rds") |
24f112e3f77da84e17b23bf1d5da0d1b4b28a432 | c666224d413bcd9d2697f39205619716b8f25a5c | /shiny/release_V3/server.R | a4466065e7d9e5937052f81afa414c987450582d | [] | no_license | xulong82/wgs | dbccd4e51da14bc169b4a3d79169e3792cddd444 | 88a7b5833ed34a1fca987e968d98b4c202ef325a | refs/heads/master | 2021-01-17T16:44:51.666100 | 2018-01-02T21:38:09 | 2018-01-02T21:38:09 | 58,670,966 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,608 | r | server.R | library(shiny)
library(ggvis)
library(dplyr)
library(ggplot2)
load("data.rdt")
for(obj in names(shinyList)) assign(obj, shinyList[[obj]])
shinyServer(function(input, output, session) {
table <- addi[c(17:22, 16, 7, 25:29, 33:35, 38, 39)]
points <- reactiveValues(selected = NULL)
output$table_1 <- renderTable({
table_1 <- addi[c(17:22, 16, 39, 7, 25:29, 33:36, 38)]
table_1$Consequence <- gsub(",", ", ", table_1$Consequence)
table_1
}, include.rownames = FALSE)
output$table_2 <- renderTable({
table_2 <- apoe4[c(17:22, 16, 39, 7, 25:29, 33:36, 38)]
table_2$Consequence <- gsub(",", ", ", table_2$Consequence)
table_2
}, include.rownames = FALSE)
output$table_3 <- renderTable({
table_3 <- apoe2[c(17:22, 16, 39, 7, 25:29, 33:36, 38)]
table_3$Consequence <- gsub(",", ", ", table_3$Consequence)
table_3
}, include.rownames = FALSE)
tooltip_values <- function(x) {
if (is.null(x)) return (NULL)
points$selected <- x$UID
entry <- table[table$UID == x$UID, c(1:4)]
paste0(names(entry), ": ", format(entry), collapse = "<br />")
}
gv1 <- reactive({
table %>%
ggvis(~MAF, ~pSnp, key := ~UID) %>%
layer_points() %>%
add_tooltip(tooltip_values, "hover")
}) %>% bind_shiny("ggvis")
output$note1 <- renderTable({
table[table$UID == points$selected, 1:17]
}, include.rownames = FALSE)
output$note2 <- renderTable({
table[table$UID == points$selected, c(7, 18)]
}, include.rownames = FALSE)
})
|
e29485288b13e55baf6b98af285758bbd164332a | 47602ccc1d2720b78caca95416b5008a22d8e68f | /samplecodes/add-one.R | 59d7962a83d6c2463f534b6c48cf51c8c6c3aeb5 | [
"CC0-1.0"
] | permissive | everdark/rbasic | a71c00ba943fbebcf8dbf729717595db814aaafb | a218f864b2f595241385ff6ab1d316e7b154b3cd | refs/heads/master | 2016-09-12T23:43:42.346555 | 2016-05-13T11:48:58 | 2016-05-13T11:48:58 | 55,966,164 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 93 | r | add-one.R | #!/usr/bin/env Rscript
cmdargs <- commandArgs(trailingOnly=TRUE)
as.integer(cmdargs[1]) + 1
|
d6cb5487f308c69c711eac5df86ffeeebfea21dd | 47232a6daa897c48fbee2c0ddf2ee9c3bc0a6a1e | /R/cite-loaded-packages.R | 06287e5a9f60bcd7e45896b3f93398a72c2aa3ca | [] | no_license | mustafaascha/mustafamisc | 99643af702c5283ccc7153fa61a8494b615b918d | b8ee198dc082734643e527b2a9e9b930fa9e2f50 | refs/heads/master | 2021-01-20T12:06:53.170004 | 2018-05-01T16:27:30 | 2018-05-01T16:27:30 | 66,119,021 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 285 | r | cite-loaded-packages.R |
#' Cite all loaded packages
#'
#' @return
#' @export
#'
#' @examples
#' library(Hmisc)
#' bib_loaded_packages()
#'
bib_loaded_packages <- function(){
lp <- loaded_packages <- search()
lp <- lp[grepl("package", lp)]
lp <- gsub("package:", "", lp)
lapply(lp, citation)
}
|
2e09522b2c891dbeca7ce6f85a7302aba4a35c56 | df3fb88cef9398b02709f12a9ee91691bdf349bc | /Oman R Users/Oman R Users Script.R | 2c9ab10c90f0bb106dcd36cc43c36a43c0a12779 | [] | no_license | ggSamoora/Workshops | e9deddfa41281ea9f421ee3367fbacdb22b7fa16 | a858adfbf206778d0a90231dae9f0bb004a9f579 | refs/heads/main | 2023-04-19T01:13:05.856196 | 2022-10-17T06:16:00 | 2022-10-17T06:16:00 | 537,260,304 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,005 | r | Oman R Users Script.R | # set the locale to Arabic
Sys.setlocale("LC_ALL","Arabic")
# load necessary packages
library(tidyverse) # for data wrangling
library(rvest) # for web scraping
library(data.table) # for faster merging of data
# start out with one link
# NOTE: You may need to change the link in here, because the url may have expired. Please visit the main page through the following url
# "https://om.opensooq.com/ar/%D8%B9%D9%82%D8%A7%D8%B1%D8%A7%D8%AA-%D9%84%D9%84%D8%A7%D9%8A%D8%AC%D8%A7%D8%B1/%D8%B4%D9%82%D9%82-%D9%84%D9%84%D8%A7%D9%8A%D8%AC%D8%A7%D8%B1"
# then click on any apartment with a price and use that url as the url variable
url <- "https://om.opensooq.com/ar/search/188269493/%D8%A8%D9%86%D8%A7%D9%8A%D9%87-%D8%AC%D8%AF%D9%8A%D8%AF%D9%87-%D9%81%D8%A7%D8%AE%D8%B1%D9%87-%D9%84%D9%84%D8%A7%D9%8A%D8%AC%D8%A7%D8%B1-%D8%A8%D9%85%D8%B7%D9%82%D9%87-%D8%A7%D9%84%D9%82%D9%88%D9%81"
# read the url as an html
page <- read_html(url)
# removes values and keeps headers
headers_and_values <- page %>% html_elements(xpath = "//li[@class='inline vTop relative mb15']") %>%
html_text() %>%
str_remove_all("\n") %>%
str_squish()
# create the empty dataframe
df <- matrix(ncol=10) %>% as.data.frame()
# headers only
headers_only <- headers_and_values %>%
str_remove_all("[:].*") %>%
str_squish()
# set the headers_only as the column names of df
colnames(df) <- c(headers_only)
# function to extract the values from the value-header combination
return_value <- function(h, h_v) {
val <- h_v[grepl(h, h_v)] %>%
str_remove(h) %>%
str_remove("[:]") %>%
str_squish()
if (length(val) == 0) {
return(NA)
} else {
return(val)
}
}
# apply the function to extract the values from the headers_and_values object
values_only <- map(headers_only, function(x) {return_value(x, headers_and_values)})
# add the values as a row to the df variable
df <- rbindlist(list(df, values_only))
# Now let's start with script to collect data from all apartments
#----------------------------------------------------------------
# the main URL (which contains the list of apartments)
url_main <- "https://om.opensooq.com/ar/%D8%B9%D9%82%D8%A7%D8%B1%D8%A7%D8%AA-%D9%84%D9%84%D8%A7%D9%8A%D8%AC%D8%A7%D8%B1/%D8%B4%D9%82%D9%82-%D9%84%D9%84%D8%A7%D9%8A%D8%AC%D8%A7%D8%B1"
# for loop to iterate over the first 3 pages of apartment postings
for (pg in 1:3) {
# visit the nth page of apartments
url <- paste0(url_main, "?page=", pg)
# store the url in an html object
page <- read_html(url)
# extract the URLs of each apartment posting
url_list <- page %>% html_elements(xpath = "//h2[@class='fRight mb15']") %>%
html_elements(xpath = ".//a") %>%
html_attr("href")
# for loop to iterate over the URLs of the apartment postings
for (i in 1:length(url_list)) {
# give the code a 1 second break to prevent errors
Sys.sleep(1)
# store the apartment posting url as an html object
page_2 <- read_html(paste0("https://om.opensooq.com", url_list[i]))
# extract the headers and values
headers_and_values <- page_2 %>% html_elements(xpath = "//li[@class='inline vTop relative mb15']") %>%
html_text() %>%
str_remove_all("\n") %>%
str_squish()
# extract the values only
values_only <- map(headers_only, function(x) {return_value(x, headers_and_values)})
# append the values as a row to the df variable
df <- rbindlist(list(df, values_only))
# progress bar of each apartment posting
{Sys.sleep(0.1); cat("\r",i)}
}
# progress bar of each apartment posting page
print(paste("Page:", pg, "Complete!"))
# allow code to sleep for 3 seconds to prevent errors
Sys.sleep(3)
}
# remove the first row of NAs from the dataset
df2 <- df %>% slice(-1L)
# write the dataframe to a csv file for future analysis
write_csv(df2, "All Oman Apartment Data.csv")
|
3f22ce41bb07a263cd207cbba9462c31a6f5bd8b | 3847a390b91d02bba38c0ce92218578de2a5e9c9 | /cachematrix.R | 48efcabe673313758da4d3b942e2d327376fe27b | [] | no_license | JZirnstein/ProgrammingAssignment2 | b2373230f27b68a00c887bda7d70b48604da788b | a095e0fa93636de378d4f9e3d38097f2d974f068 | refs/heads/master | 2021-01-18T02:12:12.928664 | 2014-12-17T02:46:55 | 2014-12-17T02:46:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,169 | r | cachematrix.R | ## The following functions will satisfy the Programming Assignment 2
## of the rprog-016 class on Coursera (December 2014)
##
## They will create a special object to store a matrix and cache its inverse.
## I'm assuming all matrices presented are invertable.
## This function creates a special "matrix" object
## that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverseValue) inv <<- inverseValue
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special
## "matrix" returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
8afb9a56c42975c83a0feb7b7d8a5f350d695718 | 8a9753149421692cece36951a7fcf4fe910978db | /06.treemix/06.run.r | fb1572c37a052b638ea03ee5d30bb56e62edad70 | [] | no_license | Huiying123/Populus_speciation | e9b17307a8477eb21d822d312c87415b05277edf | fb8ce330b437b0bd45b008d9872ff6360b6b24f7 | refs/heads/main | 2023-04-18T21:58:30.779900 | 2022-12-06T08:46:36 | 2022-12-06T08:46:36 | 348,393,839 | 5 | 1 | null | 2022-01-23T14:43:57 | 2021-03-16T15:13:30 | Shell | UTF-8 | R | false | false | 295 | r | 06.run.r | library(RColorBrewer)
library(R.utils)
source("plotting_funcs.R") # here you need to add the path
par(mfrow=c(4,3))
for(edge in 0:10){
plot_tree(cex=0.8,paste0(prefix,".",edge))
title(paste(edge,"edges"))
}
for(edge in 0:10){
plot_resid(stem=paste0(prefix,".",edge),pop_order="dogs.list")
} |
f8201a4cf9808e0b29c594a9add8526a7cb949ce | 701126efc2e5c4fd913fb880afc65ffc5591b4d2 | /man/plotDensities.Rd | 3052b79361d69dd9de5dee526a19baeee790ff0e | [] | no_license | timpeters82/aaRon | 340f9eb00c70879bb5f173b6433bf7ca5c74c3e3 | ea2c009121b8221740a9a04f64389306d13cb5bf | refs/heads/master | 2020-12-25T17:56:27.536462 | 2017-04-26T08:18:40 | 2017-04-26T08:18:40 | 58,909,291 | 0 | 0 | null | 2016-05-16T06:49:11 | 2016-05-16T06:34:21 | R | UTF-8 | R | false | false | 961 | rd | plotDensities.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plotDensities.R
\name{plotDensities}
\alias{plotDensities}
\title{plotDensities}
\usage{
plotDensities(s, col = NULL, xlim = NULL, ylim = NULL, na.rm = TRUE,
main = "", xlab = "", ylab = "Relative Frequency", ...)
}
\arguments{
\item{s}{A \code{list} or \code{matrix}}
\item{col}{Colours to use (optional)}
\item{xlim}{xlim to use, if not supplied are calculated
from data ranges}
\item{ylim}{ylim to use, if not supplied are calculated
from data ranges}
\item{na.rm}{Whether to remove NA values when calculating
densities}
\item{main}{Title for the plot}
\item{xlab}{xlab to use}
\item{ylab}{ylab to use}
\item{...}{Additional parameters passed on to
\code{plot}}
}
\value{
Called for the side effect of plotting
}
\description{
Plots multiple calls to \code{density} on the same plot
}
\author{
Aaron Statham <a.statham@garvan.org.au>
}
|
2391f60ccb67a9b233c55fa6a0e866bd8b10efde | 21845f139f8c4dcc3a21b133a9d3e6620eb89bc4 | /man/cockroachALData.Rd | b81b0e3490a3ce7c48737e700fa1d22df2ba42b0 | [] | no_license | cran/STAR | 257a4e63d61f3a5b664b1fa6770d2383096cddb9 | c88668ba8a508206fdc5ef4406b6373c492f2806 | refs/heads/master | 2021-01-13T01:49:39.256279 | 2012-10-08T00:00:00 | 2012-10-08T00:00:00 | 17,693,622 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,254 | rd | cockroachALData.Rd | \name{cockroachAlData}
\alias{CAL1S}
\alias{CAL1V}
\alias{CAL2S}
\alias{CAL2C}
\alias{e060517spont}
\alias{e060517ionon}
\alias{e060817spont}
\alias{e060817terpi}
\alias{e060817citron}
\alias{e060817mix}
\alias{e060824spont}
\alias{e060824citral}
\alias{e070528spont}
\alias{e070528citronellal}
\docType{data}
\title{Spike Trains of several Cockroach Antennal Lobe Neurons
Recorded from Six Animals}
\description{
Four (\code{CAL1S} and \code{CAL1V}), three (\code{CAL2S} and
\code{CAL2C}), three (\code{e060517spont} and \code{e060517ionon}),
three (\code{e060817spont}, \code{e060817terpi}, \code{e060817citron}
and \code{e060817mix}), two (\code{e060824spont} and
\code{e060824citral}) and four (\code{e070528spont} and
\code{e070528citronellal}) Cockroach (\emph{Periplaneta americana}) antennal lobe neurons
(putative projection neurons) were recorded simultaneously
and extracellularly during spontaneous activity and odors (vanilin,
citral, citronellal, terpineol, beta-ionon)
responses from six different animals. The data sets contain the sorted
spike trains of the neurons.
}
\usage{
data(CAL1S)
data(CAL1V)
data(CAL2S)
data(CAL2C)
data(e060517spont)
data(e060517ionon)
data(e060817spont)
data(e060817terpi)
data(e060817citron)
data(e060817mix)
data(e060824spont)
data(e060824citral)
data(e070528spont)
data(e070528citronellal)
}
\format{
\code{CAL1S} is a named list with 4 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"},
\code{"neuron 4"}). Each
component contains the spike train (ie, action potentials occurrence
times) of one neuron recorded during 30 s of spontaneous
activity. \emph{Times are expressed in seconds}.
\code{CAL1V} is a named list with 4 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"},
\code{"neuron 4"}).
Each component is a named list
with 20 components: \code{"stim. 1"}, ..., \code{"stim. 20"}. Each
sub-list contains the spike train of one neuron during 1 stimulation
(odor puff) with \emph{vanillin}
(\url{http://en.wikipedia.org/wiki/Vanillin}). Each acquisition was 10
s long. The command
to the odor delivery valve was on between sec 4.49 and sec 4.99.
\code{CAL2S} is a named list with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}). Each
component contains the spike train (ie, action potentials occurrence
times) of one neuron recorded during 1 mn of spontaneous
activity. \emph{Times are expressed in seconds}.
\code{CAL2C} is a named list with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}).
Each component is a named list
with 20 components: \code{"stim. 1"}, ..., \code{"stim. 20"}. Each
sub-list contains the spike train of one neuron during 1 stimulation
(odor puff) with \emph{citral}
(\url{http://en.wikipedia.org/wiki/Citral}). Each acquisition was 14 s long. The command
to the odor delivery valve was on between sec 5.87 and sec 6.37.
\code{e060517spont} is a named list of with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}). Each
component is a \code{spikeTrain} object (ie, action potentials occurrence
times) of one neuron recorded during 61 s of spontaneous
activity. \emph{Times are expressed in seconds}.
\code{e060517ionon} is a named list with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}).
Each component is a \code{repeatedTrain} object
with 19 \code{spikeTrain} objects: \code{"stim. 1"}, ..., \code{"stim. 19"}. Each
\code{spikeTrain} contains the spike train of one neuron during 1 stimulation
(odor puff) with \emph{beta-ionon}
(\url{http://commons.wikimedia.org/wiki/Image:Beta-Ionon.svg}). Each acquisition was 15 s long. The command
to the odor delivery valve was on between sec 6.07 and sec 6.57.
\code{e060817spont} is a named list of with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}). Each
component is a \code{spikeTrain} object (ie, action potentials occurrence
times) of one neuron recorded during 60 s of spontaneous
activity. \emph{Times are expressed in seconds}.
\code{e060817terpi} is a named list with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}).
Each component is a \code{repeatedTrain} object
with 20 \code{spikeTrain} objects: \code{"stim. 1"}, ..., \code{"stim. 20"}. Each
\code{spikeTrain} contains the spike train of one neuron during 1 stimulation
(odor puff) with \emph{terpineol}
(\url{http://en.wikipedia.org/wiki/Terpineol}). Each acquisition was 15 s long. The command
to the odor delivery valve was on between sec 6.03 and sec 6.53.
\code{e060817citron} is a named list with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}).
Each component is a \code{repeatedTrain} object
with 20 \code{spikeTrain} objects: \code{"stim. 1"}, ..., \code{"stim. 20"}. Each
\code{spikeTrain} contains the spike train of one neuron during 1 stimulation
(odor puff) with \emph{citronellal}
(\url{http://en.wikipedia.org/wiki/Citronellal}). Each acquisition was 15 s long. The command
to the odor delivery valve was on between sec 5.99 and sec 6.49.
\code{e060817mix} is a named list with 3 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"}).
Each component is a \code{repeatedTrain} object
with 20 \code{spikeTrain} objects: \code{"stim. 1"}, ..., \code{"stim. 20"}. Each
\code{spikeTrain} contains the spike train of one neuron during 1 stimulation
(odor puff) with a mixture of \emph{terpineol} and \emph{citronellal}
(the sum of the two previous stim.). Each acquisition was 15 s long. The command
to the odor delivery valve was on between sec 6.01 and sec 6.51.
\code{e060824spont} is a named list of with 2 components
(\code{"neuron 1"}, \code{"neuron 2"}). Each
component is a \code{spikeTrain} object (ie, action potentials occurrence
times) of one neuron recorded during 59 s of spontaneous
activity. \emph{Times are expressed in seconds}.
\code{e060824citral} is a named list with 2 components
(\code{"neuron 1"}, \code{"neuron 2"}).
Each component is a named list
with 20 components: \code{"stim. 1"}, ..., \code{"stim. 20"}. Each
sub-list contains the spike train of one neuron during 1 stimulation
(odor puff) with \emph{citral}
(\url{http://en.wikipedia.org/wiki/Citral}). Each acquisition was 15 s long. The command
to the odor delivery valve was on between sec 6.01 and sec 6.51.
\code{e070528spont} is a named list of with 4 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"},
\code{"neuron 4"}). Each
component is a \code{spikeTrain} object (ie, action potentials occurrence
times) of one neuron recorded during 60 s of spontaneous
activity. \emph{Times are expressed in seconds}.
\code{e070528citronellal} is a named list with 4 components
(\code{"neuron 1"}, \code{"neuron 2"}, \code{"neuron 3"},
\code{"neuron 4"}).
Each component is a \code{repeatedTrain} object
with 15 \code{spikeTrain} objects: \code{"stim. 1"}, ..., \code{"stim. 15"}. Each
\code{spikeTrain} contains the spike train of one neuron during 1 stimulation
(odor puff) with \emph{citronellal}
(\url{http://en.wikipedia.org/wiki/Citronellal}). Each acquisition was 13 s long. The command
to the odor delivery valve was on between sec 6.14 and sec 6.64.
}
\details{
Every \code{repeatedTrain} object of these data sets has an \code{attribute} named
  \code{stimTimeCourse} containing the opening and closing times of the
odor delivery valve.
The data were recorded from neighboring sites on a \emph{NeuroNexus}
(\url{http://neuronexustech.com/}) silicon probe. Sorting was done
with \code{SpikeOMatic} with superposition resolution which can AND
  DOES lead to artifacts on cross-correlograms.
}
\source{
Recording and spike sorting performed by Antoine Chaffiol
\email{antoine.chaffiol@univ-paris5.fr} at the Cerebral Physiology
Lab, CNRS UMR 8118:
\url{http://www.biomedicale.univ-paris5.fr/physcerv/physiologie_cerebrale.htm}.
}
\references{
\url{http://www.biomedicale.univ-paris5.fr/physcerv/C_Pouzat/Doc/ChaffiolEtAl_FENS2006.pdf}
}
\examples{
## load CAL1S data
data(CAL1S)
## convert the data into spikeTrain objects
CAL1S <- lapply(CAL1S,as.spikeTrain)
## look at the train of the 1st neuron
CAL1S[["neuron 1"]]
## fit the 6 different renewal models to the 1st neuron spike train
compModels(CAL1S[["neuron 1"]])
## look at the ISI distribution with the fitted invgauss dist for
## this 1st neuron
isiHistFit(CAL1S[["neuron 1"]],model="invgauss")
## load CAL1V data
data(CAL1V)
## convert them to repeatedTrain objects
CAL1V <- lapply(CAL1V, as.repeatedTrain)
## look at the raster of the 1st neuron
CAL1V[["neuron 1"]]
## load e070528spont data
data(e070528spont)
## look at the spike train of the 1st neuron
e070528spont[["neuron 1"]]
## load e070528citronellal data
data(e070528citronellal)
## Get the stimulus time course
attr(e070528citronellal[["neuron 1"]],"stimTimeCourse")
## look at the raster of the 1st neuron
plot(e070528citronellal[["neuron 1"]],stim=c(6.14,6.64))
\dontrun{
## A "detailed" analysis of e060817 where 2 odors as well as their mixtures
## were used.
## Load the terpineol, citronellal and mixture response data
data(e060817terpi)
data(e060817citron)
data(e060817mix)
## get smooth psths with gsspsth0
e060817terpiN1PSTH <- gsspsth0(e060817terpi[["neuron 1"]])
e060817terpiN2PSTH <- gsspsth0(e060817terpi[["neuron 2"]])
e060817terpiN3PSTH <- gsspsth0(e060817terpi[["neuron 3"]])
e060817citronN1PSTH <- gsspsth0(e060817citron[["neuron 1"]])
e060817citronN2PSTH <- gsspsth0(e060817citron[["neuron 2"]])
e060817citronN3PSTH <- gsspsth0(e060817citron[["neuron 3"]])
e060817mixN1PSTH <- gsspsth0(e060817mix[["neuron 1"]])
e060817mixN2PSTH <- gsspsth0(e060817mix[["neuron 2"]])
e060817mixN3PSTH <- gsspsth0(e060817mix[["neuron 3"]])
## look at them
## Neuron 1
plot(e060817terpiN1PSTH,stimTimeCourse=attr(e060817terpi[["neuron 1"]],"stimTimeCourse"),colCI=2)
plot(e060817citronN1PSTH,stimTimeCourse=attr(e060817citron[["neuron 1"]],"stimTimeCourse"),colCI=2)
plot(e060817mixN1PSTH,stimTimeCourse=attr(e060817mix[["neuron 1"]],"stimTimeCourse"),colCI=2)
## Neuron 2
plot(e060817terpiN2PSTH,stimTimeCourse=attr(e060817terpi[["neuron 2"]],"stimTimeCourse"),colCI=2)
plot(e060817citronN2PSTH,stimTimeCourse=attr(e060817citron[["neuron 2"]],"stimTimeCourse"),colCI=2)
plot(e060817mixN2PSTH,stimTimeCourse=attr(e060817mix[["neuron 2"]],"stimTimeCourse"),colCI=2)
## Neuron 3
plot(e060817terpiN3PSTH,stimTimeCourse=attr(e060817terpi[["neuron 3"]],"stimTimeCourse"),colCI=2)
plot(e060817citronN3PSTH,stimTimeCourse=attr(e060817citron[["neuron 3"]],"stimTimeCourse"),colCI=2)
plot(e060817mixN3PSTH,stimTimeCourse=attr(e060817mix[["neuron 3"]],"stimTimeCourse"),colCI=2)
## Make now fancier plots with superposed psths ####
## Take into account the fact that the stimuli onsets are not identical
## Neuron 1
plot(e060817mixN1PSTH$mids-0.02,e060817mixN1PSTH$ciUp,type="n",ylim=c(0,max(e060817mixN1PSTH$ciUp)),xlim=c(5,14),xlab="Time (s)",ylab="Firing rate (Hz)",main="Neuron 1 e060817")
rect(5.99,0,6.49,max(e060817mixN1PSTH$ciUp),col="grey80",border=NA)
abline(h=0)
polygon(c(e060817mixN1PSTH$mids-0.02,rev(e060817mixN1PSTH$mids-0.02)),c(e060817mixN1PSTH$ciLow,rev(e060817mixN1PSTH$ciUp)),col=rgb(1,0,1,0.5),border=NA)
polygon(c(e060817citronN1PSTH$mids,rev(e060817citronN1PSTH$mids)),c(e060817citronN1PSTH$ciLow,rev(e060817citronN1PSTH$ciUp)),col=rgb(1,0,0,0.5),border=NA)
polygon(c(e060817terpiN1PSTH$mids-0.04,rev(e060817terpiN1PSTH$mids-0.04)),c(e060817terpiN1PSTH$ciLow,rev(e060817terpiN1PSTH$ciUp)),col=rgb(0,0,1,0.5),border=NA)
lines(e060817terpiN1PSTH$mids-0.04,e060817terpiN1PSTH$freq,col=rgb(0,0,1),lwd=2)
lines(e060817citronN1PSTH$mids,e060817citronN1PSTH$freq,col=rgb(1,0,0),lwd=2)
lines(e060817mixN1PSTH$mids-0.02,e060817mixN1PSTH$freq,col=rgb(0,0,0),lwd=2)
legend(8,0.9*max(e060817mixN1PSTH$ciUp),c("Terpineol","Citronellal","Mixture"),col=c(4,2,1),lwd=2)
## Neuron 2
plot(e060817mixN2PSTH$mids-0.02,e060817mixN2PSTH$ciUp,type="n",ylim=c(0,max(e060817mixN2PSTH$ciUp)),xlim=c(5,14),xlab="Time (s)",ylab="Firing rate (Hz)",main="Neuron 2 e060817")
rect(5.99,0,6.49,max(e060817mixN2PSTH$ciUp),col="grey80",border=NA)
abline(h=0)
polygon(c(e060817mixN2PSTH$mids-0.02,rev(e060817mixN2PSTH$mids-0.02)),c(e060817mixN2PSTH$ciLow,rev(e060817mixN2PSTH$ciUp)),col=rgb(1,0,1,0.5),border=NA)
polygon(c(e060817citronN2PSTH$mids,rev(e060817citronN2PSTH$mids)),c(e060817citronN2PSTH$ciLow,rev(e060817citronN2PSTH$ciUp)),col=rgb(1,0,0,0.5),border=NA)
polygon(c(e060817terpiN2PSTH$mids-0.04,rev(e060817terpiN2PSTH$mids-0.04)),c(e060817terpiN2PSTH$ciLow,rev(e060817terpiN2PSTH$ciUp)),col=rgb(0,0,1,0.5),border=NA)
lines(e060817terpiN2PSTH$mids-0.04,e060817terpiN2PSTH$freq,col=rgb(0,0,1),lwd=2)
lines(e060817citronN2PSTH$mids,e060817citronN2PSTH$freq,col=rgb(1,0,0),lwd=2)
lines(e060817mixN2PSTH$mids-0.02,e060817mixN2PSTH$freq,col=rgb(0,0,0),lwd=2)
legend(8,0.9*max(e060817mixN2PSTH$ciUp),c("Terpineol","Citronellal","Mixture"),col=c(4,2,1),lwd=2)
## Neuron 3
plot(e060817mixN3PSTH$mids-0.02,e060817mixN3PSTH$ciUp,type="n",ylim=c(0,max(e060817mixN3PSTH$ciUp)),xlim=c(5,14),xlab="Time (s)",ylab="Firing rate (Hz)",main="Neuron 3 e060817")
rect(5.99,0,6.49,max(e060817mixN3PSTH$ciUp),col="grey80",border=NA)
abline(h=0)
polygon(c(e060817mixN3PSTH$mids-0.02,rev(e060817mixN3PSTH$mids-0.02)),c(e060817mixN3PSTH$ciLow,rev(e060817mixN3PSTH$ciUp)),col=rgb(1,0,1,0.5),border=NA)
polygon(c(e060817citronN3PSTH$mids,rev(e060817citronN3PSTH$mids)),c(e060817citronN3PSTH$ciLow,rev(e060817citronN3PSTH$ciUp)),col=rgb(1,0,0,0.5),border=NA)
polygon(c(e060817terpiN3PSTH$mids-0.04,rev(e060817terpiN3PSTH$mids-0.04)),c(e060817terpiN3PSTH$ciLow,rev(e060817terpiN3PSTH$ciUp)),col=rgb(0,0,1,0.5),border=NA)
lines(e060817terpiN3PSTH$mids-0.04,e060817terpiN3PSTH$freq,col=rgb(0,0,1),lwd=2)
lines(e060817citronN3PSTH$mids,e060817citronN3PSTH$freq,col=rgb(1,0,0),lwd=2)
lines(e060817mixN3PSTH$mids-0.02,e060817mixN3PSTH$freq,col=rgb(0,0,0),lwd=2)
legend(8,0.9*max(e060817mixN3PSTH$ciUp),c("Terpineol","Citronellal","Mixture"),col=c(4,2,1),lwd=2)
}
}
\keyword{datasets}
|
b9af70a86ed65c5fe706ce695d4d97906ee2abe8 | dc220af6e3f9d80885301728097bf14f3a425bfd | /Plot4.R | d26f66f63ecc7ac7dab9c6d35ad56a985da26913 | [] | no_license | GregRicciCPA/EExploratory-Data-Analysis-projecet | f62d628fce17fba2f6ad30e6260e736b764baabf | d825b8fd9dcdf628ff0c08a1037fd63b7f1b6c00 | refs/heads/master | 2020-05-19T10:25:18.704922 | 2019-05-05T03:11:32 | 2019-05-05T03:11:32 | 184,970,764 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,891 | r | Plot4.R | ## Plot1 Code
## Exploratory Data Analysis - Peer-graded Assignment: Course Project 1
## Greg Ricci
## The Dataset: Electric power consumption [20Mb]
## was downloaded and unzipped to local directory due to size
getwd()
## NOTE(review): the absolute setwd() ties this script to one machine;
## run it from the data directory instead when reproducing elsewhere.
setwd("C:/Users/Anthony/Documents/Coursera/ExpDataC4")
## Read the full household power consumption file (semicolon-separated).
xdata<- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors = FALSE) # full file; the Feb 2007 window is subset below
## Parse the Date column (stored as day/month/year).
xdata$Date<-as.Date(xdata$Date,format="%d/%m/%Y")
## Keep only the two analysis days: 2007-02-01 and 2007-02-02.
subextract<-xdata[(xdata$Date=="2007-02-01") | (xdata$Date=="2007-02-02"),]
## Interactive sanity checks on the subset.
head(subextract)
dim(subextract)
summary(subextract)
View(subextract)
## Data clarifications
## Coerce measurement columns to numeric; non-numeric entries become NA,
## with the coercion warnings suppressed.
subextract$Global_active_power<-suppressWarnings(as.numeric(subextract[,"Global_active_power"]))
subextract$Global_reactive_power <- suppressWarnings(as.numeric(subextract[,"Global_reactive_power"]))
subextract$Voltage <- suppressWarnings(as.numeric(subextract[,"Voltage"]))
subextract$Sub_metering_1<-suppressWarnings(as.numeric(subextract[,"Sub_metering_1"]))
subextract$Sub_metering_2<-suppressWarnings(as.numeric(subextract[,"Sub_metering_2"]))
subextract$Sub_metering_3<-suppressWarnings(as.numeric(subextract[,"Sub_metering_3"]))
## Remove rows with a missing Global_active_power reading.
## BUG FIX: the original predicate `filter(subextract, Global_active_power|NA)`
## evaluates to NA both for missing readings AND for readings equal to 0
## (FALSE | NA is NA), so rows with a legitimate 0 kW value were silently
## dropped as well. Test for NA explicitly instead.
library(dplyr)  # portable: resolve dplyr from the default library path
xdataF <- filter(subextract, !is.na(Global_active_power))
View(xdataF)
# Date-time stamps, one per row of the *unfiltered* subset.
# NOTE(review): if any rows are dropped into xdataF above, `datet` and the
# xdataF columns plotted against it differ in length; this only works when
# the two-day window contains no missing readings -- confirm for other windows.
datet<-strptime(paste(subextract$Date,subextract$Time,sep = " "), "%Y-%m-%d %H:%M:%S" )
# Re-coerce the measurement columns of the filtered frame to numeric.
xdataF$Global_active_power<-suppressWarnings(as.numeric(xdataF[,"Global_active_power"]))
xdataF$Global_reactive_power <- suppressWarnings(as.numeric(xdataF[,"Global_reactive_power"]))
xdataF$Voltage <- suppressWarnings(as.numeric(xdataF[,"Voltage"]))
xdataF$Sub_metering_1<-suppressWarnings(as.numeric(xdataF[,"Sub_metering_1"]))
xdataF$Sub_metering_2<-suppressWarnings(as.numeric(xdataF[,"Sub_metering_2"]))
xdataF$Sub_metering_3<-suppressWarnings(as.numeric(xdataF[,"Sub_metering_3"]))
## Create Plots
#plot1 <- function() {
# hist(xdataF$Global_active_power,main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
# dev.copy(png, file="plot1.png", width=480, height=480)
# dev.off()
# cat("Plot1.png has been saved in", getwd())
#}
#plot1()
#
##plot2
#plot2 <- function() {
# plot(datet,xdataF$Global_active_power, ylab='Global Active Power (kilowatts)', xlab='', type='l')
# dev.copy(png, file="plot2.png", width=480, height=480, units = 'px')
# dev.off()
# cat("plot2.png has been saved in", getwd())
#}
#plot2()
#
##plot3
#plot3 <- function() {
# plot(datet,xdataF$Sub_metering_1, type="l", col ="black" ,xlab="", ylab="Energy sub metering")
# lines(datet,xdataF$Sub_metering_2,col="red")
# lines(datet,xdataF$Sub_metering_3,col="blue")
# legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
# dev.copy(png, file="plot3.png", width=480, height=480)
# dev.off()
# cat("plot3.png has been saved in", getwd())
#}
#plot3()
#plot4
plot4 <- function() {
  ## Compose the 2x2 summary figure for the assignment and save it as a
  ## 480x480 PNG ("plot4.png") in the working directory. Relies on the
  ## globals built above: `datet` (per-row timestamps) and `xdataF`
  ## (cleaned numeric readings).
  png("plot4.png",width = 480, height = 480)
  par(mfrow=c(2,2))
  # Plot 1: global active power over time
  plot(datet,xdataF$Global_active_power, ylab='Global Active Power (kilowatts)', xlab='', type='l')
  # Plot 2: voltage over time
  plot(datet,xdataF$Voltage,type = "l",xlab = "datetime",ylab = "Voltage")
  # Plot 3: the three sub-metering series overlaid in one panel
  plot(datet,xdataF$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
  lines(datet,xdataF$Sub_metering_2,col="red")
  lines(datet,xdataF$Sub_metering_3,col="blue")
  legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), bty="n", cex=.5)
  # Plot 4: global reactive power over time
  plot(datet,xdataF$Global_reactive_power,type = "l",xlab = "datetime",ylab = "Global_reactive_power")
  # Close the PNG device so the file is flushed to disk.
  dev.off()
  cat("Plot4.png has been saved in", getwd())
}
plot4() |
c2d81239a3bfd844f34b0274c2503b3b36a5d450 | aca1a4254acf2f73e026b732fbc57d67a246063c | /src/02b-parameterize_models.R | ed0574d55e25f454213bdbfeda9a81b5cbc011d1 | [] | no_license | timriffe/rbx2020 | 1d629a3c32fd7cac909f3396f2b1eb3568b67d75 | b1d97c58c20ad410c3be5bde8c1a839f5f36c8eb | refs/heads/main | 2023-06-06T21:03:57.879013 | 2021-07-09T15:48:48 | 2021-07-09T15:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,962 | r | 02b-parameterize_models.R | # Parameterize expected death models
# A list specifying all the models to be tested, and their
# parametrizations. See specify_models.R for the exact
# implementation of the models.

# Init ------------------------------------------------------------

library(dplyr)
library(here)
library(glue)

# Work relative to the project root so the output path below resolves.
wd <- here()
setwd(wd)

# Constants: file-system locations used by this script.
cnst <- list(
  path_mod_para = glue('tmp/mod_para.rds')
)
# Specifications of models to test --------------------------------
#
# Each tribble row specifies one candidate expected-death model:
#   model_id    -- short label identifying the model downstream,
#   model_class -- fitting backend the spec is handed to ('glm', 'gam', 'lgm'),
#   model_para  -- backend-specific list of model formulas and fit options.
# For the 'SRF*' entries, `models` holds formulas of decreasing seasonal
# complexity; per the comments below one of them is chosen by AIC.
# NOTE(review): the 'lgm' formulas use INLA-style f() random-effect terms;
# confirm against the implementation in specify_models.R.
mod_para <-
  tribble(
    ~model_id, ~model_class, ~model_para,
    # ... AVG ---------------------------------------------------------
    'AVGc5', 'glm', list(
      models = list(formula(
        deaths_observed ~
          # single coefficient for every week
          as.factor(iso_week)
      )),
      family = quasipoisson(link = 'log'),
      n_years_for_training = 5,
      weeks_for_training = NULL
    ),
    # As AVGc5, but with person-week exposures entering as an offset.
    'AVGr5', 'glm', list(
      models = list(formula(
        deaths_observed ~
          # single coefficient for every week
          as.factor(iso_week) +
          offset(log(personweeks))
      )),
      family = quasipoisson(link = 'log'),
      n_years_for_training = 5,
      weeks_for_training = NULL
    ),
    # ... SRF ---------------------------------------------------------
    # Euromomo style Serfling
    # https://github.com/EuroMOMOnetwork/MOMO/blob/master/R/excess.R
    # AIC selection of seasonality
    'SRFcem', 'glm', list(
      models = list(
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # seasonality
            # full year period
            sin(2*pi/52*iso_week) +
            cos(2*pi/52*iso_week) +
            # half year period
            sin(2*pi/26*iso_week) +
            cos(2*pi/26*iso_week) +
            # adjustment for special weeks
            holiday3
        ),
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # seasonality
            # full year period
            sin(2*pi/52*iso_week) +
            cos(2*pi/52*iso_week) +
            # adjustment for special weeks
            holiday3
        ),
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # adjustment for special weeks
            holiday3
        )
      ),
      family = quasipoisson(link = 'log'),
      # EuroMOMO convention: train only on spring/autumn weeks.
      weeks_for_training = c(15:26, 36:45),
      n_years_for_training = NULL
    ),
    # Forecasting Serfling without exposures
    # AIC selection of seasonality
    'SRFc', 'glm', list(
      models = list(
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # seasonality
            # full year period
            sin(2*pi/52*iso_week) +
            cos(2*pi/52*iso_week) +
            # half year period
            sin(2*pi/26*iso_week) +
            cos(2*pi/26*iso_week) +
            # adjustment for special weeks
            holiday3
        ),
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # seasonality
            # full year period
            sin(2*pi/52*iso_week) +
            cos(2*pi/52*iso_week) +
            # adjustment for special weeks
            holiday3
        ),
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # adjustment for special weeks
            holiday3
        )
      ),
      family = quasipoisson(link = 'log'),
      weeks_for_training = NULL,
      n_years_for_training = NULL
    ),
    # Forecasting Serfling with exposures
    # AIC selection of seasonality
    'SRFr', 'glm', list(
      models = list(
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # seasonality
            # full year period
            sin(2*pi/52*iso_week) +
            cos(2*pi/52*iso_week) +
            # half year period
            sin(2*pi/26*iso_week) +
            cos(2*pi/26*iso_week) +
            # adjustment for special weeks
            holiday3 +
            # exposures
            offset(log(personweeks))
        ),
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # seasonality
            # full year period
            sin(2*pi/52*iso_week) +
            cos(2*pi/52*iso_week) +
            # adjustment for special weeks
            holiday3 +
            # exposures
            offset(log(personweeks))
        ),
        formula(
          deaths_observed ~
            # log linear long term trend
            origin_weeks +
            # adjustment for special weeks
            holiday3 +
            # exposures
            offset(log(personweeks))
        )
      ),
      family = quasipoisson(link = 'log'),
      weeks_for_training = NULL,
      n_years_for_training = NULL
    ),
    # ... GAM ---------------------------------------------------------
    # Gam without temperature
    'GAMr', 'gam', list(
      formula = formula(
        deaths_observed ~
          # log linear long term trend
          origin_weeks*stratum_id +
          # penalized cyclic spline for seasonality
          s(iso_week, bs = 'cp', k = 12, by = stratum_id) +
          # adjustment for special weeks
          holiday3*stratum_id +
          # exposures
          offset(log(personweeks))
      ),
      family = quasipoisson(link = 'log')
    ),
    # Gam with temperature
    'GAMrt', 'gam', list(
      formula = formula(
        deaths_observed ~
          # log linear long term trend
          origin_weeks*stratum_id +
          # penalized cyclic spline for seasonality
          s(iso_week, bs = 'cp', k = 12, by = stratum_id) +
          # temperature effect
          s(iso_week, bs = 'cp', k = 12, by = temperature_anomaly) +
          # adjustment for special weeks
          holiday3*stratum_id +
          # exposures
          offset(log(personweeks))
      ),
      family = quasipoisson(link = 'log')
    ),
    # ... LGM ---------------------------------------------------------
    # Kontis style LGM without temperature effect
    'LGMr', 'lgm', list(
      formula = formula(
        death ~
          1 +
          global_slope +
          holiday +
          f(time_ar,
            model = 'ar1',
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          f(time_seas,
            model = 'seasonal', season.length = 52,
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          f(resid_iid,
            model = 'iid',
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          )
      )
    ),
    # Kontis style LGM with temperature effect
    'LGMrt', 'lgm', list(
      formula = formula(
        death ~
          1 +
          global_slope +
          holiday +
          tanomaly +
          f(time_ar,
            model = 'ar1',
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          f(time_seas,
            model = 'seasonal', season.length = 52,
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          f(week_rw, tanomaly,
            model = 'rw1', cyclic = TRUE,
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          f(resid_iid,
            model = 'iid',
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          )
      )
    ),
    # Kontis style LGM with temperature effect and different order residuals
    'LGMrt2', 'lgm', list(
      formula = formula(
        death ~
          1 +
          global_slope +
          holiday +
          tanomaly +
          f(time_ar,
            model = 'ar', order = 2,
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          f(time_seas,
            model = 'seasonal', season.length = 52,
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          # effect of temperature anomaly varies in a cyclic fashion
          # over the week of a year
          f(week_rw, tanomaly,
            model = 'rw2', cyclic = TRUE,
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          ) +
          # independent remaining errors
          f(resid_iid,
            model = 'iid',
            hyper = list(prec = list(prior = 'loggamma', param = c(0.001, 0.001)))
          )
      )
    )
  )
# Export ----------------------------------------------------------
saveRDS(mod_para, file = cnst$path_mod_para)
|
3b7d1d38005d7d4af603dd439f145077b9d94bc7 | d2a7b8c2dd7a9fc2afb86703eac631f2414417e4 | /TLBC/man/loadPredictions.Rd | 1a647bcc9f5b29dbcae2494bf01594098de1704b | [] | no_license | MatthewWilletts/Activity | 2c034998fadbdd6dab1f186b43aa38d19a348ce8 | e7efb260f5e4ac532644748bc32c68a80bf1ff9c | refs/heads/master | 2021-01-21T14:23:25.701201 | 2017-01-17T22:51:41 | 2017-01-17T22:51:41 | 56,143,670 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 813 | rd | loadPredictions.Rd | \name{loadPredictions}
\alias{loadPredictions}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Load predictions
}
\description{
Function to load predictions from CSV files in a directory.
}
\usage{
loadPredictions(predDir, names=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{predDir}{
Path to directory containing prediction files.
}
\item{names}{
(Optional) If specified, load only predictions for identifiers in this list.
}
}
\value{
A data frame of predictions.
}
\author{
Katherine Ellis
}
\seealso{
\code{\link{loadPredictionsAndLabels}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
f9a2755e37081bbb08465059a36f13544cef9676 | 67375468b45aef18d168e1d8eb21bcbda2ea144b | /revision_denovo_STOMP.R | 0999f87e431dce9d48b232d485459ee7f01cda4b | [] | no_license | ybaik10/clinicalriskscore | 2874bb4c408931258a3ad1db935ce62ed11e5686 | a8e3c40e4cd77ecb57a09de491a2c9805964ee98 | refs/heads/master | 2022-12-27T07:44:43.588369 | 2020-10-13T15:29:26 | 2020-10-13T15:29:26 | 299,670,964 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,485 | r | revision_denovo_STOMP.R | ######################################################################################################################################################################################################
################################### Xpert TB case definition #########################################################################################################################################
## Load the analysis dataset built under the Xpert-based case definition.
st.xp <- read.csv('/Users/ys/Documents/Stomp/Stomp_finaldata_Xpertcasedefinition_fromR_10202019.csv', header = TRUE)
st.xp %>% count(xpertcc)  # case/control tally under the Xpert definition
## age variable management ##
## Collapse the age bands into a 3-level score (0/1/2).
st.xp$agecat5[st.xp$agecat=="(54,68]"|st.xp$agecat=="(14,24]"]<-0
st.xp$agecat5[st.xp$agecat=="(44,54]"]<-1
st.xp$agecat5[st.xp$agecat=="(24,34]"|st.xp$agecat=="(34,44]"]<-2
## random sampling ##
## FIX: seed the RNG so the 68-case derivation sample is reproducible from the
## code itself; previously reproducibility relied solely on saving the drawn
## samples to CSV (see the note below).
set.seed(20191020)
case <- st.xp[which(st.xp$xpertcc==1),]
cont <- st.xp[which(st.xp$xpertcc==0),]
case.rs <- case[sample(nrow(case), 68),]
## Controls sharing a cluster identifier with any sampled case.
cont.match <- cont[((cont$id3_cc %in% case.rs$id3_cc)|(cont$id3_com %in% case.rs$id3_com)),]
st.xp.rs <- rbind(case.rs, cont.match) #Used in building a model
## Everyone not drawn above forms the hold-out set.
case.rs.exc <- case[!(case$record_id %in% case.rs$record_id),]
cont.match.exc <- cont[!(cont$record_id %in% cont.match$record_id),]
st.xp.rs.exc <- rbind(case.rs.exc, cont.match.exc) #Used in internal validation
## random samples SHOULD be saved to replicate the numbers ##
write.csv(st.xp.rs, "Sample_xpert_model.csv")
write.csv(st.xp.rs.exc, "Sample_xpert_valid.csv")
st.xp.1 <- rbind(st.xp.rs, st.xp.rs.exc)
## 10% prevalence scenario ##
## Approximate a lower-prevalence setting by stacking the control rows five
## times (1 original + 4 copies) alongside the cases.
st.xp.rs.10 <- rbind(st.xp.rs, st.xp.rs %>% filter(xpertcc==0), st.xp.rs %>% filter(xpertcc==0), st.xp.rs %>% filter(xpertcc==0), st.xp.rs %>% filter(xpertcc==0))
st.xp.rs.exc.10 <- rbind(st.xp.rs.exc, st.xp.rs.exc %>% filter(xpertcc==0), st.xp.rs.exc %>% filter(xpertcc==0), st.xp.rs.exc %>% filter(xpertcc==0), st.xp.rs.exc %>% filter(xpertcc==0))
st.xp.10 <- rbind(st.xp, st.xp %>% filter(xpertcc==0), st.xp %>% filter(xpertcc==0), st.xp %>% filter(xpertcc==0), st.xp %>% filter(xpertcc==0))
################################### All TB case definition ########################################################################################################################
## Load the analysis dataset built under the "all TB" case definition.
st.all <- read.csv('/Users/ys/Documents/Stomp/Stomp_finaldata_Allcasedefinition_fromR_10202019.csv', header = TRUE)
st.all %>% count(casepr)
## random sampling already done ##
## Re-load the previously drawn derivation and hold-out samples from disk
## (saved by an earlier run, so the split is fixed across sessions).
dt.rs <- read.csv("/Users/ys/Documents/Stomp/Sample.pop.model.csv", header = TRUE)
dt.rs.exc <- read.csv("/Users/ys/Documents/Stomp/Sample.pop.valid.csv", header = TRUE)
dt.rs %>% count(casepr)
dt.rs.exc %>% count(casepr)
## age variable management ##
## agecat5: 3-level age score; agecat6: 4-level variant with a different
## reference grouping. Applied identically to both samples.
dt.rs$agecat5[dt.rs$agecat=="(54,68]"|dt.rs$agecat=="(14,24]"]<-0
dt.rs$agecat5[dt.rs$agecat=="(44,54]"]<-1
dt.rs$agecat5[dt.rs$agecat=="(24,34]"|dt.rs$agecat=="(34,44]"]<-2
dt.rs$agecat6[dt.rs$agecat=="(44,54]"|dt.rs$agecat=="(54,68]"]<-0
dt.rs$agecat6[dt.rs$agecat=="(14,24]"]<-1
dt.rs$agecat6[dt.rs$agecat=="(24,34]"]<-2
dt.rs$agecat6[dt.rs$agecat=="(34,44]"]<-3
dt.rs.exc$agecat5[dt.rs.exc$agecat=="(54,68]"|dt.rs.exc$agecat=="(14,24]"]<-0
dt.rs.exc$agecat5[dt.rs.exc$agecat=="(44,54]"]<-1
dt.rs.exc$agecat5[dt.rs.exc$agecat=="(24,34]"|dt.rs.exc$agecat=="(34,44]"]<-2
dt.rs.exc$agecat6[dt.rs.exc$agecat=="(44,54]"|dt.rs.exc$agecat=="(54,68]"]<-0
dt.rs.exc$agecat6[dt.rs.exc$agecat=="(14,24]"]<-1
dt.rs.exc$agecat6[dt.rs.exc$agecat=="(24,34]"]<-2
dt.rs.exc$agecat6[dt.rs.exc$agecat=="(34,44]"]<-3
## 10% prevalence scenario ##
## Stack the control rows five times (1 original + 4 copies) to mimic a
## lower-prevalence population.
dt.rs10<-rbind(dt.rs, dt.rs %>% filter(casepr==0), dt.rs %>% filter(casepr==0), dt.rs %>% filter(casepr==0), dt.rs %>% filter(casepr==0))
dt.rs.exc10<-rbind(dt.rs.exc, dt.rs.exc %>% filter(casepr==0), dt.rs.exc %>% filter(casepr==0), dt.rs.exc %>% filter(casepr==0), dt.rs.exc %>% filter(casepr==0))
###########################STOMP de novo external validation on Kharitode multiple imputation dataset###############################
## External validation cohort (Kharitode study).
newfd <- read.csv("/Users/ys/kharitode/newfd.csv", header=T)
## Simple additive risk score: sum of the six component indicators.
newfd$simple.external <- newfd$agecat2 + newfd$sexcat + newfd$hivcat1 + newfd$n_tbsymp + newfd$symp_2wks + newfd$eversmoke
## Keep the score, its components, candidate predictors and the outcome (xpert).
newfd.simple.external <- newfd %>% dplyr::select(simple.external, n_tbsymp, symp_2wks, agecat2, sexcat, hivcat1, dbcat, symp_fac___1cat, symp_fac___2cat, symp_fac___3cat, symp_fac___4cat, n_other_sympcat, edu8, pasttb, eversmoke, lungcat, xpert)
## Convert every retained column to factor before imputation.
newfd.simple.external[] <- lapply(newfd.simple.external, function(x){return(as.factor(x))})
## Multiple imputation: 15 imputed datasets, 10 iterations; the method vector
## matches the 17 selected columns in order (first two 'pmm', then
## 'logreg'/'polyreg' for the categorical variables).
## NOTE(review): 'pmm' is requested for the first two columns even though all
## columns were just coerced to factor -- confirm mice handles this as intended.
imputed.simple.external<- mice(newfd.simple.external, m=15, maxit=10, method=c(rep('pmm',2),'logreg','polyreg',rep('logreg',13)))
summary(imputed.simple.external)
## 10% prevalence scenario: stack the Xpert-negative rows eight times
## (1 original + 7 copies), then repeat the selection/imputation steps.
newfd.10 <- rbind(newfd, newfd%>%filter(xpert==0), newfd%>%filter(xpert==0), newfd%>%filter(xpert==0), newfd%>%filter(xpert==0), newfd%>%filter(xpert==0), newfd%>%filter(xpert==0), newfd%>%filter(xpert==0))
newfd10.simple.external <- newfd.10 %>% dplyr::select(simple.external, n_tbsymp, symp_2wks, agecat2, sexcat, hivcat1, dbcat, symp_fac___1cat, symp_fac___2cat, symp_fac___3cat, symp_fac___4cat, n_other_sympcat, edu8, pasttb, eversmoke, lungcat, xpert)
newfd10.simple.external[] <- lapply(newfd10.simple.external, function(x){return(as.factor(x))})
imputed10.simple.external<- mice(newfd10.simple.external, m=15, maxit=10, method=c(rep('pmm',2),'logreg','polyreg',rep('logreg',13)))
summary(imputed10.simple.external)
######################################################################################################################################################################################################
######################################################################################################################################################################################################
#### Make a risk score
### Use 'all' case definition
## A simple risk score
# model-derived
dt.rs$simple <- dt.rs$agecat5 + dt.rs$sex_female1 + dt.rs$hivarv + dt.rs$n_tb_symps + dt.rs$symps_weeks_cat2 + dt.rs$eversmoke + dt.rs$coughsint_1
# internal validation
dt.rs.exc$simple <- dt.rs.exc$agecat5 + dt.rs.exc$sex_female1 + dt.rs.exc$hivarv + dt.rs.exc$n_tb_symps + dt.rs.exc$symps_weeks_cat2 + dt.rs.exc$eversmoke + dt.rs.exc$coughsint_1
# 10% model-derived
dt.rs10$simple <- dt.rs10$agecat5 + dt.rs10$sex_female1 + dt.rs10$hivarv + dt.rs10$n_tb_symps + dt.rs10$symps_weeks_cat2 + dt.rs10$eversmoke + dt.rs10$coughsint_1
# 10% internal validation
dt.rs.exc10$simple <- dt.rs.exc10$agecat5 + dt.rs.exc10$sex_female1 + dt.rs.exc10$hivarv + dt.rs.exc10$n_tb_symps + dt.rs.exc10$symps_weeks_cat2 + dt.rs.exc10$eversmoke + dt.rs.exc10$coughsint_1
## A coefficient risk score
glm(casepr ~ as.factor(agecat) + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(link="logit"), data=dt.rs)
exp<-glm(casepr ~ as.factor(agecat6) + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(link="logit"), data=dt.rs)
exp.c<-glm(casepr ~ as.factor(agecat6) + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(), data=dt.rs.exc)
# model-derived
derived.coeff <- glm(casepr ~ as.factor(agecat5) + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(link="logit"), data=dt.rs)
# internal validation
internal.coeff <- glm(casepr ~ agecat5 + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(), data=dt.rs.exc)
# 10% model-derived
derived10.coeff <- glm(casepr ~ dt.rs10$agecat5 + dt.rs10$sex_female1 + dt.rs10$hivarv + dt.rs10$n_tb_symps + dt.rs10$symps_weeks_cat2 + dt.rs10$eversmoke + dt.rs10$coughsint_1, family=binomial, data=dt.rs10)
# 10% internal validation
internal10.coeff <- glm(casepr ~ dt.rs.exc10$agecat5 + dt.rs.exc10$sex_female1 + dt.rs.exc10$hivarv + dt.rs.exc10$n_tb_symps + dt.rs.exc10$symps_weeks_cat2 + dt.rs.exc10$eversmoke + dt.rs.exc10$coughsint_1, family=binomial, data=dt.rs.exc10)
### Use TB case definition
## A simple risk score
# model-derived
st.xp.rs$simple <- st.xp.rs$agecat5 + st.xp.rs$sex_female1 + st.xp.rs$hivarv + st.xp.rs$n_tb_symps + st.xp.rs$symps_weeks_cat2 + st.xp.rs$eversmoke + st.xp.rs$coughsint_1
# internal validation
st.xp.rs.exc$simple <- st.xp.rs.exc$agecat5 + st.xp.rs.exc$sex_female1 + st.xp.rs.exc$hivarv + st.xp.rs.exc$n_tb_symps + st.xp.rs.exc$symps_weeks_cat2 + st.xp.rs.exc$eversmoke + st.xp.rs.exc$coughsint_1
# overall
st.xp$simple <- st.xp$agecat5 + st.xp$sex_female1 + st.xp$hivarv + st.xp$n_tb_symps + st.xp$symps_weeks_cat2 + st.xp$eversmoke + st.xp$coughsint_1
# 10% model-derived
st.xp.rs.10$simple <- st.xp.rs.10$agecat5 + st.xp.rs.10$sex_female1 + st.xp.rs.10$hivarv + st.xp.rs.10$n_tb_symps + st.xp.rs.10$symps_weeks_cat2 + st.xp.rs.10$eversmoke + st.xp.rs.10$coughsint_1
# 10% internal validation
st.xp.rs.exc.10$simple <- st.xp.rs.exc.10$agecat5 + st.xp.rs.exc.10$sex_female1 + st.xp.rs.exc.10$hivarv + st.xp.rs.exc.10$n_tb_symps + st.xp.rs.exc.10$symps_weeks_cat2 + st.xp.rs.exc.10$eversmoke + st.xp.rs.exc.10$coughsint_1
# 10% overall
## A coefficient risk score
# model-derived
xderived.coeff <- glm(xpertcc ~ agecat5 + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(link="logit"), data=st.xp.rs)
# internal validation
xinternal.coeff <- glm(xpertcc ~ agecat5 + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(), data=st.xp.rs.exc)
# overall
xoverall.coeff <- glm(xpertcc ~ agecat5 + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(), data=st.xp.1)
# 10% model-derived
xderived10.coeff <- glm(xpertcc ~ agecat5 + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial, data=st.xp.rs.10)
# 10% internal validation
xinternal10.coeff <- glm(xpertcc ~ agecat5 + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial, data=st.xp.rs.exc.10)
# 10% overall
xoverall10.coeff <- glm(xpertcc ~ agecat5 + sex_female1 + hivarv + n_tb_symps + symps_weeks_cat2 + eversmoke + coughsint_1, family=binomial(), data=st.xp.10)
###### ROC ######
par("mar")
par(mar=c(1,1,1,1))
# par(pty='s')
### All case & Xpert case definition, simple scoring own data (derivation, internal, and external)
original.roc <- with(imputed.simple.external, roc(xpert~ glm(xpert~as.numeric(simple.external), family=binomial())$fitted.values, plot=T, col=4, main="Discrimination in a simple scoring system \nwith the original data", bty='n'))
###################################### Kharitode multiple imputation ROC mean and 95%CI ######################################
# original.auc <- with(imputed.simple.external, roc(xpert~ glm(xpert~as.numeric(simple.external), family=binomial())$fitted.values)$auc)
# me = mean(unlist(original.auc$analyses))
# m = 15
# Vb = var(unlist(original.auc$analyses))
# Vw0 <- NA
# for(i in 1:15){
# Vw0[[i]] <- (original.auc$analyses[[i]]-me)^2
# }
# Vw = (1/(m-1))*sum(Vw0)
# SEp = sqrt(sum(Vb, Vw, Vb/m))
# t = 2.021
# # lambda = sum(Vb, Vb/m)/sum(Vb, Vw, Vb/m)
# # dfo = (m-1)/(lambda)^2
# # dfb = ((1634-15)+1)/((1634-15)+3)*(1634-15)*(1-lambda)
# # dfa = dfo*dfb/(dfo+dfb)
# # l95ci = me - qnorm(.975)*SEp/sqrt(m)
# # h95ci = me + qnorm(.975)*SEp/sqrt(m)
# l95ci = me - t*SEp
# h95ci = me + t*SEp
# me; l95ci; h95ci
##################################################################################################################
roc(dt.rs$casepr ~ dt.rs$simple, data=dt.rs, ci=T, plot=T, col=1, lty=1, add=T)
roc(casepr ~ simple, data=dt.rs.exc, ci=T, plot=T, col=2, lty=1, add=T)
# DON'T NEED TO SEPARATE XPERTCC because model was not derived by Xpert case definition
# roc(st.xp.rs$xpertcc ~ st.xp.rs$simple, ci=T, plot=T, col=1, lty=2, add=T)
# roc(xpertcc ~ simple, data=st.xp.rs.exc, ci=T, plot=T, col=2, lty=2, add=T)
roc(st.xp.1$xpertcc ~ st.xp.1$simple, ci=T, plot=T, col=3, lty=1, add=T)
# legend("bottomright",
# legend=c("Derivation population (All), \nc-statistic=0.793 (0.737-0.849)", "Internal validation (All), \nc-statistic=0.812 (0.730-0.893)",
# "Derivation population (Xpert), \nc-statistic=0.766 (0.694-0.839)", "Internal validation (Xpert), \nc-statistic=0.852 (0.773-0.931)",
# "External validation, \nc-statistic =0.781 (0.776-0.785)"),
# col=c(1,2,1,2,4), lty=c(1,1,2,2,1), cex=0.7, box.lty=0, bty = "n")
legend("right",
legend=c("Derivation population, \nc-statistic=0.793 (0.737-0.849)\n", "Internal validation, \nc-statistic=0.812 (0.730-0.893)\n",
"Xpert case definition, \nc-statistic=0.795 (0.740-0.850)\n", "External validation, \nc-statistic =0.781 (0.776-0.785)\n"),
col=c(1,2,3,4), lty=c(1,1,1,1), cex=0.7, box.lty=0, bty = "n")
### All case & Xpert case definition, coeff scoring own data (derivation, internal, and external)coeforiginal.roc <- with(imputed.original, roc(xpert~ glm(xpert~n_tbsymp+symp_2wks+agecat2+sexcat+hivcat1+dbcat+n_other_sympcat, family=binomial())$fitted.values, plot=T)) #ci=T
coeforiginal.roc <- with(imputed.simple.external, roc(xpert~ glm(xpert~n_tbsymp+symp_2wks+agecat2+sexcat+hivcat1+dbcat+n_other_sympcat, family=binomial())$fitted.values, plot=T, col=4, main="Discrimination in a coefficient scoring system \nwith the original data", bty='n')) #ci=T
###################################### Kharitode multiple imputation ROC mean and 95%CI ######################################
# coeforiginal.auc <- with(imputed.simple.external, roc(xpert~ glm(xpert~n_tbsymp+symp_2wks+agecat2+sexcat+hivcat1+dbcat+n_other_sympcat, family=binomial())$fitted.values)$auc)
# me = mean(unlist(coeforiginal.auc$analyses))
# m = 15
# Vb = var(unlist(coeforiginal.auc$analyses))
# Vw0 <- NA
# for(i in 1:15){
# Vw0[[i]] <- (coeforiginal.auc$analyses[[i]]-me)^2
# }
# Vw = (1/(m-1))*sum(Vw0)
# SEp = sqrt(sum(Vb, Vw, Vb/m))
# t = 2.021
# # lambda = sum(Vb, Vb/m)/sum(Vb, Vw, Vb/m)
# # dfo = (m-1)/(lambda)^2
# # dfb = ((1634-15)+1)/((1634-15)+3)*(1634-15)*(1-lambda)
# # dfa = dfo*dfb/(dfo+dfb)
# l95ci = me - t*SEp
# h95ci = me + t*SEp
# me; l95ci; h95ci
##################################################################################################################
roc(casepr ~ derived.coeff$fitted.values, data=dt.rs, ci=T, plot=T, col=1, lty=1, add=T)
roc(dt.rs.exc[-c(39),]$casepr ~ internal.coeff$fitted.values, data=dt.rs.exc, ci=T, plot=T, col=2, lty=1, add=T)
# roc(xpertcc ~ xderived.coeff$fitted.values, data=st.xp.rs, ci=T, plot=T, col=1, lty=2, add=T)
# roc(xpertcc ~ xinternal.coeff$fitted.values, data=st.xp.rs.exc, ci=T, plot=T, col=2, lty=2, add=T)
roc(st.xp.1[-c(224),]$xpertcc ~ xoverall.coeff$fitted.values, ci=T, plot=T, col=3, lty=1, add=T)
legend("right",
legend=c("Derivation population, \nc-statistic=0.827 (0.777-0.877)\n", "Internal validation, \nc-statistic=0.837 (0.755-0.920)\n",
"Xpert case definition, \nc-statistic=0.825 (0.773-0.876)\n", "External validation, \nc-statistic =0.818 (0.814-0.823)\n"),
col=c(1,2,3,4), lty=c(1,1,1,1), cex=0.7, box.lty=0, bty = "n")
roc(casepr ~ exp$fitted.values, data=dt.rs, ci=T)
roc(dt.rs.exc[-c(39),]$casepr ~ exp.c$fitted.values, data=dt.rs.exc, ci=T)
roc(dt.rs.exc[-c(39),]$casepr ~ internal.coeff$fitted.values, data=dt.rs.exc, ci=T)
### All case & Xpert case definition, simple scoring 10% prevalence (derivation, internal, and external)
original10.roc <- with(imputed10.simple.external, roc(xpert~ glm(xpert~as.numeric(simple.external), family=binomial())$fitted.values, plot=T, col=4, main="Discrimination in a simple scoring system \nunder 10% TB prevalence", bty='n'))
# original10.auc <- with(imputed10.simple.external, roc(xpert~ glm(xpert~as.numeric(simple.external), family=binomial())$fitted.values)$auc)
# mean(unlist(original10.auc$analyses))
roc(dt.rs10$casepr ~ dt.rs10$simple, data=dt.rs10, plot=T, col=1, lty=1, add=T)
roc(casepr ~ simple, data=dt.rs.exc10, plot=T, col=2, lty=1, add=T)
roc(st.xp.10$xpertcc ~ st.xp.10$simple, plot=T, col=3, lty=1, add=T)
legend("right",
legend=c("Derivation population, c-statistic=0.793", "Internal validation, c-statistic=0.812",
"Xpert case definition, c-statistic=0.795", "External validation, c-statistic =0.780"),
col=c(1,2,3,4), lty=c(1,1,1,1), cex=0.7, box.lty=0, bty = "n")
### All case & Xpert case definition, coeff scoring 10% prevalence (derivation, internal, and external)coeforiginal.roc <- with(imputed.original, roc(xpert~ glm(xpert~n_tbsymp+symp_2wks+agecat2+sexcat+hivcat1+dbcat+n_other_sympcat, family=binomial())$fitted.values, plot=T)) #ci=T
coeforiginal10.roc <- with(imputed10.simple.external, roc(xpert~ glm(xpert~n_tbsymp+symp_2wks+agecat2+sexcat+hivcat1+dbcat+n_other_sympcat, family=binomial())$fitted.values, plot=T, col=4, main="Discrimination in a coefficient scoring system \nunder 10% TB prevalence", bty='n')) #ci=T
# coeforiginal10.auc <- with(imputed10.simple.external, roc(xpert~ glm(xpert~n_tbsymp+symp_2wks+agecat2+sexcat+hivcat1+dbcat+n_other_sympcat, family=binomial())$fitted.values)$auc)
# mean(unlist(coeforiginal10.auc$analyses))
roc(casepr ~ derived10.coeff$fitted.values, data=dt.rs10, plot=T, col=1, lty=1, add=T)
roc(dt.rs.exc10[-c(39),]$casepr ~ internal10.coeff$fitted.values, data=dt.rs.exc10, plot=T, col=2, lty=1, add=T)
roc(st.xp.10[-c(224),]$xpertcc ~ xoverall10.coeff$fitted.values, plot=T, col=3, lty=1, add=T)
legend("right",
legend=c("Derivation population, c-statistic=0.827", "Internal validation, c-statistic=0.836",
"Xpert case definition, c-statistic=0.828", "External validation, c-statistic =0.817"),
col=c(1,2,3,4), lty=c(1,1,1,1), cex=0.7, box.lty=0, bty = "n")
###### Calibration ######
st.xp.1$xpert <- st.xp.1$xpertcc
st.xp.1$simple.external <- st.xp.1$simple
st.xp.10$xpert <- st.xp.10$xpertcc
st.xp.10$simple.external <- st.xp.10$simple
# simple scoring
fit <- glm(xpert ~ simple.external, family=binomial(logit), data=st.xp.1)
fit10 <- glm(xpert ~ simple.external, family=binomial(logit), data=st.xp.10)
# example -- repeat in each dataset and compute mean and variance and plot manually
data1 <- mice::complete(imputed.simple.external,1); data1$simple.external <- as.numeric(as.character(data1$simple.external))
prob1 <- fit %>% predict(data1, type="response")
prob1.1 <- prob1[!is.na(prob1)]
prob1.2 <- prob1.1*(289/102)*(772/1634)
prob1.3 <- ifelse(prob1.2>=1, 0.999, prob1.2)
prob1.4 <- ifelse(prob1.2>=1, NA, prob1.2)
prob1.5 <- prob1.4[!is.na(prob1.4)]
data1$prob1.5 <- prob1.4
val1 <- val.prob.ci.2(prob1.5, as.numeric(as.character(data1[complete.cases(data1$prob1.5),]$xpert)), logit, logistic.cal = T)
val1<- val1[c(12,13)]
data101 <- mice::complete(imputed10.simple.external,1); data101$simple.external <- as.numeric(as.character(data101$simple.external))
prob101 <- fit10 %>% predict(data101, type="response")
prob101.1 <- prob101[!is.na(prob101)]
val101 <- val.prob.ci.2(prob101.1, as.numeric(as.character(data101$xpert)), logit, logistic.cal = T)
val101<- val101[c(12,13)]
# repeat in each dataset and compute mean and variance and plot manually
datax <- mice::complete(imputed.simple.external,15); datax$simple.external <- as.numeric(as.character(datax$simple.external))
probx <- fit %>% predict(datax, type="response")
probx.1 <- probx[!is.na(probx)]
probx.2 <- probx.1*(289/102)*(772/1634)
probx.3 <- ifelse(probx.2>=1, 0.999, probx.2)
valx <- val.prob.ci.2(probx.3, as.numeric(as.character(datax$xpert)), plot=F, logit, logistic.cal = T)
val15<-valx[c(12,13)]
valrbind <- as.data.frame(rbind(val1,val2,val3,val4,val5,val6,val7,val8,val9,val10,val11,val12,val13,val14,val15))
# m=15
# me=mean(valrbind$Intercept)
# vb=var(valrbind$Intercept)
# vw=(valrbind$Intercept-mean(valrbind$Intercept))^2 * (1/(m-1))
# SEp = sqrt(sum(vb, vw, vb/m))
# # # lambda = sum(vb, vb/m)/sum(vb, vw, vb/m)
# # # dfo = (m-1)/(lambda)^2
# # # dfb = ((1634-15)+1)/((1634-15)+3)*(1634-15)*(1-lambda)
# # # dfa = dfo*dfb/(dfo+dfb)
# t = 2.021
# l95ci = me - t*SEp
# h95ci = me + t*SEp
me; l95ci; h95ci # 0.68 (0.66 - 0.71)
# m=15
# me=mean(valrbind$Slope)
# vb=var(valrbind$Slope)
# vw=(valrbind$Slope-mean(valrbind$Slope))^2 * (1/(m-1))
# SEp = sqrt(sum(vb, vw, vb/m))
# # # lambda = sum(vb, vb/m)/sum(vb, vw, vb/m)
# # # dfo = (m-1)/(lambda)^2
# # # dfb = ((1634-15)+1)/((1634-15)+3)*(1634-15)*(1-lambda)
# # # dfa = dfo*dfb/(dfo+dfb)
# t = 2.021
# l95ci = me - t*SEp
# h95ci = me + t*SEp
me; l95ci; h95ci # 0.62 (0.60-0.64)
|
be596476d38e6b41a2b8784862a296e65e8b6438 | 110731767c52d468a6eb958fe2a7bd1d76b405fb | /man/set_vlp_input.Rd | 18c0e03671bd44a27deadcea9f5dc9e691e720b1 | [] | no_license | JFernandez696/rNodal | 1a7e2fc3ab9930bbc208701688e31bac9fe84ce6 | 09bc0fedbc0f5517233b8a2d2cbe94f659e5473a | refs/heads/master | 2021-10-20T00:09:33.150787 | 2017-10-27T19:35:22 | 2017-10-27T19:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,449 | rd | set_vlp_input.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_generics.R, R/class-VLP.R
\docType{methods}
\name{set_vlp_input}
\alias{set_vlp_input}
\alias{set_vlp_input,VLP-method}
\title{Set the VLP input for the well}
\usage{
set_vlp_input(object, ...)
\S4method{set_vlp_input}{VLP}(object, field.name = "HAGBR.GUO",
well.name = "ExampleGuo", depth.wh = 0, depth.bh = 9700,
diam.in = 1.995, GLR = 75, liq.rt = 758, wcut = 0.1, thp = 200,
tht = 80, bht = 180, API = 40, gas.sg = 0.7, wat.sg = 1.05,
oil.visc = 5, ed = 6e-04, if.tens = 30, salinity = 0, ...)
}
\arguments{
\item{object}{a class object}
\item{...}{additional parameters}
\item{field.name}{field name. Usually comprises several wells}
\item{well.name}{well name}
\item{depth.wh}{depth at wellhead}
\item{depth.bh}{depth at bottomhole of well feet}
\item{diam.in}{well diameter inches}
\item{GLR}{gas liquid ratio}
\item{liq.rt}{liquid rate}
\item{wcut}{watercut}
\item{thp}{tubing head pressure}
\item{tht}{tubing head temperature}
\item{bht}{bottomhole temperature}
\item{API}{gravity of oil}
\item{gas.sg}{specific gravity of gas}
\item{wat.sg}{specific gravity of water}
\item{oil.visc}{oil viscosity}
\item{ed}{relative roughness}
\item{if.tens}{interfacial tension between the fluid phases}
\item{salinity}{water salinity}
}
\description{
Set the VLP input for the well
Set the VLP inputs
}
|
6a6b12be79a2f69ac181183e445597f45c5c465e | 167dc4df143d856f9e361b2320f26e2253d56f73 | /bbs/bbs-spPGOccGP.R | 28e709d29395d959ecfcb71bd12c24fa5a7ec7eb | [] | no_license | doserjef/Doser_etal_2021_spOccupancy | 3bb5d930eb21c946ee518df8cedf0cb612cf5cc9 | f68bc26ee2fa8d91175039cf496e0edeb794c114 | refs/heads/main | 2023-04-11T17:46:02.564432 | 2022-04-04T10:31:07 | 2022-04-04T10:31:07 | 422,871,478 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,542 | r | bbs-spPGOccGP.R | # bbs-spPGOccGP.R: this script runs a single species spatial occupancy model for
# Black-throated Blue Warbler across the eastern US in 2018.
# The model is fit with a full Gaussian process.
# Author: Jeffrey W. Doser
# Citation:
# NOTE(review): rm(list = ls()) wipes the interactive workspace; acceptable
# for a standalone batch script, but do not source() this file into a session.
rm(list = ls())
library(spOccupancy)
library(coda)
library(sf)
# Read in the data --------------------------------------------------------
# Loads `bbs.btnw.dat`, the detection/covariate bundle used throughout below.
load("bbs/data/bbs-btnw-bundle.R")
# Get coordinates in a projection rather than lat-long.
coords.sf <- st_as_sf(data.frame(bbs.btnw.dat$coords),
                      coords = c("Longitude", "Latitude"),
                      crs = "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
# Albers equal area across contiguous US.
coords.sf.albers <- coords.sf %>%
  st_transform(crs = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs")
# Get coordinates in Albers Equal Area
coords.albers <- st_coordinates(coords.sf.albers)
# Convert coordinates to km in Albers equal area.
bbs.btnw.dat$coords <- coords.albers / 1000
# Get inits values -----------------------------------------------------
# This is used to run scripts from the command line to run multiple chains
# simultaneously across multiple cores. This is a basic way of running
# multiple chains in parallel using spOccupancy. To run a single chain
# directly in the script, uncomment the following line of code and comment
# out the line of code underneath it:
# p.file.name <- "pfile-sp-1"
p.file.name <- commandArgs(trailingOnly = TRUE)
# Chain id = third "-"-separated token of the pfile name (e.g. "1" from
# "pfile-sp-1"); it is only used to label the output file at the bottom.
chain <- unlist(strsplit(p.file.name, "-"))[3]
# Each pfile holds two space-separated columns: parameter name, start value.
p.file <- read.table(paste("bbs/", p.file.name, sep = ''),
                     sep = ' ', header = FALSE)
alpha.start <- p.file[p.file[, 1] == 'alpha', 2]
beta.start <- p.file[p.file[, 1] == 'beta', 2]
sigma.sq.start <- p.file[p.file[, 1] == 'sigma.sq', 2]
phi.start <- p.file[p.file[, 1] == 'phi', 2]
w.start <- p.file[p.file[, 1] == 'w', 2]
# Run Model ---------------------------------------------------------------
# Occupancy covariates: elevation, elev.2 (presumably elevation squared), and
# percent forest; detection: day of year (+ day.2), time of day, and a random
# observer intercept -- confirm covariate meanings against the data bundle.
occ.formula <- ~ elev + elev.2 + pf
det.formula <- ~ day + day.2 + tod + (1 | obs)
p.det <- length(bbs.btnw.dat$det.covs)
p.occ <- ncol(bbs.btnw.dat$occ.covs) + 1
# Prep spatial stuff
# Pairwise inter-site distances (km) bound the uniform prior on the spatial
# decay parameter phi below.
dist.bbs <- dist(bbs.btnw.dat$coords)
mean.dist <- mean(dist.bbs)
min.dist <- min(dist.bbs)
max.dist <- max(dist.bbs)
inits <- list(alpha = rep(alpha.start, p.det),
              beta = rep(beta.start, p.occ),
              sigma.sq = sigma.sq.start,
              phi = phi.start,
              w = rep(w.start, nrow(bbs.btnw.dat$y)),
              # Initialise latent occupancy at the observed per-site maximum.
              z = apply(bbs.btnw.dat$y, 1, max, na.rm = TRUE))
priors <- list(beta.normal = list(mean = rep(0, p.occ),
                                  var = rep(2.72, p.occ)),
               alpha.normal = list(mean = rep(0, p.det),
                                   var = rep(2.72, p.det)),
               # Uniform prior on phi bounded by 3/max and 3/min inter-site
               # distance (effective-range convention for the exponential
               # covariance used below).
               phi.unif = c(3 / max.dist, 3 / min.dist),
               sigma.sq.ig = c(2, 5))
# 2000 batches x 25 iterations = 50,000 MCMC samples; after dropping 10,000
# as burn-in and thinning by 20, 2,000 posterior samples are retained.
batch.length <- 25
n.batch <- 2000
n.burn <- 10000
n.thin <- 20
n.report <- 20
tuning <- list(phi = 1)
# Run model
out <- spPGOcc(occ.formula = occ.formula,
               det.formula = det.formula,
               data = bbs.btnw.dat,
               inits = inits,
               batch.length = batch.length,
               n.batch = n.batch,
               tuning = tuning,
               priors = priors,
               n.omp.threads = 1,
               verbose = TRUE,
               NNGP = FALSE,
               cov.model = 'exponential',
               n.burn = n.burn,
               n.thin = n.thin,
               n.report = n.report)
# Save results
save(out, file = paste("results/bbs-spPGOcc-GP-", chain, "-",
                       Sys.Date(), ".R", sep = ''))
|
f2730b557ddd9df6f6ca2caf4f65da7e88d3ee07 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831050-test.R | 52edd9dceac286289b7bdf5fac43165a9f68344b | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 604 | r | 1615831050-test.R | testlist <- list(doy = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), latitude = numeric(0), temp = c(9.61276249046606e+281, 9.61276194949246e+281, 8.44440274424104e-227, 2.37636672466555e-212, 8.5728629954997e-312, 1.56898420607264e+82, -1.39145984782891e+306, -2.4457606875069e-94, 2.79620616431139e-119, -2.63944113342412e-209, 8.07366913339783e-292, 6.2322638180856e-218, 2.6432918059875e-104, -5.83380844035165e+196, 6.31973900816608e-261, 1.3468020202225e-20, -3.01941152118334e+268, -7.42049538392595e+278))
# Invoke the internal ET0 Thornthwaite-Wilmott routine from the meteor
# package with the fuzzer-generated argument list above, then print the
# structure of whatever it returns (valgrind regression check).
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
bc0b331c8abbc20f407b4be0c850c1bdf860a0de | c320f24a8099951a226944cb5ca681808f6689c5 | /2a_Genome_assembly/E_assembly_evaluation/batch_table_functions.R | 9820b40fc51bd77a65c1456dbaaab0a7e9f48ad1 | [] | no_license | AsexGenomeEvol/Timema_asex_genomes | 8abb44a8eee376aaf1e9f71fa3a825a9c2850416 | d224ec578dce30799e152a9a29b134fd725e0ad5 | refs/heads/main | 2023-04-16T02:55:04.951616 | 2022-05-31T16:27:42 | 2022-05-31T16:27:42 | 313,881,475 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,624 | r | batch_table_functions.R | require(AsexStats)
# Read a BUSCO "short_summary" file from an assembly directory and return the
# four BUSCO scores as percentages of the total gene set.
#
# asm_path : path to the assembly directory (NA allowed -> all-NA row)
# returns  : one-row data.frame with columns complete, duplicated, fragmented,
#            missing, each a percentage rounded to 2 decimals, or all-NA when
#            no unique 'short_summary' file is found in asm_path.
#
# NOTE: `ssplit` comes from the AsexStats package required at the top of this
# file. The fixed line numbers (10, 12-15) assume the BUSCO short-summary
# layout used when this was written -- confirm against the BUSCO version in use.
read_busco <- function(asm_path){
    # No assembly directory recorded for this batch: return placeholder scores.
    if(is.na(asm_path)){
        return(data.frame(complete = NA, duplicated = NA, fragmented = NA, missing = NA))
    }
    busco_file <- dir(asm_path, full.names = T, pattern = 'short_summary')
    # Require exactly one summary file; zero or several means the scores
    # cannot be attributed unambiguously.
    if(length(busco_file) != 1){
        return(data.frame(complete = NA, duplicated = NA, fragmented = NA, missing = NA))
    }
    busco_file <- readLines(busco_file)
    # Counts are the second tab-separated field on each fixed summary line.
    total_genes <- as.numeric(ssplit(busco_file[15], '\t')[2])
    bscores <- data.frame(complete = as.numeric(ssplit(busco_file[10], '\t')[2]),
                          duplicated = as.numeric(ssplit(busco_file[12], '\t')[2]),
                          fragmented = as.numeric(ssplit(busco_file[13], '\t')[2]),
                          missing = as.numeric(ssplit(busco_file[14], '\t')[2]))
    # Convert raw counts to percentages of the total BUSCO gene set.
    bscores[1,] <- round(100 * (bscores[1,] / total_genes), 2)
    return(bscores)
}
# Collect per-species assembly statistics for one assembly batch.
#
# batch   : batch identifier passed through to `batch_subset`
# returns : data.frame with one row per species: sp, total_sum (assembly
#           length in Gbp, 3 decimals), NG50, BUSCO complete / fragmented
#           scores, and Ns as a percentage of total assembly length.
#
# NOTE: relies on names from the AsexStats package loaded at the top of this
# file (`timemas` species list, `make_data_frame`, `batch_subset`) and on the
# repository's stats/ and data/ directory layout.
batch_stats <- function(batch){
    batch_table <- make_data_frame(c('sp', 'total_sum', 'NG50', 'BUSCOc', 'BUSCOf', 'Ns'))
    for(sp in timemas){
        # Per-species scaffold statistics table; one row per assembly.
        asm <- read.table(paste0('stats/assemblies/',sp,'_scfs.tsv'), header = T)
        # Keep only the first assembly of the requested batch.
        sp_batch <- batch_subset(asm, batch)[1,]
        asm_path <- paste0('data/',sp,'/assembly/',sp_batch$dir)
        # N-count file is optional; missing file -> NA.
        N_file <- dir(asm_path, full.names = T, pattern = '_Ns.tsv')
        if(length(N_file) == 1){
            Ns <- read.table(N_file)[1,2]
        } else {
            Ns <- NA
        }
        BUSCOs <- read_busco(asm_path)
        batch_table <- rbind(batch_table, data.frame(sp = sp,
                                                     total_sum = sp_batch$total_sum,
                                                     NG50 = sp_batch$NG50,
                                                     complete = BUSCOs$complete,
                                                     fragmented = BUSCOs$fragmented,
                                                     Ns = Ns))
    }
    # Express Ns as a percentage of assembly length, then length in Gbp.
    # (Order matters: Ns must be scaled before total_sum is rescaled.)
    batch_table$Ns <- round(100 * (batch_table$Ns / batch_table$total_sum), 2)
    batch_table$total_sum <- round(batch_table$total_sum / 1e9, 3)
    return(batch_table)
}
# Summarise one assembly statistic over a batch table from batch_stats().
#
# batch_table : data.frame with one row per assembly; odd and even rows are
#               summarised as two separate groups (presumably the two
#               reproductive modes, following the ordering of `timemas` --
#               confirm against the species list).
# var         : name of the numeric column to summarise
# returns     : numeric vector c(overall min, median of odd rows,
#               median of even rows, overall max), with NAs removed.
#
# Generalised: the row indices were hard-coded as seq(1, 10, by = 2) /
# seq(2, 10, by = 2) (exactly 10 assemblies); they are now derived from
# nrow(batch_table), which yields identical results for the original
# 10-row input.
var_summary <- function(batch_table, var){
    values <- batch_table[, var]
    n <- nrow(batch_table)
    # Logical recycling selects odd/even row indices for any table size.
    odd_rows <- seq_len(n)[c(TRUE, FALSE)]
    even_rows <- seq_len(n)[c(FALSE, TRUE)]
    c(min(values, na.rm = TRUE),
      median(values[odd_rows], na.rm = TRUE),
      median(values[even_rows], na.rm = TRUE),
      max(values, na.rm = TRUE))
}
# Collapse a batch table into one flat numeric summary vector.
#
# For each of the five tracked assembly statistics, the four numbers from
# var_summary() (min, odd-row median, even-row median, max) are appended in
# order, producing a 20-element vector covering total_sum, NG50, complete,
# fragmented and Ns.
get_batch_table <- function(batch_table){
    tracked_stats <- c('total_sum', 'NG50', 'complete', 'fragmented', 'Ns')
    per_stat <- lapply(tracked_stats, function(stat) var_summary(batch_table, stat))
    unlist(per_stat, use.names = FALSE)
}
|
6a87bcc86eb1ef62e68ac02a0914d191d054613f | 9f3c0e3d1030a760b2ab2aaeea0c63df6f428315 | /RY-Shiny/app.R | 25d19a010488fa8a5c6b0fa31c8948a1fae120b2 | [] | no_license | ISSS608-G1-Group11/ISSS608_Group_Project | 0223db7814b00df70b8ce364c643d073d469624d | 7f63fcd8f711213e79a3f8b8e2152d218a85ef86 | refs/heads/master | 2023-07-07T15:14:26.765999 | 2021-08-15T18:17:05 | 2021-08-15T18:17:05 | 378,569,486 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,683 | r | app.R | library(shiny)
packages = c('raster','sf','tmap','clock','tidyverse','lubridate','ggiraph',
             'ggthemes','viridis','plotly','treemapify','igraph','ggpubr',
             'readr','mapview')
# Install any package that is not already available, then attach every one.
for (p in packages){
  if(!require(p, character.only = T)){
    install.packages(p)
  }
  library(p,character.only = T)
}
# Credit-card transaction records, one row per transaction with a `location`
# column (appears to be VAST Challenge "Kronos" data -- confirm).
cd <- read_csv("data/cc_data.csv")
cd_locations <- unique(cd$location)
# Number of credit-card transactions per location; drives both the bar chart
# and the data table in the app below.
cdcount_location <- cd %>% group_by(location) %>%
  summarize(count = n())
# Parallel lookup vectors (33 entries each) mapping every location name to a
# venue category used as the bar-chart fill.
# NOTE(review): "Dinning" is a misspelling of "Dining", but it is a displayed
# factor level (chart legend), so renaming it would change the app's output.
oldvalues <- c("Abila Airport","Abila Scrapyard","Abila Zacharo",
               "Ahaggo Museum","Albert's Fine Clothing",
               "Bean There Done That","Brew've Been Served",
               "Brewed Awakenings","Carlyle Chemical Inc.",
               "Chostus Hotel","Coffee Cameleon","Coffee Shack",
               "Desafio Golf Course","Frank's Fuel",
               "Frydos Autosupply n' More","Gelatogalore",
               "General Grocer","Guy's Gyros","Hallowed Grounds",
               "Hippokampos","Jack's Magical Beans","Kalami Kafenion",
               "Katerina's Cafe","Kronos Mart","Kronos Pipe and Irrigation",
               "Maximum Iron and Steel","Nationwide Refinery",
               "Octavio's Office Supplies","Ouzeri Elian",
               "Roberts and Sons","Shoppers' Delight",
               "Stewart and Sons Fabrication","U-Pump")
newvalues <- factor(c("Business","Business","Unknown",
                      "Living","Living","Unknown","Dinning",
                      "Unknown","Business","Living","Dinning",
                      "Dinning","Living","Unknown","Unknown",
                      "Dinning","Living","Dinning","Dinning",
                      "Living","Living","Unknown","Dinning",
                      "Living","Business","Business","Business",
                      "Business","Unknown","Business","Living",
                      "Business","Unknown"
                      ))
# Attach the venue category to each location via positional lookup.
cdcount_location$type <- newvalues[ match(cdcount_location$location, oldvalues) ]
# Define UI for application that draws a histogram
# (in fact a horizontal bar chart of location popularity -- see server below).
ui <- fluidPage(
    # Application title
    titlePanel("Popular locations"),
    sidebarLayout(
        # Sidebar: single checkbox toggling the data table under the chart.
        sidebarPanel(
            checkboxInput(inputId = "showdata",
                          label = "Show data table",
                          value = TRUE)
        ),
        # Main area: interactive plotly bar chart plus optional DT table.
        mainPanel(plotlyOutput("barchart"),
                  DT::dataTableOutput(outputId = "bartable"))
    )
)
# Define server logic required to draw a histogram
# Server logic: renders the interactive bar chart of transaction counts per
# location and, when the "Show data table" checkbox is ticked, a paged table
# of the same summary. Both read the pre-computed global `cdcount_location`
# (columns: location, count, type).
server <- function(input, output) {
    # Horizontal bar chart: locations ordered by credit-card transaction
    # count, filled by venue type, converted to an interactive plotly widget.
    output$barchart <- renderPlotly({
        p <- ggplot(cdcount_location,
                    aes(x = count,
                        y = reorder(location,count),
                        fill = type,
                        # NOTE(review): this unnamed aes() argument carries no
                        # aesthetic name -- it looks like a leftover from a
                        # label/tooltip attempt; confirm it is intentional.
                        stringr::str_wrap(cdcount_location$location,15)))+
            geom_col(color = "grey") +
            xlab("Frequency") + ylab("Location") +
            ggtitle("Popularity of each place (Credit)") +
            # Bold dark-blue axis labels on a blank, grid-free background.
            theme(axis.text.x = element_text(face="bold", color="#000092",
                                             size=8, angle=0),
                  axis.text.y = element_text(face="bold", color="#000092",
                                             size=8, angle=0),
                  panel.background = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank())
        ggplotly(p)
    })
    # Optional summary table; the expression yields NULL (renders nothing)
    # when the checkbox is unticked.
    output$bartable <- DT::renderDataTable({
        if(input$showdata){
            DT::datatable(data = cdcount_location %>% select (1:3),
                          options = list(pageLength = 10),
                          rownames = FALSE)
        }
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
ca5060924dbbd7b875198fb6c8aa7191e29b0cde | 28259b09f412e7e2e044d4b85e74d161c9b050a4 | /man/Gauss3.Rd | 935d1e0d412e9e3f90ee40aeef7ee31e55c78665 | [] | no_license | dmbates/NISTnls | 584048b42b46e693bd017c199f78f384c77138c7 | 8ab6f2804eef9e6df10e3c88d17b39784eecde28 | refs/heads/master | 2021-01-15T13:45:10.545698 | 2012-09-05T22:34:25 | 2012-09-05T22:34:25 | 5,692,485 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,376 | rd | Gauss3.Rd | %%% $Id: Gauss3.Rd,v 1.3 2003/07/22 19:42:20 bates Exp $
\name{Gauss3}
\alias{Gauss3}
\non_function{}
\title{Generated data}
\description{
The \code{Gauss3} data frame has 250 rows and 2 columns giving generated
data of Gaussian peaks with a decaying exponential background.
}
\format{
This data frame contains the following columns:
\describe{
\item{y}{
A numeric vector of generated responses.
}
\item{x}{
A numeric vector of generated inputs.
}
}
}
\details{
The data are two strongly-blended Gaussians on a
decaying exponential baseline plus normally
distributed zero-mean noise with variance = 6.25.
}
\source{
Rust, B., NIST (1996).
}
\examples{
Try <- function(expr) if (!inherits(val <- try(expr), "try-error")) val
plot(y ~ x, data = Gauss3)
Try(fm1 <- nls(y ~ b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ b6*exp( -(x-b7)**2 / b8**2 ), data = Gauss3, trace = TRUE,
start = c(b1 = 94.9, b2 = 0.009, b3 = 90.1, b4 = 113, b5 = 20,
b6 = 73.8, b7 = 140, b8 = 20)))
Try(fm1a <- nls(y ~ b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ b6*exp( -(x-b7)**2 / b8**2 ), data = Gauss3, trace = TRUE,
start = c(b1 = 94.9, b2 = 0.009, b3 = 90.1, b4 = 113, b5 = 20,
b6 = 73.8, b7 = 140, b8 = 20), alg = "port"))
Try(fm2 <- nls(y ~ b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ b6*exp( -(x-b7)**2 / b8**2 ), data = Gauss3, trace = TRUE,
start = c(b1 = 96, b2 = 0.0096, b3 = 80, b4 = 110, b5 = 25,
b6 = 74, b7 = 139, b8 = 25)))
Try(fm2a <- nls(y ~ b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ b6*exp( -(x-b7)**2 / b8**2 ), data = Gauss3, trace = TRUE,
start = c(b1 = 96, b2 = 0.0096, b3 = 80, b4 = 110, b5 = 25,
b6 = 74, b7 = 139, b8 = 25), alg = "port"))
Try(fm3 <- nls(y ~ cbind(exp(-b2*x), exp(-(x-b4)**2/b5**2), exp(-(x-b7)**2/b8**2)),
data = Gauss3, trace = TRUE,
start = c(b2 = 0.009, b4 = 113, b5 = 20, b7 = 140, b8 = 20),
algorithm = "plinear"))
Try(fm4 <- nls(y ~ cbind(exp(-b2*x), exp(-(x-b4)**2/b5**2), exp(-(x-b7)**2/b8**2)),
data = Gauss3, trace = TRUE,
start = c(b2 = 0.0096, b4 = 110, b5 = 25, b7 = 139, b8 = 25),
algorithm = "plinear"))
}
\keyword{datasets}
|
a2a74a6886af5f55a255e5eb2ba20fdb3fa78892 | f9d6ff022b97ff2d299c8927cdb8884d51e51701 | /R/replace_on_condition.R | 63bf1aef34d6d6ba329414ab490059ba4612dc61 | [] | no_license | antchau/glider | c0571ef7e69c440ca11f99026e0725ed3e126f2b | 9deaafc9aaca9c5f1e9d4fd143d8872169824a7e | refs/heads/master | 2023-01-22T03:05:37.216056 | 2020-11-30T22:26:44 | 2020-11-30T22:26:44 | 286,846,018 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,324 | r | replace_on_condition.R | #' Mutate column given a condition
#'
#' @param df A data frame
#' @param condition An anonymous Function that evaluates to a boolean.
#' @param pattern_condition_col A regex pattern that specifies the column to test the condition. As of now, limit
#' to only one condition column
#' @param replacement_value Replacement value if condition is true.
#' @param pattern_replacement_cols A regex pattern that specifies the columns to perform replacement
#'
#' @return A data frame
#' @export
#'
#' @examples
#' replace_on_condition(psi, condition = function(x) x == 12, pattern_condition_col = "_count_row_missing$",
#' replacement_value = NA, pattern_replacement_cols = "sum$|average$)
replace_on_condition <- function(df, condition, pattern_condition_col, replacement_value, pattern_replacement_cols){
# column to check condition
condition_col <- names(df)[grepl(names(df), pattern = pattern_condition_col)]
if(length(condition_col) > 1){
stop("Can only specific 1 condition column")
}
condition_col <- as.vector(df[,condition_col])
# columns to replace values if condition is satisfied
replacement_cols <- names(df)[grepl(names(df), pattern = pattern_replacement_cols)]
for (col in replacement_cols){
df[[col]] <- ifelse(condition(condition_col), replacement_value, df[[col]])
}
return(df)
}
|
0028f10fe8caffc17dcc475bd2b0a185b0b36079 | 8f5305f4cb54ffd6e976918aee15abc88c8f2e2c | /MQMpackage/man/plot.MQMone.Rd | 0af67773fb37873d57340e1bd89be32cb1c30f84 | [] | no_license | pjotrp/qtl | 1029f7f67cfbe2301d8ee722818f454b195fa620 | 36527911f2dfb58abc7a076d5ed314cf554cbfa7 | refs/heads/master | 2016-09-06T20:11:17.419727 | 2009-04-23T12:10:54 | 2009-04-23T12:10:54 | 110,997 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,420 | rd | plot.MQMone.Rd | \name{plot.MQMone}
\alias{plot.MQMone}
\title{ plot.MQMone - Plotting routine to display the results from a MQMscan }
\description{
Plotting routine to display the results from a MQMscan
}
\usage{
plot.MQMone(result = NULL, result2=NULL, extended = 0,\dots)
}
\arguments{
\item{result}{ Results from scanMQM of type scanone }
\item{result2}{ Results from scanMQM of type scanone }
\item{extended}{ Extended plotting of the information content }
\item{\dots}{ Extra argument passed to the plot.scanone }
}
\details{
  Plots the QTL (LOD) profile(s) produced by \code{scanMQM}; additional
  graphical parameters are passed on to \code{plot.scanone}.
}
\value{
No return, plotting routine
}
\author{ Danny Arends \email{Danny.Arends@gmail.com} }
\note{
If you find anything wrong with the algorithm, please notify me at: \email{Danny.Arends@gmail.com}
}
\seealso{
\itemize{
\item \code{\link{scanMQM}} - Function called to do single trait analysis
}
}
\examples{
#Simulated F2 Population
library(MQMpackage)
f2qtl <- c(3,15,3,7) # QTL at chromosome 3
data(map10) # Mouse genome
f2cross <- sim.cross(map10,f2qtl,n=100,type="f2") # Simulate a F2 Cross
f2result <- scanMQM(f2cross) # Do a MQM scan of the genome
plot.MQMone(f2result) #Use our fancy plotting routine
}
\keyword{ QTL }
\keyword{ Mapping }
\keyword{ Selection }
\keyword{ hplot }
|
93f0f24b662aef98e370a2d184b1a1f6f63f0b8d | b65ad7c3e3789f802aaddaf4825d115f679f37bb | /man/decompTSbfast.Rd | 6e2474ef2f2202e24033e739854da3ff6a7c23eb | [
"Apache-2.0"
] | permissive | RETURN-project/BenchmarkRecovery | 6f053e6db100f0aa65e49f31188a8567ef3b503b | a82dd7043185b97f6720a3e7ea7bb0d1c0d0b501 | refs/heads/master | 2023-04-19T02:44:23.632332 | 2021-07-13T12:06:04 | 2021-07-13T12:06:04 | 207,575,104 | 0 | 1 | Apache-2.0 | 2021-07-13T12:35:35 | 2019-09-10T14:00:52 | R | UTF-8 | R | false | true | 2,183 | rd | decompTSbfast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun_characterize.R
\name{decompTSbfast}
\alias{decompTSbfast}
\title{Decompose time series into trend, seasonality and remainder: This function decomposes time series into three components using BFAST01 functionality: trend, seasonality and remainder. Trends are fitted using linear regression without breaks, seasonality is fitted using a first order harmonic function and the remainder equals the anomalies (i.e. time series - trend - seasonality).}
\usage{
decompTSbfast(df, nyr, nobsYr)
}
\arguments{
\item{df}{a dataframe with time series that need to be decomposed. The dataframe needs to be structured as follows: each row represents a sampled pixel. The first two columns contain the latitude and longitude of the pixel. The next columns contain the time series values for each observation date.}
\item{nyr}{number of years of the input time series}
\item{nobsYr}{number of observations per year of the input time series}
}
\value{
a list containing the estimated seasonality, remainder, trend and seasonality coefficients. The seasonality is a dataframe with the seasonality of each pixel. Each row represents a sampled pixel. The first two columns contain the latitude and longitude of the pixel. The next columns contain the seasonality values for each observation date. The trend and remainder are dataframes with the trend and remainder of each pixel (dataframe is structured in the same way as the seasonality). Seasonality_coefficients is a dataframe with the coeficients of the fitted harmonic function. Each row represents a sampled pixel. The first two columns contain the latitude and longitude of the pixel. The next columns contain the coefficients of the fitted harmonic function.
}
\description{
Decompose time series into trend, seasonality and remainder: This function decomposes time series into three components using BFAST01 functionality: trend, seasonality and remainder. Trends are fitted using linear regression without breaks, seasonality is fitted using a first order harmonic function and the remainder equals the anomalies (i.e. time series - trend - seasonality).
}
|
4baa0150bad20325cbcd5d7e03ddc24987ba2d6d | 705584bc6ae940229a6a4f1cdd80a7e6e6adf4b7 | /HomeworkForAllele.R | f1f87d71e5db43c944e92d4b6a3ce884f63b35ea | [] | no_license | hienle215/HandsOnGenetics | f87cd386ea8205158cfc5aaa1aa6d8353e509bda | 12577814686f3b0a192863fa695e08995724d018 | refs/heads/master | 2023-01-22T13:52:37.187615 | 2020-12-03T11:45:10 | 2020-12-03T11:45:10 | 308,636,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 585 | r | HomeworkForAllele.R | ###ASSIGNMENT 3
# Make a vector where you store the genotype data
# (biallelic locus with alleles A and T, one genotype per individual)
genotype <- c("A/A","A/T","T/T","A/A","A/A","A/T","A/A","A/A","A/A","A/T")
genotype
# Tabulate the genotype counts.
# NOTE: the stray ```{r} fences from the original R Markdown draft were
# removed -- they are not valid R syntax in a plain .R script.
tb <- table(genotype)
tb
# Use the formula for counting the allele frequencies
# Indexing the table object: counts of A/A, A/T and T/T respectively
tb[1]
tb[2]
tb[3]
# Calculating the allele frequency for allele A: each A/A homozygote carries
# two A alleles and each A/T heterozygote carries one, out of two alleles
# per individual in total.
allelefreqA <- (2*tb[1] + tb[2])/(2*tb[1] + 2*tb[2] + 2*tb[3])
allelefreqA
allelefreqA <- as.numeric(allelefreqA)  # drop the table class and names
allelefreqA
class(allelefreqA)
# Frequencies sum to one, so the T frequency is the complement of A.
allelefreqT <- 1 - allelefreqA
allelefreqT
|
fe6098b191fd4cfff73f9ccc4b6e1b518589be08 | 59814a0b78638216a579ab39fcd89922ccbb1c9e | /R/CTABLE.R | 08865fc0459f88b2a40ba422ad6e5502c84ffa51 | [] | no_license | MarkusLang1987/MPCodes | 6f900f4dbc0d946fb562c229029a095cc12b5cc3 | 8b6ff8cfdf78d1342887a984bf92ad8270577611 | refs/heads/master | 2020-03-07T10:59:03.554543 | 2018-04-19T09:23:49 | 2018-04-19T09:23:49 | 127,444,840 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,234 | r | CTABLE.R |
# mp_CTable: cross-tabulate data[[varx]] (rows) against data[[vary]] (columns)
# and return a data frame in which each level of vary contributes TWO columns:
# the raw frequency and the column-wise proportion (rounded to 2 decimals).
# If Gewichtung ("weighting") is TRUE, every row is multiplied by the mean of
# data[[varGewicht]] within the corresponding varx group, then frequency
# columns are re-rounded to integers and proportion columns to 2 decimals.
#
# Arguments:
#   data       - data frame holding the variables
#   varx       - name (string) of the row variable
#   vary       - name (string) of the column variable
#   Gewichtung - apply group-mean weighting? (default FALSE)
#   varGewicht - name (string) of the weight variable, used when Gewichtung = TRUE
# Returns: a data.frame with nrow = number of varx levels and
#          ncol = 2 * number of vary levels.
mp_CTable <- function(data, varx, vary, Gewichtung = FALSE, varGewicht){
  # a/b track the destination column pair for the current vary level
  a = 1
  b = 2
  frqtab <- table(data[[varx]], data[[vary]])
  # column-wise proportions (margin 2), rounded to 2 decimal places
  proptab <- round(prop.table(frqtab, 2), 2)
  Ergframe <- data.frame(matrix(nrow = nrow(frqtab), ncol = ncol(frqtab) * 2))
  for(i in 1:ncol(frqtab)) {
    x = as.data.frame(frqtab[,i])
    y = as.data.frame(proptab[,i])
    # place frequency and proportion side by side in columns a and b
    Ergframe[a:b] <- cbind(x,y)
    # both columns of the pair get the same vary-level name
    colnames(Ergframe)[a] <- colnames(frqtab)[i]
    colnames(Ergframe)[b] <- colnames(proptab)[i]
    # NOTE(review): this inner loop reuses `i` as its index; R's for() resets
    # the outer `i` on each iteration, so the outer loop is unaffected, but
    # the shadowing is easy to misread.
    for(i in 1:nrow(frqtab)) {
      row.names(Ergframe)[i] <- row.names(frqtab)[i]
    }
    a = a + 2
    b = b + 2
  }
  if(Gewichtung == TRUE){
    # Mean weight per varx group. NOTE(review): this assumes aggregate()
    # returns the groups in the same order as the rows of frqtab
    # (both sort the varx levels) -- confirm for factor inputs with
    # non-default level ordering.
    Gewicht <- aggregate(data[[varGewicht]], by=list(data[[varx]]), FUN=mean, na.rm = TRUE)
    for(i in 1:nrow(Ergframe)) {
      # scale the whole row by its group's mean weight
      Ergframe[i,] <- Ergframe[i,] * Gewicht[i,2]
      # Round after every row: odd columns (frequencies) to integers, even
      # columns (proportions) to 2 decimals. Re-rounding not-yet-weighted
      # rows is a no-op since they already hold integer counts / 2-decimal
      # proportions, so the end result matches rounding once at the end.
      for(i in 1:ncol(Ergframe)){
        if(i %% 2 != 0) {
          Ergframe[,i] <- round(Ergframe[,i],0)
        } else {
          Ergframe[,i] <- round(Ergframe[,i],2)
        }
      }
    }
  } else {}
  return(Ergframe)
}
|
b5d975a2758b8b0986fd53b3df192582728521f2 | afed6bdf5b2b39e61c1d108f8a856e4ce38031a7 | /05-olympics/olympics.R | 9089b01c63da71c26ea426c418a48da8d6e32d00 | [] | no_license | baifengbai/tidy-tuesday | ef97a1c1ce4ffad064f146e76a259926fb9fd773 | 97c8002212cbe76a8c330f045975eab64656938f | refs/heads/master | 2023-07-13T04:45:07.250863 | 2021-08-18T15:31:35 | 2021-08-18T15:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,527 | r | olympics.R |
# Setup -------------------------------------------------------------------
librarian::shelf(tidyverse, ggbump)

# Tidy Tuesday 2021-07-27: Olympic athletes and medals.
olympics <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-07-27/olympics.csv')

glimpse(olympics)

# Yearly ranking of each team by cumulative medal points
# (gold = 3, silver = 2, bronze = 1), restricted to years after 1980.
rank <- 
  olympics %>% 
  mutate(points = case_when(medal == 'Gold' ~ 3,
                            medal == 'Silver' ~ 2,
                            medal == 'Bronze' ~ 1,
                            TRUE ~ 0)) %>% 
  count(year, team, wt = points) %>% 
  group_by(team) %>% 
  mutate(cumulative_medals = cumsum(n)) %>% 
  group_by(year) %>% 
  mutate(rank = rank(desc(cumulative_medals),
                     ties.method = 'random')) %>% 
  arrange(year, rank) %>% 
  ungroup() %>% 
  filter(year > 1980)

# Five best teams by total medal points across all years.
# (Renamed from `top_10`, which misleadingly suggested ten teams while
# head(5) keeps only five.)
top_teams <- 
  olympics %>% 
  mutate(points = case_when(medal == 'Gold' ~ 3,
                            medal == 'Silver' ~ 2,
                            medal == 'Bronze' ~ 1,
                            TRUE ~ 0)) %>% 
  count(team, wt = points) %>% 
  mutate(rank = rank(desc(n),
                     ties.method = 'random')) %>% 
  arrange(desc(n)) %>% 
  head(5)

# Bump chart of the yearly rank trajectory for the top teams.
# (A duplicated Setup section that re-downloaded the same CSV was removed
# here -- it had no effect other than a second network fetch.)
semi_join(rank, top_teams, by = 'team') %>% 
  ggplot(aes(x = year, y = rank, color = team)) +
  geom_bump(show.legend = FALSE)
|
68d6b21c525690b892094e7c563c90c92eec2839 | e9d1de2290c91be3225a8d245c36ad5a1e1049f8 | /man/fretr.Rd | ecc4568403f8bd3fe099e5f36f91d316a5ebf8dd | [] | no_license | mwrowe/fretr | 895b066c596dc10d04a6df9127ff17afc1397dbb | 75fbbe16b015be89ca4ae6829b67f15ea6263d20 | refs/heads/master | 2021-03-28T00:17:56.622540 | 2020-03-16T22:42:44 | 2020-03-16T22:42:44 | 247,820,350 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,081 | rd | fretr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fretr.R
\docType{package}
\name{fretr}
\alias{fretr}
\title{Generate Fretboard Maps of Scales and Chords}
\description{
This package draws "fretboard maps", positions of notes along the fretboard
of a stringed instrument, such as a guitar. Typical uses would be to
illustrate all the positions at which a particular chord or scale can be
played, or relationships between scales, modes and chords. These functions
have not been tested extensively except for 6-string guitar in standard
tuning-- caveat emptor.
}
\section{Fretboard Map Functions}{
Each of these functions generates a pdf file with sets of fretboards, chord
or scale diagrams meant to illustrate particular relationships between
the scales, chords and their notes.
\itemize{
\item \code{\link{chordShapePlots}}:
Plot major and minor chord shapes of a given root, individually and within
the neck of a guitar with standard tuning. Optionally sevenths of each
chord may be included. Six fretboards will be plotted.
\item \code{\link{scalePositionPlots}}:
Plots fretboards of a scale/mode, and its diatonic chords by position.
On the first fretboard, all of the notes of the scale are shown for the
whole neck. In the second column, the notes of the scale are shown
broken out by CAGED position on the neck. Next the notes of the major
pentatonic scale for each position are shown. The remaining seven
columns show the CAGED chord shapes at each position, with the dominant
7th substituted for the diminished chord.
\item \code{\link{parallelModePlots}}:
Plots seven fretboard maps showing all the modes of a given tonic note,
ordered such that only a single note differs between successive modes.
}
}
\section{Low-Level Functions}{
\itemize{
\item \code{\link{fretNotes}}:
Assign scale notes to positions of a fretboard given instrument
parameters (number of strings, frets and tuning) and major or minor key.
\item \code{\link{diatonicChordNotes}}:
Find the notes of a diatonic chord within its scale, given the root
note.
\item \code{\link{chordNotesByType}}:
Get notes of an arbitrary chord, by root note and chord type, where
"arbitrary" means not necessarily diatonic to the underlying scale that
is specified by the first argument.
\item \code{\link{findPositions}}:
Find CAGED chord/scale positions along the fretboard for the present
scale. (Position 1 is defined as the scale position where the chord
build on the tonic has the E shape.)
\item \code{\link{makeFretboardString}}:
Convert a set of notes on a fretboard to a string representation.
\item \code{\link{drawNeck}}:
Calculate fret spacing and draw an empty fretboard.
\item \code{\link{plotFretboardByPosition}}:
Plot whole fretboard with positions separated by offset; join the frets
in common.
\item \code{\link{drawNotes}}:
Add note markers and labels at particular locations to a fretboard plot.
}
}
|
d4584208e57560c0fdafb64e2fc54c1afff24701 | 797f914f7521422905d808773693845089a7bb05 | /9_FigS6_PTEN_PI3K_PI345P3_perturbations/FigS6a.r | f42f35658b393593f1e3f922f25eef46ec3e747d | [] | no_license | LBSA-VoitLab/ENaC-regulation-by-phospholipids-and-DGK-explained-through-mathematical-modeling-main | 746ea99eba858d63ce560f82ddd2cabe89ae4bf7 | 43e20af276fa8dd391839d96ebef5a040d6a3597 | refs/heads/main | 2023-04-20T11:10:23.296179 | 2021-05-07T18:59:12 | 2021-05-07T18:59:12 | 365,327,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,985 | r | FigS6a.r | ### Please run the model first in the basolateral configuration (MK20_ENaC_7_basolateral.R) ###
# or alter the model to simulate basolateral part of the plasma membrane
# To do that, you must divide gammaPTENc_PTENa by 10
# This will decrease the PTEN ability to attach to the plasma membrane (become active)
### Carsten data - second try ###
# first plot - original values
# NOTE(review): expects the digitized experimental CSVs inside
# 9_FigS6_PTEN_PI3K_PI345P3_perturbations/ relative to the working directory,
# and windows() makes this Windows-only (use x11()/quartz() elsewhere).
wd = getwd()
setwd(file.path(wd,'9_FigS6_PTEN_PI3K_PI345P3_perturbations'))
shultz_data_1_PI3KI = read.csv("Shultz_data_1_PI3KI.csv")
setwd(wd)
windows()
# PI3K data in red; the +/- 0.05 '-' marks plus segments draw simple error bars
plot(shultz_data_1_PI3KI,type='p',col='firebrick',xlim=c(0,50),ylim=c(.6,1.5),main='Carsten data 1')
points(shultz_data_1_PI3KI$x,shultz_data_1_PI3KI$y-0.05,type='p',col='firebrick1',pch='-')
points(shultz_data_1_PI3KI$x,shultz_data_1_PI3KI$y+0.05,type='p',col='firebrick1',pch='-')
segments(x0=shultz_data_1_PI3KI$x,y0=shultz_data_1_PI3KI$y-0.05,y1=shultz_data_1_PI3KI$y+0.05,col='firebrick1')
# PI(3,4,5)P3 data in green, same error-bar construction
wd = getwd()
setwd(file.path(wd,'9_FigS6_PTEN_PI3K_PI345P3_perturbations'))
shultz_data_1_PI345P3 = read.csv("Shultz_data_1_PI345P3.csv")
setwd(wd)
points(shultz_data_1_PI345P3,type='p',col='green')
points(shultz_data_1_PI345P3$x,shultz_data_1_PI345P3$y-0.05,type='p',col='lightgreen',pch='-')
points(shultz_data_1_PI345P3$x,shultz_data_1_PI345P3$y+0.05,type='p',col='lightgreen',pch='-')
segments(x0=shultz_data_1_PI345P3$x,y0=shultz_data_1_PI345P3$y-0.05,y1=shultz_data_1_PI345P3$y+0.05,col="lightgreen")
legend(x=0,y=1.5,legend=c('PI345P3','PI3K'),lty=1,col=c('green','firebrick'),text.col=c('green','firebrick'),bty = "n")
### Carsten data - second try ###
# second plot - values shifted ("hammered") so PI3KI and PIP3 start at 100%:
# each series is translated so that its first point equals 1 (fold change)
wd = getwd()
setwd(file.path(wd,'9_FigS6_PTEN_PI3K_PI345P3_perturbations'))
shultz_data_1_PI3KI = read.csv("Shultz_data_1_PI3KI.csv")
setwd(wd)
shultz_data_1_PI3KI$y=shultz_data_1_PI3KI$y+(1-shultz_data_1_PI3KI$y[1])
# windows()
# tiff("P2F2_Fig_3_Carsten1.tiff", height = 20, width = 34, units = 'cm',compression = "lzw", res = 300)
par(mar=c(5,6,4,5)+.1)
plot(shultz_data_1_PI3KI,type='p',
col='firebrick',xlim=c(0,50),ylim=c(.6,1.7),
main='PI3K activation',xlab='Time (min)',ylab='fold change',
cex.main=3,cex.lab=3,cex.axis=2
)
points(shultz_data_1_PI3KI$x,shultz_data_1_PI3KI$y-0.05,type='p',col='firebrick1',pch='-')
points(shultz_data_1_PI3KI$x,shultz_data_1_PI3KI$y+0.05,type='p',col='firebrick1',pch='-')
segments(x0=shultz_data_1_PI3KI$x,y0=shultz_data_1_PI3KI$y-0.05,y1=shultz_data_1_PI3KI$y+0.05,col='firebrick1')
wd = getwd()
setwd(file.path(wd,'9_FigS6_PTEN_PI3K_PI345P3_perturbations'))
shultz_data_1_PI345P3 = read.csv("Shultz_data_1_PI345P3.csv")
setwd(wd)
shultz_data_1_PI345P3$y=shultz_data_1_PI345P3$y+(1-shultz_data_1_PI345P3$y[1])
points(shultz_data_1_PI345P3,type='p',col='green')
points(shultz_data_1_PI345P3$x,shultz_data_1_PI345P3$y-0.05,type='p',col='lightgreen',pch='-')
points(shultz_data_1_PI345P3$x,shultz_data_1_PI345P3$y+0.05,type='p',col='lightgreen',pch='-')
segments(x0=shultz_data_1_PI345P3$x,y0=shultz_data_1_PI345P3$y-0.05,y1=shultz_data_1_PI345P3$y+0.05,col="lightgreen")
legend(x=0,y=1.6,legend=c('PI345P3','PI3K'),lty=1,col=c('green','firebrick'),text.col=c('green','firebrick'),bty = "n",cex=2)
# Simulation setup ---------------------------------------------------------
# NOTE(review): `Cruncher`, `state`, `equations` and `parameters` are defined
# by the model script (MK20_ENaC_7_basolateral.R) -- it must be run first.
# Insert the number of time units that the simulation will run
tmax=3000
# Insert the step of the simulation
tstep=1
t=seq(0,tmax+1,tstep) # time
### perturbations for Carsten data 1
# Finally, remove the # from the front of all the next code lines
# On the first column put the time of the perturbation
ptime=c(0,1000,2005,2040,2042,max(t));
# On the second column put the variables to be altered. (ex: 'X')
pvar=c('novar','gammaPI3KIc_PI3KIa','gammaPI3KIc_PI3KIa','gammaPI3KIc_PI3KIa','pi_3KI_a','novar')
# On the third the new value for the variable.
# NOTE(review): the fourth entry below is `...*1` and the fifth is the bare
# literal 353.172211855607 (assigned to 'pi_3KI_a'); the comma splits them
# into two separate values, giving six values to match the six times above.
pval=c(NA,
parameters[names(parameters)=='gammaPI3KIc_PI3KIa'],
parameters[names(parameters)=='gammaPI3KIc_PI3KIa']*28,
parameters[names(parameters)=='gammaPI3KIc_PI3KIa']*1,353.172211855607,
NA)
perturbations=data.frame(time=ptime,var=pvar,val=pval)
perturbations
# 353.172211855607
# try 3 for initial model
# Run the model with the perturbation schedule assembled above.
out = Cruncher(state,t,equations,parameters,perturbations) # no perturbations # with perturbations
# edit(out)
# normalized graphs
stst=out[500,] # getting steady state at time 500 (assumes the model has settled by then -- TODO confirm)
out_n=cbind(time=out[1],out[,2:ncol(out)])
tail(out_n)
out_norm=out[,1] # creating matrix out_norm to store the normalized information
# Divide every state column by its value at the assumed steady state,
# so traces are expressed as fold change relative to baseline.
for (i in 2:ncol(out))
{
newc=c(out_n[,i]/as.numeric(stst[i])) # normalizing to steady state got at time 500
out_norm=cbind(out_norm,newc) # storing normalized information
}
colnames(out_norm)=colnames(out)
head(out_norm)
# edit(out_norm)
# Overlay the simulated traces (columns 9 and 10 -- presumably PI345P3 and
# PI3K; verify against the model's state ordering) on the experimental plot,
# shifting time so the window starting at t = 2000 lines up with t = 0.
points((out_norm[2000:2060,1]-2000),out_norm[2000:2060,9],type='l',col='green',lwd='5')
points((out_norm[2000:2060,1]-2000),out_norm[2000:2060,10],type='l',col='firebrick',lwd='5')
# dev.off()
|
86ecb785818113b77b8202188be66587d6b8df5d | 8c0b0e9a59c7230f3a23dbe8b29d2cde68733524 | /man/speciesData.Rd | 2f2b453108c7e116f73f30af370945b8d896f4c2 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-other-copyleft",
"GPL-3.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"GPL-3.0-only"
] | permissive | RS-eco/rasterSp | 33754976f86858511ccd6562fd16d47205a9e0df | 8c007900ceb2679f8aee04aec4d936648df5191b | refs/heads/main | 2023-01-11T11:49:02.325118 | 2023-01-09T08:00:37 | 2023-01-09T08:00:37 | 225,630,913 | 19 | 7 | MIT | 2021-07-07T07:15:15 | 2019-12-03T13:48:02 | R | UTF-8 | R | false | true | 1,040 | rd | speciesData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/speciesData.R
\name{speciesData}
\alias{speciesData}
\title{Create dataframe of species presence from multiple raster files}
\usage{
speciesData(species_names = NA, path = getwd(), filename = NA, ...)
}
\arguments{
\item{species_names}{List with species names}
\item{path}{Path to location of raster files}
\item{filename}{Specify filename of output}
\item{...}{Additional arguments:
na: String used for missing values. Defaults to NA. Missing values will never be quoted; strings with the same value as na will always be quoted.
append: If FALSE, will overwrite existing file. If TRUE, will append to existing file. In both cases, if file does not exist a new file is created.
col_names: Write columns names at the top of the file?}
}
\value{
raster layer with species richness of files provided
}
\description{
Read raster files of multiple species and
extract only the coordinates, where individuals are present
}
\examples{
\dontrun{
speciesData()
}
}
|
1c63a612672a09d40f22f193ae807960ea8adb18 | 2a8b8606599677082d7210a13707158c0bc27278 | /man/check_winlinux.Rd | 79570c5b226f83fa93707dbfb2125227ccc00c73 | [
"MIT"
] | permissive | mathesong/kipettools | be223ac78bd4b80b43dfb625fdbcc98536c53988 | 98bc5564a2f28180e8ff5b477b19d3b63d26dafe | refs/heads/master | 2022-11-12T13:28:07.787173 | 2022-11-02T21:56:53 | 2022-11-02T21:56:53 | 114,106,094 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 414 | rd | check_winlinux.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/system_checks.R
\name{check_winlinux}
\alias{check_winlinux}
\title{Check if the Windows Ubuntu extension is installed and available}
\usage{
check_winlinux()
}
\value{
Logical of whether Ubuntu can be called from Windows
}
\description{
Check if the Windows Ubuntu extension is installed and available
}
\examples{
check_winlinux()
}
|
2c4859bb3dc54365a85647ce5575bc1234fd9639 | 2099a2b0f63f250e09f7cd7350ca45d212e2d364 | /AI-Dataset/Summary_rnd/S000437021300129X.xml.A.R | b7c82c17332adacb936018107415cf0b6d27fa22 | [] | no_license | Angela7126/SLNSumEval | 3548301645264f9656b67dc807aec93b636778ef | b9e7157a735555861d2baf6c182e807e732a9dd6 | refs/heads/master | 2023-04-20T06:41:01.728968 | 2021-05-12T03:40:11 | 2021-05-12T03:40:11 | 366,429,744 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,311 | r | S000437021300129X.xml.A.R | <html>
<head>
<meta name="TextLength" content="SENT_NUM:7, WORD_NUM:151">
</head>
<body bgcolor="white">
<a href="#0" id="0">To this end we first introduce fact-changing epistemic expectation models and protocol models, {a mathematical formula}MexpF and {a mathematical formula}AF, given by {a mathematical formula}〈Mexp,F〉 and {a mathematical formula}〈A,F〉, where {a mathematical formula}Mexp is an epistemic expectation model, {a mathematical formula}A is a protocol model and {a mathematical formula}F is a factual change system.</a>
<a href="#1" id="1">If {a mathematical formula}ε∈L(π), the output function o maps a regular expression π to ε; otherwise, it maps π to δ[17], [18]:{a mathematical formula} The above construction of the output function helps to compute the residual of compositions.</a>
<a href="#2" id="2">We would like to generate the epistemic expectation model in Example 7 (see p.</a>
<a href="#3" id="3">□</a>
<a href="#4" id="4">Also, the introduction of knowledge tests may make the satisfiability problem of the logic undecidable.</a>
<a href="#5" id="5">Consider a room where a child is playing with a small plastic seat, and Dora standing outside the room.</a>
<a href="#6" id="6">Moreover, we use ‘hidden protocols’ on top of public ones.</a>
</body>
</html> |
93d7bf9701b06e6e4e716146988a55f39a4c6387 | a2310bfbb79bf1ccf14f40477158ca3d26965952 | /8_IRT.R | c74fba1bf8b929060946769a9e0ed7bf4327139a | [] | no_license | dczhang1/analysisTemplates | a2ca8a46819e48ebbcb054fd1595266b82fc76e9 | c3dd65ae16d0e93be0baa89001ab53d25aa7db33 | refs/heads/master | 2023-09-02T19:33:28.603941 | 2023-08-08T22:00:46 | 2023-08-08T22:00:46 | 214,302,559 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,161 | r | 8_IRT.R | ### Item response theory
### Item response theory ---------------------------------------------------
# IRT model fitting with the ltm package.
# NOTE: the graded response model below expects a data frame `df` holding
# only the (polytomous) item responses -- define it before running.
library(ltm)

### Unidimensional Graded Response Model
# df should contain only the item variables for the analysis
grm.model.1 <- grm(df)
summary(grm.model.1)
coef(grm.model.1)
plot(grm.model.1, type = "ICC")  # item characteristic curves
plot(grm.model.1, type = "IIC")  # item information curves

### Dichotomous data
### Walkthrough: https://www.youtube.com/watch?v=L1S7o49r0nI

### Libraries
# (ltm is already attached above; the duplicate library(ltm) call was removed.)
library('mirt')
library('shiny')

### Load up sample data
# Renamed from `data` so it no longer masks base::data().
lsat7 <- expand.table(LSAT7)

### Look at summary statistics
summary(lsat7)

### Running 2PL and 3PL models
### Parameters:
### Location/difficulty parameter: location where the item discriminates
### between the top and bottom 50%.
#   Negative = too easy, measures the lower end of the distribution
#   Zero     = discriminates the middle of the skill distribution
#   Positive = more difficult, discriminates top people only
### Discrimination: the slope of the item characteristic curve.
### Guessing parameter: some items are easy to guess.
#   Could be higher if the alternatives are poor; could be lower.

### 2PL
# Specify the model (one latent trait z1, IRT parameterization)
model.2pl <- ltm(lsat7 ~ z1, IRT.param = TRUE)
# See model coefficients
coef(model.2pl)
summary(model.2pl)
# Item characteristic curves
plot(model.2pl, type = "ICC")
abline(.5, 0)
# One curve only
plot(model.2pl, type = "ICC", items = 5)
# Item information curves
plot(model.2pl, type = "IIC")
# Test information: sum of all items' information curves
plot(model.2pl, type = "IIC", item = 0)
# Individual scores on the underlying latent trait
factor.scores(model.2pl)
# Person fit: the alternative hypothesis is an inconsistent response
# pattern, so non-significant p-values are desirable
person.fit(model.2pl)
# Item fit
item.fit(model.2pl)

### 3PL
# Includes the guessing parameter
model.3pl <- tpm(lsat7, type = "latent.trait", IRT.param = TRUE)
# Coefficients
coef(model.3pl)
# Plots
plot(model.3pl, type = "ICC")
plot(model.3pl, type = "IIC")
# Information
factor.scores(model.3pl)
person.fit(model.3pl)
item.fit(model.3pl)

### Compare the models
anova(model.2pl, model.3pl)
summary(model.3pl)
a341a385751b9772087f48d5e536e1dad92e2404 | 255bde5d965a626504175d1800d22896f6820eed | /R/binGroupReg.r | 6873963326fd21f628ade063943892d9c64f59cd | [] | no_license | cran/binGroup | f0345d60454b835216ba4d29bbcbd193f3557f81 | 288a6aa03e33afa530fc1fd1eb3ad39f5b25427d | refs/heads/master | 2021-01-22T11:59:06.761427 | 2018-08-24T10:24:26 | 2018-08-24T10:24:26 | 17,694,764 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 57,826 | r | binGroupReg.r | gtreg <- function(formula, data, groupn, retest = NULL, sens = 1, spec = 1,
        linkf = c("logit", "probit", "cloglog"),
        method = c("Vansteelandt", "Xie"), sens.ind = NULL, spec.ind = NULL,
        start = NULL, control = gt.control(...), ...) {
    ## gtreg: fit a regression model to group-testing data.  `groupn` labels
    ## each individual's group; `sens`/`spec` are the assay sensitivity and
    ## specificity used in the likelihood.  Dispatches to Vansteelandt's
    ## direct ML fit or to Xie's EM algorithm (with or without retests).
    # Rebuild the call as a model.frame() call so formula/data/groupn are
    # evaluated together in the caller's environment (standard NSE idiom).
    call <- match.call()
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "groupn"), names(mf), 0)
    mf <- mf[c(1, m)]
    mf$drop.unused.levels <- TRUE
    mf[[1]] <- as.name("model.frame")
    mf <- eval(mf, parent.frame())
    mt <- attr(mf, "terms")
    gr <- model.extract(mf, "groupn")
    # If `retest` was supplied as a column name of `data`, substitute that
    # column.  NOTE(review): this line errors when `data` is missing --
    # confirm `data` is required whenever `retest` is used.
    if (!is.na(pos <- match(deparse(substitute(retest)), names(data))))
        retest <- data[, pos]
    Y <- model.response(mf, "any")
    # Drop a redundant 1-D dimension from the response, preserving names.
    if (length(dim(Y)) == 1) {
        nm <- rownames(Y)
        dim(Y) <- NULL
        if (!is.null(nm))
            names(Y) <- nm
    }
    X <- if (!is.empty.model(mt))
        model.matrix(mt, mf)
    else matrix(, NROW(Y), 0)
    linkf <- match.arg(linkf)
    # Vansteelandt's method maximizes the group-level likelihood directly and
    # cannot use retest information; Xie's method runs an EM algorithm.
    if ((method <- match.arg(method)) == "Vansteelandt") {
        if (!is.null(retest))
            warning("Retests cannot be used with Vansteelandt's method.")
        fit <- gtreg.fit(Y, X, gr, sens, spec, linkf, start)
    }
    else {
        if (is.null(retest))
            fit <- EM(Y, X, gr, sens, spec, linkf, start, control)
        else fit <- EM.ret(Y, X, gr, retest, sens, spec, linkf,
            sens.ind, spec.ind, start, control)
    }
    # Attach call/formula/terms bookkeeping and the "gt" class used by the
    # package's S3 methods.
    fit <- c(fit, list(call = call, formula = formula, method = method,
        link = linkf, terms = mt))
    class(fit) <- "gt"
    fit
}
# gtreg.fit: Vansteelandt's method -- maximize the likelihood of the observed
# group-level responses directly with optim().
#   Y       individual-row response vector; each group's observed test result
#           is taken as the last element within the group
#   X       model matrix (one row per individual)
#   groupn  group index per individual
#   sens, spec  assay sensitivity and specificity entering the likelihood
#   linkf   link name ("logit", "probit" or "cloglog")
#   start   optional starting coefficient vector
# Returns a list with coefficients, hessian, fitted group probabilities,
# deviances, AIC, iteration count, residuals and the group responses z.
gtreg.fit <- function (Y, X, groupn, sens, spec, linkf, start = NULL)
{
    # Observed response of each group = last individual row in that group.
    z <- tapply(Y, groupn, tail, n = 1)
    num.g <- max(groupn)
    K <- ncol(X)
    sam <- length(Y)
    if (is.null(start)) {
        # Starting values: binomial GLM of the group responses on the group
        # means of the covariates.  With a single coefficient, BFGS is used
        # for the optimization; otherwise Nelder-Mead.
        if (K == 1) {
            cova.mean <- as.matrix(tapply(X, groupn, mean))
            optim.meth <- "BFGS"
        }
        else {
            temp <- by(X, groupn, colMeans)
            cova.mean <- do.call(rbind, temp)
            optim.meth <- "Nelder-Mead"
        }
        beta.group <- glm.fit(cova.mean, as.vector(z),
                     family = binomial(link = linkf))$coefficients
    }
    else {
        beta.group <- start
        names(beta.group) <- dimnames(X)[[2]]
        optim.meth <- ifelse(K == 1, "BFGS", "Nelder-Mead")
    }
    # Negative log-likelihood: P(group tests positive) =
    # sens + (1 - sens - spec) * prod over the group of (1 - p_individual).
    logL <- function(beta) {
        pijk <- switch(linkf, logit = plogis(X %*% beta), probit = pnorm(X %*%
            beta), cloglog = 1 - exp(-exp(X %*% beta)))
        prodp <- tapply(1 - pijk, groupn, prod)
        -sum(z * log(sens + (1 - sens - spec) * prodp) +
            (1 - z) * log(1 - sens - (1 - sens - spec) * prodp))
    }
    mod.fit <- optim(par = beta.group, fn = logL, method = optim.meth,
        control = list(trace = 0, maxit = 1000), hessian = TRUE)
    # If the Hessian is singular, retry with simulated annealing.
    if (det(mod.fit$hessian) == 0)
        mod.fit <- optim(par = beta.group, fn = logL, method = "SANN", hessian = TRUE)
    # Intercept-only version of the likelihood, used for the null deviance.
    logL0 <- function(beta) {
        inter <- rep(beta, sam)
        pijk <- switch(linkf, logit = plogis(inter), probit = pnorm(inter),
            cloglog = 1 - exp(-exp(inter)))
        prodp <- tapply(1 - pijk, groupn, prod)
        -sum(z * log(sens + (1 - sens - spec) * prodp) +
            (1 - z) * log(1 - sens - (1 - sens - spec) * prodp))
    }
    mod.fit0 <- optim(par = binomial()$linkfun(mean(z)),
        fn = logL0, method = "BFGS", control = list(trace = 0, maxit = 1000))
    nulld <- 2 * mod.fit0$value
    residd <- 2 * mod.fit$value
    # Fitted group-positivity probabilities and residuals at the optimum.
    xib <- X %*% mod.fit$par
    pijk <- switch(linkf, logit = plogis(xib), probit = pnorm(xib),
        cloglog = 1 - exp(-exp(xib)))
    prodp <- tapply(1 - pijk, groupn, prod)
    zhat <- sens + (1 - sens - spec) * prodp
    residual <- z - zhat
    aic <- residd + 2 * K
    # NOTE(review): when optim does not converge, `counts` is never assigned
    # and building the return list below fails ("object 'counts' not found").
    if (mod.fit$convergence == 0)
        counts <- mod.fit$counts[[1]]
    else warning("Maximum number of iterations exceeded.")
    list(coefficients = mod.fit$par, hessian = mod.fit$hessian,
        fitted.values = zhat, deviance = residd, df.residual = num.g - K,
        null.deviance = nulld, df.null = num.g - 1, aic = aic, counts = counts,
        residuals = residual, z = z)
}
# EM: Xie's EM algorithm for group-testing regression without retests.
# E-step: compute the expected individual response given the group outcome
# and the current coefficients; M-step: refit a binomial GLM on those
# expectations.  After convergence one extra pass through the loop computes
# the quantities needed for the observed-information (Hessian) matrix.
# Arguments mirror gtreg.fit, plus `control` (tol/maxit/trace/time from
# gt.control()).  Returns the same list layout as gtreg.fit.
EM <- function (Y, X, groupn, sens, spec, linkf, start = NULL, control = gt.control())
{
    if (control$time)
        start.time <- proc.time()
    # Observed group responses: last row of each group.
    z <- tapply(Y, groupn, tail, n = 1)
    num.g <- max(groupn)
    K <- ncol(X)
    if (is.null(start)) {
        # Starting values: linear fit of group responses on group-mean covariates.
        if (K == 1)
            cova.mean <- as.matrix(tapply(X, groupn, mean))
        else {
            temp <- by(X, groupn, colMeans)
            cova.mean <- do.call(rbind, temp)
        }
        beta.old <- lm.fit(cova.mean, z)$coefficients
    }
    else beta.old <- start
    sam <- length(Y)
    vec <- 1:sam
    group.sizes <- tapply(Y, groupn, length)
    diff <- 1
    counts <- 1
    # extra.loop makes one final pass (E-step only) after convergence so the
    # Hessian below is evaluated at the converged coefficients.
    extra.loop <- FALSE
    next.loop <- TRUE
    while (next.loop) {
        xib <- X %*% beta.old
        pijk <- switch(linkf, logit = plogis(xib),
            probit = pnorm(xib), cloglog = 1 - exp(-exp(xib)))
        prodp <- tapply(1 - pijk, groupn, prod)
        # den/den2: P(group positive)/P(group negative), repeated per member.
        den <- rep((1 - spec) * prodp + sens * (1 - prodp), group.sizes)
        den2 <- rep(spec * prodp + (1 - sens) * (1 - prodp),
            group.sizes)
        # E-step: expected individual status given the group's test outcome.
        expect <- rep(NA, times = sam)
        for (i in vec) {
            if (Y[i] == 0)
                expect[i] <- (1 - sens) * pijk[i]/den2[i]
            else expect[i] <- sens * pijk[i]/den[i]
        }
        if (!extra.loop) {
            # M-step: weighted binomial GLM on the expectations; the handler
            # silences only the expected "non-integer #successes" warning.
            suppress <- function(w)
                if(any(grepl("non-integer #successes in a binomial glm", w)))
                invokeRestart("muffleWarning")
            mod.fit <- withCallingHandlers(glm.fit(X, expect,
                family = binomial(link = linkf)), warning = suppress)
            # Relative change in coefficients drives the stopping rule.
            diff <- max(abs((beta.old - mod.fit$coefficients)/beta.old))
            beta.old <- mod.fit$coefficients
            if (control$trace)
                cat("beta is", beta.old, "\tdiff is", diff, "\n")
            counts <- counts + 1
            if (diff <= control$tol || counts > control$maxit)
                extra.loop <- TRUE
        }
        else next.loop <- FALSE
    }
    # Observed information: link-specific first/second-derivative pieces.
    erf <- 2 * pijk - 1
    pt1 <- switch(linkf, logit = -exp(xib)/(1 + exp(xib))^2,
        probit = sqrt(2) * xib * exp(-xib^2/2)/(sqrt(pi) * (1 -
            erf)) - 2 * exp(-xib^2)/(pi * (1 - erf)^2), cloglog = -exp(xib))
    pt2 <- switch(linkf, logit = 0, probit = (8 * exp(-xib^2/2) *
        erf + 2 * xib * sqrt(2 * pi) * erf^2 - 2 * xib * sqrt(2 *
        pi)) * exp(-xib^2/2)/((1 + erf)^2 * pi * (1 - erf)^2),
        cloglog = -(exp(xib - exp(xib)) + exp(2 * xib - exp(xib)) -
            exp(xib))/(exp(-exp(xib)) - 1)^2)
    nm <- pt1 + expect * pt2
    sign1 <- as.vector(sign(nm))
    nn <- as.vector(sqrt(abs(nm)))
    x2 <- X * nn
    m <- (t(x2) %*% (sign1 * x2))
    # Cross-product corrections accumulated over all within-group pairs (i, j).
    b <- array(NA, c(K, K, sum(group.sizes^2)))
    p <- 1
    for (i in vec) for (j in vec[groupn == groupn[i]]) {
        wii <- ifelse(i == j, expect[i] - expect[i]^2, expect[i] *
            (pijk[j] - expect[j]))
        coe <- switch(linkf, logit = 1, probit = 8 * exp(-(xib[i]^2 +
            xib[j]^2)/2)/((1 - erf[i]^2) * (1 - erf[j]^2) * pi),
            cloglog = exp(xib[i] + xib[j])/((exp(-exp(xib[i])) -
                1) * (exp(-exp(xib[j])) - 1)))
        b[, , p] <- wii * coe * X[i, ] %*% t(X[j, ])
        p <- p + 1
    }
    m1 <- apply(b, c(1, 2), sum)
    H <- -(m + m1)
    # Fitted group probabilities, residuals and deviance at convergence.
    zhat <- sens + (1 - sens - spec) * prodp
    residual <- z - zhat
    residd <- -2 * sum(z * log(zhat) + (1 - z) * log(1 - zhat))
    # Intercept-only likelihood for the null deviance (same form as gtreg.fit).
    logL0 <- function(beta) {
        inter <- rep(beta, sam)
        pijk <- switch(linkf, logit = plogis(inter), probit = pnorm(inter),
            cloglog = 1 - exp(-exp(inter)))
        prodp <- tapply(1 - pijk, groupn, prod)
        -sum(z * log(sens + (1 - sens - spec) * prodp) +
            (1 - z) * log(1 - sens - (1 - sens - spec) * prodp))
    }
    mod.fit0 <- optim(par = binomial()$linkfun(mean(z)), fn = logL0,
        method = "BFGS", control = list(trace = 0, maxit = 1000))
    nulld <- 2 * mod.fit0$value
    aic <- residd + 2 * K
    if (diff > control$tol && counts > control$maxit)
        warning("EM algorithm did not converge.")
    if (control$time) {
        end.time <- proc.time()
        save.time <- end.time - start.time
        cat("\n Number of minutes running:", round(save.time[3]/60, 2), "\n \n")
    }
    list(coefficients = beta.old, hessian = H, fitted.values = zhat,
        deviance = residd, df.residual = num.g - K, null.deviance = nulld,
        df.null = num.g - 1, aic = aic, counts = counts - 1, residuals = residual,
        z = z)
}
# EM algorithm for a group-testing regression model when individual retests
# within test-positive groups are available (Dorfman-type retesting).
#
# Arguments:
#   Y       - observed group response, repeated once per individual
#   X       - design matrix (one row per individual)
#   groupn  - group index (1..num.g) for each individual
#   ret     - individual retest results; NA for individuals never retested
#   sens, spec           - sensitivity/specificity of the group test
#   sens.ind, spec.ind   - the same for individual retests; NULL means
#                          "use the group-test values"
#   start   - optional starting coefficient vector
#   control - list from gt.control() (tol, maxit, trace, time, ...)
#
# Returns a list with coefficients, hessian (negative expected information),
# fitted group-positivity probabilities, deviance (2 * negative log-lik),
# AIC, EM iteration count, response residuals and the observed group
# responses z.
EM.ret <- function (Y, X, groupn, ret, sens, spec, linkf,
    sens.ind, spec.ind,
    start = NULL, control = gt.control())
{
    if (control$time)
        start.time <- proc.time()
    # Individual-retest error rates default to the group-test rates.
    if (is.null(sens.ind))
        sens.ind <- sens
    if (is.null(spec.ind))
        spec.ind <- spec
    # One observed response per group (all entries within a group agree).
    z <- tapply(Y, groupn, tail, n = 1)
    num.g <- max(groupn)
    K <- ncol(X)
    # Starting values: least-squares regression of the group responses on
    # the per-group covariate means, unless start was supplied.
    if (is.null(start)) {
        if (K == 1)
            cova.mean <- as.matrix(tapply(X, groupn, mean))
        else {
            temp <- by(X, groupn, colMeans)
            cova.mean <- do.call(rbind, temp)
        }
        beta.old <- lm.fit(cova.mean, z)$coefficients
    }
    else beta.old <- start
    sam <- length(Y)
    vec <- 1:sam
    group.sizes <- tapply(Y, groupn, length)
    diff <- 1
    counts <- 1
    extra.loop <- FALSE
    next.loop <- TRUE
    # a0[l]: P(observed retest result | individual l truly positive);
    # a1[l]: P(observed retest result | individual l truly negative).
    # Entries are NA where ret is NA (no retest performed).
    a0 <- ifelse(ret == 1, sens.ind, 1 - sens.ind)
    a1 <- ifelse(ret == 0, spec.ind, 1 - spec.ind)
    while (next.loop) {
        xib <- X %*% beta.old
        pijk <- switch(linkf, logit = plogis(xib),
            probit = pnorm(xib), cloglog = 1 -
            exp(-exp(xib)))
        erf <- 2 * pijk - 1
        prodp <- tapply(1 - pijk, groupn, prod)
        # den2: per-individual copy of P(group tests negative).
        den2 <- rep(spec * prodp + (1 - sens) * (1 - prodp),
            group.sizes)
        # E step: conditional expectation of each individual's true status
        # given the group result (and retests, when the group was positive).
        expect <- rep(NA, times = sam)
        i <- 1
        while (i <= sam) {
            if (Y[i] == 0)
                expect[i] <- (1 - sens) * pijk[i]/den2[i]
            else {
                vec1 <- vec[groupn == groupn[i]]
                # mb2: product over the group of the retest likelihoods;
                # null: the same restricted to "all truly negative".
                mb2 <- 1
                for (l in vec1) {
                  temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                  mb2 <- mb2 * temp
                }
                null <- 1
                for (l in vec1) {
                  temp <- a1[l] * (1 - pijk[l])
                  null <- null * temp
                }
                den <- mb2 * sens + null * (1 - sens - spec)
                for (l1 in vec1) {
                  temp <- a0[l1] * pijk[l1] + a1[l1] * (1 - pijk[l1])
                  num <- mb2/temp * a0[l1] * pijk[l1] * sens
                  expect[l1] <- num/den
                }
                # The whole group was just processed; jump i past it
                # (l1 is the last member, the i + 1 below moves on).
                i <- l1
            }
            i <- i + 1
        }
        # Clamp small numerical overshoots back into [0, 1].
        expect[expect > 1] <- 1
        expect[expect < 0] <- 0
        if (!extra.loop) {
            # M step: binomial GLM on the expected statuses. The
            # "non-integer #successes" warning is expected here; muffle it.
            suppress <- function(w)
                if (any(grepl("non-integer #successes in a binomial glm", w)))
                invokeRestart("muffleWarning")
            mod.fit <- withCallingHandlers(glm.fit(X, expect,
                family = binomial(link = linkf)), warning = suppress)
            diff <- max(abs((beta.old - mod.fit$coefficients)/beta.old))
            beta.old <- mod.fit$coefficients
            if (control$trace)
                cat("beta is", beta.old, "\tdiff is", diff, "\n")
            counts <- counts + 1
            # One extra pass is run after convergence so that the Hessian
            # pieces below are evaluated at the final coefficients.
            if (diff <= control$tol || counts > control$maxit)
                extra.loop <- TRUE
        }
        else next.loop <- FALSE
    }
    # pt1/pt2: link-specific derivative terms used to assemble the
    # information matrix; m is its "diagonal" part, m1 the cross terms.
    pt1 <- switch(linkf, logit = -exp(xib)/(1 + exp(xib))^2,
        probit = sqrt(2) * xib * exp(-xib^2/2)/(sqrt(pi) * (1 -
            erf)) - 2 * exp(-xib^2)/(pi * (1 - erf)^2), cloglog = -exp(xib))
    pt2 <- switch(linkf, logit = 0, probit = (8 * exp(-xib^2/2) *
        erf + 2 * xib * sqrt(2 * pi) * erf^2 - 2 * xib * sqrt(2 *
        pi)) * exp(-xib^2/2)/((1 + erf)^2 * pi * (1 - erf)^2),
        cloglog = -(exp(xib - exp(xib)) + exp(2 * xib - exp(xib)) -
            exp(xib))/(exp(-exp(xib)) - 1)^2)
    nm <- pt1 + expect * pt2
    sign1 <- as.vector(sign(nm))
    nn <- as.vector(sqrt(abs(nm)))
    x2 <- X * nn
    m <- (t(x2) %*% (sign1 * x2))
    m1 <- 0
    for (i in vec) {
        vec1 <- vec[groupn == groupn[i]]
        if (Y[i] == 0) {
            for (j in vec1) {
                coe <- switch(linkf, logit = 1, probit = 8 * exp(-(xib[i]^2 +
                  xib[j]^2)/2)/((1 - erf[i]^2) * (1 - erf[j]^2) * pi),
                  cloglog = exp(xib[i] + xib[j])/((exp(-exp(xib[i])) -
                    1) * (exp(-exp(xib[j])) - 1)))
                wii <- ifelse(i == j, expect[i] - expect[i]^2, expect[i] *
                  (pijk[j] - expect[j]))
                tim <- wii * coe * X[i, ] %*% t(X[j, ])
                m1 <- m1 + tim
            }
        }
        else {
            for (j in vec1) {
                temp <- a0[j] * pijk[j] + a1[j] * (1 - pijk[j])
                eii <- expect[i]/temp * a0[j] * pijk[j]
                wii <- ifelse(i == j, expect[i] - expect[i]^2, eii - expect[i] * expect[j])
                coe <- switch(linkf, logit = 1, probit = 8 * exp(-(xib[i]^2 +
                  xib[j]^2)/2)/((1 - erf[i]^2) * (1 - erf[j]^2) * pi),
                  cloglog = exp(xib[i] + xib[j])/((exp(-exp(xib[i])) -
                    1) * (exp(-exp(xib[j])) - 1)))
                tim <- wii * coe * X[i, ] %*% t(X[j, ])
                m1 <- m1 + tim
            }
        }
    }
    H <- -(m + m1)
    # Fitted probability that each group tests positive; response residuals.
    zhat <- sens + (1 - sens - spec) * prodp
    residual <- z - zhat
    # Negative observed-data log-likelihood, accumulated group by group.
    logl <- 0
    for (grn in 1:num.g) {
        if (z[grn] == 1) {
            vec1 <- vec[groupn == grn]
            mb2 <- 1
            for (l in vec1) {
                temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                mb2 <- mb2 * temp
            }
            null <- 1
            for (l in vec1) {
                temp <- a1[l] * (1 - pijk[l])
                null <- null * temp
            }
            prob1 <- mb2 * sens + null * (1 - sens - spec)
        } else prob1 <- 1 - zhat[grn]
        logl <- logl - log(prob1)
    }
    aic <- 2 * logl + 2 * K
    if (diff > control$tol && counts > control$maxit)
        warning("EM algorithm did not converge.")
    if (control$time) {
        end.time <- proc.time()
        save.time <- end.time - start.time
        cat("\n Number of minutes running:", round(save.time[3]/60, 2), "\n \n")
    }
    list(coefficients = beta.old, hessian = H, fitted.values = zhat,
        deviance = 2 * logl, aic = aic, counts = counts - 1, residuals = residual,
        z = z)
}
# Build the control list used by the group-testing fitting routines.
# tol      - EM/optimizer convergence tolerance (must be > 0)
# n.gibbs  - Gibbs sample size per E step (positive integer)
# n.burnin - Gibbs burn-in length (positive integer)
# maxit    - maximum number of fitting iterations (must be > 0)
# trace    - print per-iteration progress?
# time     - report total running time?
# Each argument is validated up front; invalid values stop with an error.
gt.control <- function (tol = 0.0001, n.gibbs = 1000, n.burnin = 20,
    maxit = 500, trace = FALSE, time = TRUE)
{
    bad.tol <- !is.numeric(tol) || tol <= 0
    if (bad.tol) {
        stop("value of 'tol' must be > 0")
    }
    if (n.gibbs != round(n.gibbs) || n.gibbs <= 0) {
        stop("value of 'n.gibbs' must be a positive integer")
    }
    if (n.burnin != round(n.burnin) || n.burnin <= 0) {
        stop("value of 'n.burnin' must be a positive integer")
    }
    bad.maxit <- !is.numeric(maxit) || maxit <= 0
    if (bad.maxit) {
        stop("maximum number of iterations must be > 0")
    }
    settings <- list(tol = tol, n.gibbs = n.gibbs, n.burnin = n.burnin,
        maxit = maxit, trace = trace, time = time)
    settings
}
# User-facing fitting function for matrix-pooling (array) group testing.
# Extracts the model frame via the standard match.call()/eval() dance,
# pulls out the row/column/array identifiers and the optional retest
# column, then delegates to EM.mp. Returns an object of class
# c("gt.mp", "gt").
gtreg.mp <- function (formula, data, coln, rown, arrayn, retest = NULL,
    sens = 1, spec = 1, linkf = c("logit", "probit", "cloglog"),
    sens.ind = NULL, spec.ind = NULL, start = NULL, control = gt.control(...), ...)
{
    call <- match.call()
    # Rebuild the call as a model.frame() call, keeping only the arguments
    # that model.frame understands; evaluate it in the caller's frame.
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "coln", "rown",
        "arrayn"), names(mf), 0)
    mf <- mf[c(1, m)]
    mf$drop.unused.levels <- TRUE
    mf[[1]] <- as.name("model.frame")
    mf <- eval(mf, parent.frame())
    mt <- attr(mf, "terms")
    # Per-individual array, row and column identifiers from the model frame.
    arrayn <- model.extract(mf, "arrayn")
    rown <- model.extract(mf, "rown")
    coln <- model.extract(mf, "coln")
    # `retest` may be given as a bare column name of `data`; resolve it.
    if (!is.na(pos <- match(deparse(substitute(retest)), names(data))))
        retest <- data[, pos]
    Y <- model.response(mf, "any")
    # Drop a spurious 1-D dim attribute while preserving names.
    if (length(dim(Y)) == 1) {
        nm <- rownames(Y)
        dim(Y) <- NULL
        if (!is.null(nm))
            names(Y) <- nm
    }
    X <- if (!is.empty.model(mt))
        model.matrix(mt, mf)
    else matrix(, NROW(Y), 0)
    linkf <- match.arg(linkf)
    # Y is expected to be two-column: column responses and row responses.
    fit <- EM.mp(Y[, 1], Y[, 2], X, coln, rown, arrayn, retest,
        sens, spec, linkf, sens.ind, spec.ind, start, control)
    fit <- c(fit, list(call = call, formula = formula, link = linkf,
        terms = mt))
    class(fit) <- c("gt.mp", "gt")
    fit
}
# EM algorithm (with a Gibbs-sampling E step) for matrix-pooling group
# testing: each individual sits in one row pool and one column pool of an
# array; col.resp/row.resp are the observed pool responses, coln/rown/sqn
# the column, row and array indices, and ret optional individual retests.
# Returns coefficients, hessian, the Gibbs sample size used per E step and
# the EM iteration count. Note: the E step draws from the RNG, so results
# depend on the RNG state.
EM.mp <- function (col.resp, row.resp, X, coln, rown, sqn, ret, sens,
    spec, linkf, sens.ind, spec.ind, start = NULL, control = gt.control())
{
    if (control$time)
        start.time <- proc.time()
    # Individual-retest error rates default to the pool-test rates.
    if (is.null(sens.ind))
        sens.ind <- sens
    if (is.null(spec.ind))
        spec.ind <- spec
    len <- max(sqn)
    diff <- 1
    counts <- 1
    sam <- length(sqn)
    # Make column group numbers unique across arrays by offsetting each
    # array's column indices past the previous maximum.
    col.groupn <- coln[sqn == 1]
    if (len > 1) {
        for (i in 2:len) {
            temp <- max(col.groupn) + coln[sqn == i]
            col.groupn <- c(col.groupn, temp)
        }
    }
    if (is.null(start)) {
        # Starting values: fit a simple group-testing regression to the
        # column pools; if that fails, fall back to the row pools.
        mod.fit <- try(gtreg.fit(col.resp, X, col.groupn,
            sens, spec, linkf))
        # BUG FIX: use inherits() instead of class(...) == "try-error";
        # class() can be a vector of length > 1, which makes == unreliable.
        if (inherits(mod.fit, "try-error")) {
            row.groupn <- rown[sqn == 1]
            if (len > 1) {
                for (i in 2:len) {
                    temp <- max(row.groupn) + rown[sqn == i]
                    row.groupn <- c(row.groupn, temp)
                }
            }
            mod.fit <- gtreg.fit(row.resp, X, row.groupn,
                sens, spec, linkf)
        }
        beta.old <- mod.fit$coefficients
    }
    else beta.old <- start
    extra.loop <- FALSE
    next.loop <- TRUE
    while (next.loop) {
        xib <- X %*% beta.old
        pijk.all <- switch(linkf, logit = plogis(xib),
            probit = pnorm(xib), cloglog = 1 - exp(-exp(xib)))
        expect.all <- numeric(0)
        mat2 <- index <- 0
        erf <- 2 * pijk.all - 1
        # E step: one Gibbs sampler per array to approximate the expected
        # individual statuses given the observed row/column pool results.
        for (arrayn in 1:len) {
            # index.c marks the first row of this array (one entry per
            # column); index.r marks the first column (one per row).
            index.r <- index.c <- vector("logical", length = sam)
            for (i in 1:sam) {
                if (rown[i] == 1 && sqn[i] == arrayn)
                  index.c[i] <- TRUE
                else index.c[i] <- FALSE
                if (coln[i] == 1 && sqn[i] == arrayn)
                  index.r[i] <- TRUE
                else index.r[i] <- FALSE
            }
            n.row <- max(rown[index.r])
            n.col <- max(coln[index.c])
            rowresp <- row.resp[index.r]
            colresp <- col.resp[index.c]
            index <- max(index) + 1:(n.row * n.col)
            if (!is.null(ret)) {
                # Likelihood contributions of the observed retests:
                # re given a true positive, re1 given a true negative.
                re.ind <- na.omit(cbind(coln[sqn == arrayn],
                  rown[sqn == arrayn], ret[sqn == arrayn]))
                re <- ifelse(re.ind[, 3] == 1, sens.ind, 1 -
                  sens.ind)
                re1 <- ifelse(re.ind[, 3] == 0, spec.ind, 1 -
                  spec.ind)
            }
            pijk <- matrix(pijk.all[sqn == arrayn], nrow = n.row)
            # Row/column pool likelihood factors given a positive (a, b)
            # or all-negative (a1, b1) pool.
            a <- ifelse(rowresp == 1, sens, 1 - sens)
            b <- ifelse(colresp == 1, sens, 1 - sens)
            a1 <- ifelse(rowresp == 0, spec, 1 - spec)
            b1 <- ifelse(colresp == 0, spec, 1 - spec)
            mat <- array(NA, c(n.row, n.col, control$n.gibbs))
            y <- matrix(0, nrow = n.row, ncol = n.col)
            for (k in 1:(control$n.gibbs + control$n.burnin)) {
                l <- 1
                # Full-conditional draw for each cell of the array.
                for (j in 1:n.col) for (i in 1:n.row) {
                  num <- a[i] * b[j] * pijk[i, j]
                  den.r <- ifelse(sum(y[i, ]) - y[i, j] > 0,
                    a[i], a1[i])
                  den.c <- ifelse(sum(y[, j]) - y[i, j] > 0,
                    b[j], b1[j])
                  den2 <- den.r * den.c * (1 - pijk[i, j])
                  if (!is.null(ret)) {
                    if (l <= length(re) && j == re.ind[l, 1] &&
                      i == re.ind[l, 2]) {
                      num <- num * re[l]
                      den2 <- den2 * re1[l]
                      l <- l + 1
                    }
                  }
                  den <- num + den2
                  if (den != 0) {
                    cond.p <- num/den
                    y[i, j] <- rbinom(1, 1, cond.p)
                  }
                  else y[i, j] <- 0
                }
                if (k > control$n.burnin) {
                  mat[, , k - control$n.burnin] <- y
                  vec <- as.vector(y)
                  # On the final (post-convergence) pass, accumulate the
                  # cross-product term of the information matrix.
                  if (extra.loop)
                    for (i1 in index[vec == 1]) for (j1 in index[vec ==
                      1]) {
                      bq <- switch(linkf, logit = 1, probit = 8 *
                        exp(-(xib[i1]^2 + xib[j1]^2)/2)/((1 - erf[i1]^2) *
                        (1 - erf[j1]^2) * pi), cloglog = exp(xib[i1] +
                        xib[j1])/((exp(-exp(xib[i1])) - 1) * (exp(-exp(xib[j1])) -
                        1))) * X[i1, ] %*% t(X[j1, ])
                      mat2 <- mat2 + bq
                    }
                }
            }
            # Monte Carlo estimate of E[y | pool results] for this array.
            expect.m <- apply(mat, c(1, 2), mean)
            expect <- as.vector(expect.m)
            expect.all <- c(expect.all, expect)
        }
        if (!extra.loop) {
            # M step: binomial GLM on the expected statuses; the
            # "non-integer #successes" warning is expected here.
            suppress <- function(w)
                if(any(grepl("non-integer #successes in a binomial glm", w)))
                    invokeRestart("muffleWarning")
            mod.fit <- withCallingHandlers(glm.fit(X, expect.all,
                family = binomial(link = linkf)), warning = suppress)
            diff <- max(abs((beta.old - mod.fit$coefficients)/beta.old))
            beta.old <- mod.fit$coefficients
            if (control$trace)
                cat("beta is", beta.old, "\tdiff is", diff, "\n")
            counts <- counts + 1
            # Run one extra loop after convergence to evaluate the
            # Hessian pieces at the final coefficients.
            if (diff <= control$tol || counts > control$maxit)
                extra.loop <- TRUE
        }
        else next.loop <- FALSE
    }
    # Assemble the (Monte Carlo) information matrix: first/second are the
    # two parts of the cross-term, m the "diagonal" part.
    index <- 0
    first <- mat2/control$n.gibbs
    second <- 0
    for (arrayn in 1:len) {
        n.row <- max(rown[sqn == arrayn])
        n.col <- max(coln[sqn == arrayn])
        index <- max(index) + 1:(n.row * n.col)
        expect <- expect.all[index]
        for (i1 in index) for (j1 in index) {
            coe <- switch(linkf, logit = 1, probit = 8 * exp(-(xib[i1]^2 +
                xib[j1]^2)/2)/((1 - erf[i1]^2) * (1 - erf[j1]^2) *
                pi), cloglog = exp(xib[i1] + xib[j1])/((exp(-exp(xib[i1])) -
                1) * (exp(-exp(xib[j1])) - 1)))
            tim <- expect.all[i1] * expect.all[j1] * coe * X[i1,
                ] %*% t(X[j1, ])
            second <- second + tim
        }
    }
    m1 <- first - second
    pt1 <- switch(linkf, logit = -exp(xib)/(1 + exp(xib))^2,
        probit = sqrt(2) * xib * exp(-xib^2/2)/(sqrt(pi) * (1 -
            erf)) - 2 * exp(-xib^2)/(pi * (1 - erf)^2), cloglog = -exp(xib))
    pt2 <- switch(linkf, logit = 0, probit = (8 * exp(-xib^2/2) *
        erf + 2 * xib * sqrt(2 * pi) * erf^2 - 2 * xib * sqrt(2 *
        pi)) * exp(-xib^2/2)/((1 + erf)^2 * pi * (1 - erf)^2),
        cloglog = -(exp(xib - exp(xib)) + exp(2 * xib - exp(xib)) -
            exp(xib))/(exp(-exp(xib)) - 1)^2)
    nm <- pt1 + expect.all * pt2
    sign1 <- as.vector(sign(nm))
    nn <- as.vector(sqrt(abs(nm)))
    x2 <- X * nn
    m <- (t(x2) %*% (sign1 * x2))
    H <- -(m + m1)
    if (diff > control$tol && counts > control$maxit)
        warning("EM algorithm did not converge.")
    if (control$time) {
        end.time <- proc.time()
        save.time <- end.time - start.time
        cat("\n Number of minutes running:", round(save.time[3]/60, 2), "\n \n")
    }
    list(coefficients = beta.old, hessian = H, Gibbs.sample.size = control$n.gibbs,
        counts = counts - 1)
}
# Simulate group-testing data with individual retesting of members of
# test-positive groups. If x is NULL, a single gamma(gshape, gscale)
# covariate of length sample.size is drawn; otherwise x supplies the
# covariate(s) and determines the sample size. par holds the regression
# coefficients (intercept first). Returns a data frame with the observed
# group response (gres), the covariate(s), group number, true individual
# status (ind) and the retest result (NA when no retest occurred).
# Results depend on the RNG state; call set.seed() first to reproduce.
sim.gt <- function (x = NULL, gshape = 20, gscale = 2, par,
    linkf = c("logit", "probit", "cloglog"),
    sample.size, group.size, sens = 1, spec = 1, sens.ind = NULL, spec.ind = NULL)
{
    # Individual-retest error rates default to the group-test rates.
    if (is.null(sens.ind))
        sens.ind <- sens
    if (is.null(spec.ind))
        spec.ind <- spec
    if (is.null(x)) {
        x <- rgamma(n = sample.size, shape = gshape, scale = gscale)
        X <- cbind(1, x)
    }
    else {
        X <- cbind(1, x)
        sample.size <- nrow(X)
    }
    linkf <- match.arg(linkf)
    # True individual positivity probabilities and statuses.
    pijk <- switch(linkf, logit = plogis(X %*% par),
        probit = pnorm(X %*% par),
        cloglog = 1 - exp(-exp(X %*% par)))
    ind <- rbinom(n = sample.size, size = 1, prob = pijk)
    # Assign consecutive individuals to groups of size group.size
    # (the last group may be smaller).
    num.g <- ceiling(sample.size/group.size)
    vec <- 1:sample.size
    groupn <- rep(1:num.g, each = group.size)[vec]
    save.sum <- tapply(ind, groupn, sum)
    save.group <- as.vector(ifelse(save.sum > 0, 1, 0))
    save.obs <- rep(NA, num.g)
    ret <- rep(NA, sample.size)
    # Observed group result: subject the true group status to the assay's
    # sensitivity/specificity.
    for (i in 1:num.g)
        save.obs[i] <- ifelse(save.group[i] == 1, rbinom(1, 1, sens),
            1 - rbinom(1, 1, spec))
    gres <- rep(save.obs, each = group.size)[vec]
    # Retest every member of each observed-positive group.
    for (i in vec) {
        if (gres[i] == 1)
            ret[i] <- ifelse(ind[i] == 1, rbinom(1,
                1, sens.ind), 1 - rbinom(1, 1, spec.ind))
    }
    grd <- data.frame(gres = gres, x = x, groupn = groupn, ind = ind, retest = ret)
    # With a multi-column x, rename its columns x1, x2, ... in the output.
    if (ncol(X) > 2)
        for (i in 2:ncol(X))
            colnames(grd)[i] <- paste("x", i - 1, sep="")
    grd
}
# Simulate matrix-pooling (array) group-testing data. n.row/n.col give the
# dimensions of each array (vectors of equal length, one entry per array).
# If x is NULL a gamma covariate of the required total length is drawn;
# otherwise x supplies the covariates and must match sum(n.row * n.col).
# Individuals in observed-positive row AND column pools are retested
# (or all of a positive pool when the crossing pools all test negative).
# Returns NULL if no pool tests positive; otherwise a list with the
# simulated data frame, the per-array matrices of true statuses, and the
# true positivity probabilities. Results depend on the RNG state.
sim.mp <- function (x = NULL, gshape = 20, gscale = 2, par,
    linkf = c("logit", "probit", "cloglog"),
    n.row, n.col, sens = 1, spec = 1, sens.ind = NULL, spec.ind = NULL)
{
    # Individual-retest error rates default to the pool-test rates.
    if (is.null(sens.ind))
        sens.ind <- sens
    if (is.null(spec.ind))
        spec.ind <- spec
    if (length(n.row) != length(n.col))
        stop("vector n.row and n.col must have the same length")
    linkf <- match.arg(linkf)
    if (is.null(x)) {
        sample.size <- sum(n.col * n.row)
        x <- rgamma(n = sample.size, shape = gshape, scale = gscale)
        X <- cbind(1, x)
    }
    else {
        X <- cbind(1, x)
        sample.size <- nrow(X)
        if (sum(n.col * n.row) != sample.size)
            stop("n.row and n.col not consistent with the sample size")
    }
    len <- length(n.row)
    # True individual positivity probabilities and statuses.
    pijk <- switch(linkf, logit = plogis(X %*% par),
        probit = pnorm(X %*% par),
        cloglog = 1 - exp(-exp(X %*% par)))
    ind <- rbinom(n = sample.size, size = 1, prob = pijk)
    individual <- col.groupn <- row.groupn <- numeric(0)
    rowr <- colr <- numeric(0)
    ret <- rep(NA, sample.size)
    for (i in 1:len) {
        # Positions of this array's individuals within the full vectors.
        # (seq's `length` partially matches length.out.)
        if (i > 1)
            index <- seq(max(index) + 1, length = (n.row * n.col)[i])
        else index <- 1:(n.row * n.col)[1]
        # Individuals fill the array column by column.
        indm <- matrix(ind[index], nrow = n.row[i])
        # True and observed (error-prone) column pool results.
        col.resp <- apply(indm, MARGIN = 2, FUN = sum)
        col.resp <- ifelse(col.resp > 0, 1, 0)
        col.err <- rep(NA, n.col[i])
        for (j in 1:n.col[i])
            col.err[j] <- ifelse(col.resp[j] == 1, rbinom(1, 1, sens),
                1 - rbinom(1, 1, spec))
        # True and observed row pool results.
        row.resp <- apply(indm, MARGIN = 1, FUN = sum)
        row.resp <- ifelse(row.resp > 0, 1, 0)
        row.err <- rep(NA, n.row[i])
        for (j in 1:n.row[i])
            row.err[j] <- ifelse(row.resp[j] == 1, rbinom(1, 1, sens),
                1 - rbinom(1, 1, spec))
        # Expand pool results/indices to one entry per individual.
        temp.c <- rep(1:n.col[i], each = n.row[i])
        col.groupn <- c(col.groupn, temp.c)
        temp.r <- rep(1:n.row[i], n.col[i])
        row.groupn <- c(row.groupn, temp.r)
        temp2.c <- rep(col.err, each = n.row[i])
        colr <- c(colr, temp2.c)
        temp2.r <- rep(row.err, n.col[i])
        rowr <- c(rowr, temp2.r)
        # Retest rule: members of positive columns when no row tested
        # positive; members of positive rows when no column did; otherwise
        # only individuals at positive-row/positive-column intersections.
        if (all(row.err == 0)) {
            for (j in index) {
                if (colr[j] == 1)
                    ret[j] <- ifelse(ind[j] == 1, rbinom(1,
                        1, sens.ind), 1 - rbinom(1, 1, spec.ind))
            }
        }
        else {
            if (all(col.err == 0)) {
                for (j in index) {
                    if (rowr[j] == 1)
                        ret[j] <- ifelse(ind[j] == 1, rbinom(1,
                            1, sens.ind), 1 - rbinom(1, 1, spec.ind))
                }
            }
            else {
                for (j in index) {
                    if (rowr[j] == 1 && colr[j] == 1)
                        ret[j] <- ifelse(ind[j] == 1, rbinom(1, 1,
                            sens.ind), 1 - rbinom(1, 1, spec.ind))
                }
            }
        }
        individual <- c(individual, list(indm))
    }
    sq <- rep(1:len, n.col * n.row)
    # No information at all if every pool tested negative.
    if (all(colr == 0) && all(rowr == 0))
        return(NULL)
    grd <- data.frame(x = x, col.resp = colr,
        row.resp = rowr, coln = col.groupn, rown = row.groupn,
        arrayn = sq, retest = ret)
    # With a multi-column x, rename its columns x1, x2, ... in the output.
    if (ncol(X) > 2)
        for (i in 1:(ncol(X) - 1))
            colnames(grd)[i] <- paste("x", i, sep="")
    list(dframe = grd, ind = individual, prob = as.vector(pijk))
}
# Summary method for "gt" fits. Builds a Wald coefficient table (estimate,
# standard error, z statistic, two-sided p-value) from the inverse of the
# fit's Hessian, copies over the fit statistics used by print.summary.gt,
# and attaches the deviance residuals and covariance matrix.
summary.gt <- function (object, ...)
{
    est <- object$coefficients
    # Variance-covariance matrix of the estimates = inverse Hessian.
    vcov.m <- solve(object$hessian)
    dimnames(vcov.m) <- list(names(est), names(est))
    std.err <- sqrt(diag(vcov.m))
    z.stat <- est/std.err
    p.val <- 2 * pnorm(-abs(z.stat))
    coef.tab <- cbind(est, std.err, z.stat, p.val)
    dimnames(coef.tab) <- list(names(est),
        c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))
    # Keep only the components (when present) that the print method uses.
    keep <- match(c("call", "link", "aic", "deviance", "df.residual",
        "null.deviance", "df.null", "counts", "method", "z"),
        names(object), 0)
    out <- c(object[keep],
        list(coefficients = coef.tab,
            deviance.resid = residuals(object, type = "deviance"),
            cov.mat = vcov.m))
    class(out) <- "summary.gt"
    out
}
# Print method for summary.gt objects: shows the call, deviance residuals
# (condensed to a five-number summary when there are more than 5 groups),
# the coefficient table, the deviance/df lines when available, and the AIC
# plus iteration count.
print.summary.gt <- function (x, digits = max(3, getOption("digits") - 3),
    signif.stars = getOption("show.signif.stars"), ...)
{
    obj <- x
    cat("\nCall:\n")
    cat(paste(deparse(obj$call), sep = "\n", collapse = "\n"),
        "\n\n", sep = "")
    cat("Deviance Residuals: \n")
    # More than 5 groups: report quantiles instead of all residuals.
    if (length(obj$z) > 5) {
        obj$deviance.resid <- quantile(obj$deviance.resid, na.rm = TRUE)
        names(obj$deviance.resid) <- c("Min", "1Q", "Median",
            "3Q", "Max")
    }
    print.default(obj$deviance.resid, digits = digits, na.print = "",
        print.gap = 2)
    cat("\nCoefficients:\n")
    coefs <- obj$coefficients
    printCoefmat(coefs, digits = digits, signif.stars = signif.stars,
        na.print = "NA", ...)
    # Null/residual deviance lines only when the fit carried df.null.
    if (!is.null(unlist(obj["df.null"])))
        cat("\n", apply(cbind(paste(format(c("Null", "Residual"),
            justify = "right"), "deviance:"), format(unlist(obj[c("null.deviance",
            "deviance")]), digits = 4), " on", format(unlist(obj[c("df.null",
            "df.residual")])), " degrees of freedom\n"), 1, paste,
            collapse = " "), sep = "")
    # Vansteelandt fits iterate via optim(); all others via EM.
    if (obj$method == "Vansteelandt")
        cat("AIC: ", format(obj$aic, digits = 4), "\n\n", "Number of iterations in optim(): ",
            obj$counts, "\n", sep = "")
    else {
        cat("AIC: ", format(obj$aic, digits = 4), "\n\n", "Number of iterations in EM: ",
            obj$counts, "\n", sep = "")
    }
    cat("\n")
    invisible(obj)
}
# Predict method for "gt" fits. Computes the linear predictor (type =
# "link") or the fitted probability (type = "response") for the original
# or new data; optionally returns delta-method standard errors and a
# confidence interval computed on the link scale and back-transformed.
#
# Returns: a named vector of predictions; a list(fit, se.fit) when
# se.fit = TRUE; a list(fit, se.fit, lower, upper) when conf.level is set.
predict.gt <- function (object, newdata, type = c("link", "response"), se.fit = FALSE,
    conf.level = NULL, na.action = na.pass, ...)
{
    tt <- terms(object)
    Terms <- delete.response(tt)
    # Design matrix for either the original model frame or newdata.
    if (missing(newdata) || is.null(newdata)) {
        m <- model.frame(object)
        newd <- model.matrix(Terms, m)
    }
    else {
        m <- model.frame(Terms, newdata, na.action = na.action)
        newd <- model.matrix(Terms, m)
    }
    type <- match.arg(type)
    lin.pred <- as.vector(newd %*% object$coefficients)
    link <- object$link
    res <- switch(link, logit = plogis(lin.pred), probit = pnorm(lin.pred),
        cloglog = 1 - exp(-exp(lin.pred)))
    if (type == "response")
        pred <- res
    else pred <- lin.pred
    # BUG FIX: standard errors are required both when se.fit = TRUE and
    # when a confidence level is requested. Previously, conf.level with
    # se.fit = FALSE failed with "object 'se' not found".
    if (se.fit || !is.null(conf.level)) {
        cov <- solve(object$hessian)
        # Delta-method variance of the linear predictor.
        var.lin.pred <- diag(newd %*% cov %*% t(newd))
        var.res <- switch(link, logit = exp(2 * lin.pred)/(1 +
            exp(lin.pred))^4, probit = dnorm(lin.pred)^2, cloglog = (exp(-exp(lin.pred)) *
            exp(lin.pred))^2) * var.lin.pred
        if (type == "response")
            se <- sqrt(var.res)
        else se <- sqrt(var.lin.pred)
        if (!is.null(conf.level)) {
            # CI on the link scale, then back-transformed for "response".
            alpha <- 1 - conf.level
            lower <- lin.pred - qnorm(1 - alpha/2) * sqrt(var.lin.pred)
            upper <- lin.pred + qnorm(1 - alpha/2) * sqrt(var.lin.pred)
            res.lower <- switch(link, logit = plogis(lower),
                probit = pnorm(lower), cloglog = 1 - exp(-exp(lower)))
            res.upper <- switch(link, logit = plogis(upper),
                probit = pnorm(upper), cloglog = 1 - exp(-exp(upper)))
            if (type == "response") {
                lwr <- res.lower
                upr <- res.upper
            }
            else {
                lwr <- lower
                upr <- upper
            }
        }
    }
    names(pred) <- 1:length(lin.pred)
    if (!is.null(conf.level)) {
        list(fit = pred, se.fit = se, lower = lwr, upper = upr)
    }
    else if (se.fit)
        list(fit = pred, se.fit = se)
    else pred
}
# Residuals method for "gt" fits. Supports response-scale, Pearson and
# deviance residuals, computed from the stored response residuals, fitted
# group-positivity probabilities and observed group responses.
residuals.gt <- function (object, type = c("deviance", "pearson", "response"),
    ...)
{
    type <- match.arg(type)
    raw <- object$residuals
    fitted.p <- object$fitted.values
    observed <- object$z
    if (type == "response") {
        raw
    } else if (type == "pearson") {
        # Standardize by the binomial standard deviation of each group.
        raw/sqrt(fitted.p * (1 - fitted.p))
    } else {
        # Signed square root of each group's deviance contribution.
        loglik.part <- observed * log(fitted.p) +
            (1 - observed) * log(1 - fitted.p)
        sqrt(-2 * loglik.part) * sign(observed - fitted.p)
    }
}
# Summary method for matrix-pooling ("gt.mp") fits. Builds the Wald
# coefficient table from the inverse Hessian and keeps the call, link,
# Gibbs sample size and iteration count for printing.
summary.gt.mp <- function (object, ...)
{
    est <- object$coefficients
    # Variance-covariance matrix of the estimates = inverse Hessian.
    vcov.m <- solve(object$hessian)
    dimnames(vcov.m) <- list(names(est), names(est))
    std.err <- sqrt(diag(vcov.m))
    z.stat <- est/std.err
    p.val <- 2 * pnorm(-abs(z.stat))
    coef.tab <- cbind(est, std.err, z.stat, p.val)
    dimnames(coef.tab) <- list(names(est),
        c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))
    keep <- match(c("call", "link", "Gibbs.sample.size", "counts"), names(object), 0)
    out <- c(object[keep], list(coefficients = coef.tab, cov.mat = vcov.m))
    class(out) <- "summary.gt.mp"
    out
}
# Print method for summary.gt.mp objects: shows the call, the coefficient
# table, the Gibbs sample size used in each E step and the EM iteration
# count. Returns the object invisibly, as print methods should.
print.summary.gt.mp <- function (x, digits = max(3, getOption("digits") - 3),
    signif.stars = getOption("show.signif.stars"),
    ...)
{
    obj <- x
    cat("\nCall:\n")
    cat(paste(deparse(obj$call), sep = "\n", collapse = "\n"),
        "\n", sep = "")
    cat("\nCoefficients:\n")
    coefs <- obj$coefficients
    printCoefmat(coefs, digits = digits, signif.stars = signif.stars,
        na.print = "NA", ...)
    cat("\nNumber of Gibbs samples generated in each E step: ",
        obj$Gibbs.sample.size, "\n", "Number of iterations in EM algorithm: ",
        obj$counts, "\n", sep = "")
    cat("\n")
    invisible(obj)
}
# Print method for "gt" fits: shows the call and coefficients, plus the
# degrees of freedom, deviances and AIC when the fit recorded them
# (matrix-pooling fits carry no df.null, so that section is skipped).
print.gt <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
    cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
    if (length(coef(x))) {
        cat("Coefficients:\n")
        print.default(format(coef(x), digits = digits), print.gap = 2,
            quote = FALSE)
    }
    else cat("No coefficients\n")
    if (!is.null(x$df.null)) {
        cat("\nDegrees of Freedom:", x$df.null, "Total (i.e. Null); ",
            x$df.residual, "Residual\n")
        cat("Null Deviance:\t ", format(signif(x$null.deviance,
            digits)), "\nResidual Deviance:", format(signif(x$deviance,
            digits)), "\tAIC:", format(signif(x$aic, digits)), "\n")
    }
    invisible(x)
}
# User-facing fitting function for the halving retesting protocol:
# observed-positive groups are split into two subgroups, the subgroups are
# retested, and members of positive subgroups are retested individually.
# subg holds the subgroup test results and retest the individual results.
# Extracts the model frame and delegates to EM.halving; returns an object
# of class "gt".
gtreg.halving <- function(formula, data, groupn, subg, retest, sens = 1, spec = 1,
    linkf = c("logit", "probit", "cloglog"),
    sens.ind = NULL, spec.ind = NULL,
    start = NULL, control = gt.control(...), ...) {
    call <- match.call()
    # Rebuild the call as a model.frame() call with only the arguments
    # model.frame understands, and evaluate it in the caller's frame.
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "groupn"), names(mf), 0)
    mf <- mf[c(1, m)]
    mf$drop.unused.levels <- TRUE
    mf[[1]] <- as.name("model.frame")
    mf <- eval(mf, parent.frame())
    mt <- attr(mf, "terms")
    gr <- model.extract(mf, "groupn")
    # `retest` and `subg` may be given as bare column names of `data`.
    if (!is.na(pos <- match(deparse(substitute(retest)), names(data))))
        retest <- data[, pos]
    if (!is.na(pos <- match(deparse(substitute(subg)), names(data))))
        subg <- data[, pos]
    Y <- model.response(mf, "any")
    # Drop a spurious 1-D dim attribute while preserving names.
    if (length(dim(Y)) == 1) {
        nm <- rownames(Y)
        dim(Y) <- NULL
        if (!is.null(nm))
            names(Y) <- nm
    }
    X <- if (!is.empty.model(mt))
        model.matrix(mt, mf)
    else matrix(, NROW(Y), 0)
    linkf <- match.arg(linkf)
    fit <- EM.halving(Y, X, gr, subg, retest, sens, spec, linkf,
        sens.ind, spec.ind, start, control)
    fit <- c(fit, list(call = call, formula = formula, method = "Xie",
        link = linkf, terms = mt))
    class(fit) <- "gt"
    fit
}
# EM algorithm for the halving retesting protocol. Observed-positive
# groups are split into two halves (sub1 = first ceiling(gs/2) members,
# sub2 = the rest); subg gives each member's subgroup test result and
# ret the individual retest results (NA where not retested). The four
# combinations of subgroup outcomes each get their own E-step expressions.
# Returns coefficients, hessian, fitted group probabilities, deviance,
# AIC, iteration count, residuals and observed group responses z.
EM.halving <- function (Y, X, groupn, subg, ret, sens, spec, linkf,
    sens.ind, spec.ind,
    start = NULL, control = gt.control())
{
    if (control$time)
        start.time <- proc.time()
    # Individual-retest error rates default to the group-test rates.
    if (is.null(sens.ind))
        sens.ind <- sens
    if (is.null(spec.ind))
        spec.ind <- spec
    # One observed response per group, and problem dimensions.
    z <- tapply(Y, groupn, tail, n = 1)
    num.g <- max(groupn)
    K <- ncol(X)
    # Starting values: regress group responses on group-mean covariates.
    if (is.null(start)) {
        if (K == 1)
            cova.mean <- as.matrix(tapply(X, groupn, mean))
        else {
            temp <- by(X, groupn, colMeans)
            cova.mean <- do.call(rbind, temp)
        }
        beta.old <- lm.fit(cova.mean, z)$coefficients
    } else beta.old <- start
    sam <- length(Y)
    vec <- 1:sam
    group.sizes <- tapply(Y, groupn, length)
    diff <- 1
    counts <- 1
    extra.loop <- FALSE
    next.loop <- TRUE
    # a0[l]/a1[l]: likelihood of l's observed retest result given true
    # positive / true negative status; NA where no retest was done.
    a0 <- ifelse(ret == 1, sens.ind, 1 - sens.ind)
    a1 <- ifelse(ret == 0, spec.ind, 1 - spec.ind)
    while (next.loop) {
        xib <- X %*% beta.old
        pijk <- switch(linkf, logit = plogis(xib),
            probit = pnorm(xib), cloglog = 1 -
            exp(-exp(xib)))
        erf <- 2 * pijk - 1
        prodp <- tapply(1 - pijk, groupn, prod)
        den2 <- rep(spec * prodp + (1 - sens) * (1 - prodp),
            group.sizes)
        # E step: expected true status given the group, subgroup and
        # retest results; branches on subg for this member and for the
        # last group member (i.e. the two subgroup outcomes).
        expect <- rep(NA, times = sam)
        i <- 1
        while (i <= sam) {
            if (Y[i] == 0)
                expect[i] <- (1 - sens) * pijk[i]/den2[i]
            else {
                if (subg[i] == 0) {
                  vec1 <- vec[groupn == groupn[i]]
                  gs <- length(vec1)
                  sub1 <- vec1[1:ceiling(gs/2)]
                  sub2 <- vec1[(ceiling(gs/2) + 1):gs]
                  # Case: both subgroups tested negative.
                  if (subg[vec1[gs]] == 0) {
                    den <- (1-spec)*spec^2*prod(1-pijk[sub1])*prod(1-pijk[sub2])+
                      spec*(1-sens)*sens*prod(1-pijk[sub1])*(1-prod(1-pijk[sub2]))+
                      spec*(1-sens)*sens*prod(1-pijk[sub2])*(1-prod(1-pijk[sub1]))+
                      (1-sens)^2*sens*(1-prod(1-pijk[sub1]))*(1-prod(1-pijk[sub2]))
                    ab1 <- (1-sens)*sens*(spec*prod(1-pijk[sub2])+
                      (1-sens)*(1-prod(1-pijk[sub2])))
                    ab2 <- (1-sens)*sens*(spec*prod(1-pijk[sub1])+
                      (1-sens)*(1-prod(1-pijk[sub1])))
                    for (l1 in sub1) {
                      expect[l1]<-ab1*pijk[l1]/den
                    }
                    for (l1 in sub2) {
                      expect[l1]<-ab2*pijk[l1]/den
                    }
                  }
                  # Case: first subgroup negative, second positive
                  # (mb2/null are retest likelihood products over sub2).
                  if (subg[vec1[gs]] == 1) {
                    mb2 <- 1
                    for (l in sub2) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2 <- mb2 * temp
                    }
                    null <- 1
                    for (l in sub2) {
                      temp <- a1[l] * (1 - pijk[l])
                      null <- null * temp
                    }
                    den <- (1-spec)^2*spec*null*prod(1-pijk[sub1])+
                      (1-spec)*(1-sens)*sens*null*(1-prod(1-pijk[sub1]))+
                      spec*sens^2*(mb2-null)*prod(1-pijk[sub1])+
                      (1-sens)*sens^2*(mb2-null)*(1-prod(1-pijk[sub1]))
                    ab1 <- (1-sens)*sens*(mb2*sens+null*(1-sens-spec))
                    for (l1 in sub1) {
                      expect[l1]<-ab1*pijk[l1]/den
                    }
                    for (l1 in sub2) {
                      temp <- a0[l1] * pijk[l1] + a1[l1] * (1 - pijk[l1])
                      num <- mb2/temp * a0[l1] * pijk[l1] * sens^2*(spec*prod(1-pijk[sub1])+
                        (1-sens)*(1-prod(1-pijk[sub1])))
                      expect[l1]<-num/den
                    }
                  }
                  # The group is fully processed; advance i past it.
                  i <- l1
                } else {
                  vec1 <- vec[groupn == groupn[i]]
                  gs <- length(vec1)
                  sub1 <- vec1[1:ceiling(gs/2)]
                  sub2 <- vec1[(ceiling(gs/2) + 1):gs]
                  # Case: first subgroup positive, second negative.
                  if (subg[vec1[gs]] == 0) {
                    mb2 <- 1
                    for (l in sub1) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2 <- mb2 * temp
                    }
                    null <- 1
                    for (l in sub1) {
                      temp <- a1[l] * (1 - pijk[l])
                      null <- null * temp
                    }
                    den <- (1-spec)^2*spec*null*prod(1-pijk[sub2])+
                      (1-spec)*(1-sens)*sens*null*(1-prod(1-pijk[sub2]))+
                      spec*sens^2*(mb2-null)*prod(1-pijk[sub2])+
                      (1-sens)*sens^2*(mb2-null)*(1-prod(1-pijk[sub2]))
                    ab1 <- (1-sens)*sens*(mb2*sens+null*(1-sens-spec))
                    for (l1 in sub1) {
                      temp <- a0[l1]*pijk[l1]+a1[l1]*(1-pijk[l1])
                      num <- mb2/temp*a0[l1]*pijk[l1]*sens^2*(spec*prod(1-pijk[sub2])+
                        (1-sens)*(1-prod(1-pijk[sub2])))
                      expect[l1]<-num/den
                    }
                    for (l1 in sub2) {
                      expect[l1]<-ab1*pijk[l1]/den
                    }
                  }
                  # Case: both subgroups positive (products over both
                  # halves: mb2/null for sub1, mb2a/nulla for sub2).
                  if (subg[vec1[gs]] == 1) {
                    mb2 <- 1
                    for (l in sub1) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2 <- mb2 * temp
                    }
                    null <- 1
                    for (l in sub1) {
                      temp <- a1[l] * (1 - pijk[l])
                      null <- null * temp
                    }
                    mb2a <- 1
                    for (l in sub2) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2a <- mb2a * temp
                    }
                    nulla <- 1
                    for (l in sub2) {
                      temp <- a1[l] * (1 - pijk[l])
                      nulla <- nulla * temp
                    }
                    den <- (1-spec)^3*null*nulla+
                      (1-spec)*sens^2*null*(mb2a-nulla)+
                      (1-spec)*sens^2*(mb2-null)*nulla+
                      sens^3*(mb2-null)*(mb2a-nulla)
                    for (l1 in sub1) {
                      temp <- a0[l1]*pijk[l1]+a1[l1]*(1-pijk[l1])
                      num <- mb2/temp*a0[l1]*pijk[l1]*sens^2*(mb2a*sens+nulla*(1-sens-spec))
                      expect[l1]<-num/den
                    }
                    for (l1 in sub2) {
                      temp <- a0[l1]*pijk[l1]+a1[l1]*(1-pijk[l1])
                      num <- mb2a/temp*a0[l1]*pijk[l1]*sens^2*(mb2*sens+null*(1-sens-spec))
                      expect[l1]<-num/den
                    }
                  }
                  i <- l1
                }
            }
            i <- i + 1
        }
        # Clamp small numerical overshoots back into [0, 1].
        expect[expect > 1] <- 1
        expect[expect < 0] <- 0
        if (!extra.loop) {
            # M step: binomial GLM on the expected statuses; the
            # "non-integer #successes" warning is expected here.
            suppress <- function(w)
                if (any(grepl("non-integer #successes in a binomial glm", w)))
                invokeRestart("muffleWarning")
            mod.fit <- withCallingHandlers(glm.fit(X, expect,
                family = binomial(link = linkf)), warning = suppress)
            diff <- max(abs((beta.old - mod.fit$coefficients)/beta.old))
            beta.old <- mod.fit$coefficients
            if (control$trace)
                cat("beta is", beta.old, "\tdiff is", diff, "\n")
            counts <- counts + 1
            # One extra pass after convergence to evaluate the Hessian
            # pieces at the final coefficients.
            if (diff <= control$tol || counts > control$maxit)
                extra.loop <- TRUE
        }
        else next.loop <- FALSE
    }
    # Information matrix pieces: m is the "diagonal" part, m1 the
    # pairwise cross terms.
    pt1 <- switch(linkf, logit = -exp(xib)/(1 + exp(xib))^2,
        probit = sqrt(2) * xib * exp(-xib^2/2)/(sqrt(pi) * (1 -
            erf)) - 2 * exp(-xib^2)/(pi * (1 - erf)^2), cloglog = -exp(xib))
    pt2 <- switch(linkf, logit = 0, probit = (8 * exp(-xib^2/2) *
        erf + 2 * xib * sqrt(2 * pi) * erf^2 - 2 * xib * sqrt(2 *
        pi)) * exp(-xib^2/2)/((1 + erf)^2 * pi * (1 - erf)^2),
        cloglog = -(exp(xib - exp(xib)) + exp(2 * xib - exp(xib)) -
            exp(xib))/(exp(-exp(xib)) - 1)^2)
    nm <- pt1 + expect * pt2
    sign1 <- as.vector(sign(nm))
    nn <- as.vector(sqrt(abs(nm)))
    x2 <- X * nn
    m <- (t(x2) %*% (sign1 * x2))
    m1 <- 0
    i <- 1
    while (i <= sam) {
        vec1 <- vec[groupn == groupn[i]]
        gs <- length(vec1)
        if (Y[i] == 0) {
            for (j in vec1) {
                wii <- ifelse(i == j, expect[i] - expect[i]^2, expect[i] *
                  (pijk[j] - expect[j]))
                tim <- wii * X[i, ] %*% t(X[j, ])
                m1 <- m1 + tim
            }
        } else {
            sub1 <- vec1[1:ceiling(gs/2)]
            sub2 <- vec1[(ceiling(gs/2) + 1):gs]
            # NOTE: the inner loops deliberately reuse `i`; afterwards i
            # equals the last member of the group, so the i + 1 at the
            # bottom of the while moves on to the next group.
            for (i in sub1) {
                for (j in sub1) {
                  if (subg[j] == 0) {
                    eii <- expect[i] * pijk[j]
                  } else {
                    temp <- a0[j] * pijk[j] + a1[j] * (1 - pijk[j])
                    eii <- expect[i]/temp * a0[j] * pijk[j]
                  }
                  wii <- ifelse(i == j, expect[i] - expect[i]^2, eii - expect[i] * expect[j])
                  tim <- wii * X[i, ] %*% t(X[j, ])
                  m1 <- m1 + tim
                }
                for (j in sub2) {
                  if (subg[j] == 0) {
                    temp<-spec*prod(1-pijk[sub2])+(1-sens)*(1-prod(1-pijk[sub2]))
                    eii <- expect[i]*(1-sens)*pijk[j]/temp
                  } else {
                    mb2a <- 1
                    for (l in sub2) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2a <- mb2a * temp
                    }
                    nulla <- 1
                    for (l in sub2) {
                      temp <- a1[l] * (1 - pijk[l])
                      nulla <- nulla * temp
                    }
                    temp <- a0[j]*pijk[j]+a1[j]*(1-pijk[j])
                    tempa <- mb2a * sens + nulla * (1 - sens - spec)
                    eii <- expect[i]/tempa*sens*a0[j]*pijk[j]*mb2a/temp
                  }
                  wii <- ifelse(i == j, expect[i] - expect[i]^2, eii - expect[i] * expect[j])
                  tim <- wii * X[i, ] %*% t(X[j, ])
                  m1 <- m1 + tim
                }
            }
            for (i in sub2) {
                for (j in sub1) {
                  if (subg[j] == 0) {
                    temp<-spec*prod(1-pijk[sub1])+(1-sens)*(1-prod(1-pijk[sub1]))
                    eii <- expect[i] * (1-sens)* pijk[j]/temp
                  } else {
                    mb2 <- 1
                    for (l in sub1) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2 <- mb2 * temp
                    }
                    null <- 1
                    for (l in sub1) {
                      temp <- a1[l] * (1 - pijk[l])
                      null <- null * temp
                    }
                    temp <- a0[j]*pijk[j]+a1[j]*(1-pijk[j])
                    tempa <- mb2*sens+null*(1-sens-spec)
                    eii <- expect[i]/tempa*sens*a0[j]*pijk[j]*mb2/temp
                  }
                  wii <- ifelse(i == j, expect[i] - expect[i]^2, eii - expect[i] * expect[j])
                  tim <- wii * X[i, ] %*% t(X[j, ])
                  m1 <- m1 + tim
                }
                for (j in sub2) {
                  if (subg[j] == 0) {
                    eii <- expect[i] * pijk[j]
                  } else {
                    temp <- a0[j] * pijk[j] + a1[j] * (1 - pijk[j])
                    eii <- expect[i]/temp * a0[j] * pijk[j]
                  }
                  wii <- ifelse(i == j, expect[i] - expect[i]^2, eii - expect[i] * expect[j])
                  tim <- wii * X[i, ] %*% t(X[j, ])
                  m1 <- m1 + tim
                }
            }
        }
        i <- i + 1
    }
    H <- -(m + m1)
    # Fitted probability each group tests positive; response residuals.
    zhat <- sens + (1 - sens - spec) * prodp
    residual <- z - zhat
    # Negative observed-data log-likelihood: for positive groups the same
    # four subgroup-outcome cases as in the E step.
    logl <- 0
    for (grn in 1:num.g) {
        if (z[grn] == 1) {
            vec1 <- vec[groupn == grn]
            gs <- length(vec1)
            sub1 <- vec1[1:ceiling(gs/2)]
            sub2 <- vec1[(ceiling(gs/2) + 1):gs]
            if (subg[vec1[1]] == 0) {
                if (subg[vec1[gs]] == 0) {
                    prob1 <- (1-spec)*spec^2*prod(1-pijk[sub1])*prod(1-pijk[sub2])+
                      spec*(1-sens)*sens*prod(1-pijk[sub1])*(1-prod(1-pijk[sub2]))+
                      spec*(1-sens)*sens*prod(1-pijk[sub2])*(1-prod(1-pijk[sub1]))+
                      (1-sens)^2*sens*(1-prod(1-pijk[sub1]))*(1-prod(1-pijk[sub2]))
                }
                if (subg[vec1[gs]] == 1) {
                    mb2 <- 1
                    for (l in sub2) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2 <- mb2 * temp
                    }
                    null <- 1
                    for (l in sub2) {
                      temp <- a1[l] * (1 - pijk[l])
                      null <- null * temp
                    }
                    prob1 <- (1-spec)^2*spec*null*prod(1-pijk[sub1])+
                      (1-spec)*(1-sens)*sens*null*(1-prod(1-pijk[sub1]))+
                      spec*sens^2*(mb2-null)*prod(1-pijk[sub1])+
                      (1-sens)*sens^2*(mb2-null)*(1-prod(1-pijk[sub1]))
                }
            } else {
                if (subg[vec1[gs]] == 0) {
                    mb2 <- 1
                    for (l in sub1) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2 <- mb2 * temp
                    }
                    null <- 1
                    for (l in sub1) {
                      temp <- a1[l] * (1 - pijk[l])
                      null <- null * temp
                    }
                    prob1 <- (1-spec)^2*spec*null*prod(1-pijk[sub2])+
                      (1-spec)*(1-sens)*sens*null*(1-prod(1-pijk[sub2]))+
                      spec*sens^2*(mb2-null)*prod(1-pijk[sub2])+
                      (1-sens)*sens^2*(mb2-null)*(1-prod(1-pijk[sub2]))
                }
                if (subg[vec1[gs]] == 1) {
                    mb2 <- 1
                    for (l in sub1) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2 <- mb2 * temp
                    }
                    null <- 1
                    for (l in sub1) {
                      temp <- a1[l] * (1 - pijk[l])
                      null <- null * temp
                    }
                    mb2a <- 1
                    for (l in sub2) {
                      temp <- a0[l] * pijk[l] + a1[l] * (1 - pijk[l])
                      mb2a <- mb2a * temp
                    }
                    nulla <- 1
                    for (l in sub2) {
                      temp <- a1[l] * (1 - pijk[l])
                      nulla <- nulla * temp
                    }
                    prob1 <- (1-spec)^3*null*nulla+
                      (1-spec)*sens^2*null*(mb2a-nulla)+
                      (1-spec)*sens^2*(mb2-null)*nulla+
                      sens^3*(mb2-null)*(mb2a-nulla)
                }
            }
        } else prob1 <- 1 - zhat[grn]
        logl <- logl - log(prob1)
    }
    aic <- 2 * logl + 2 * K
    if (diff > control$tol && counts > control$maxit)
        warning("EM algorithm did not converge.")
    if (control$time) {
        end.time <- proc.time()
        save.time <- end.time - start.time
        cat("\n Number of minutes running:", round(save.time[3]/60, 2), "\n \n")
    }
    list(coefficients = beta.old, hessian = H, fitted.values = zhat,
        deviance = 2 * logl, aic = aic,
        counts = counts - 1, residuals = residual, z = z)
}
# Simulate group-testing data under the halving protocol: each observed-
# positive group is split into two halves, both halves are retested as
# subgroups, and members of observed-positive subgroups are retested
# individually. Returns a data frame with the group response, covariates,
# group number, true status, individual retest result and subgroup result
# (NA outside positive groups). Results depend on the RNG state; call
# set.seed() first to reproduce.
sim.halving <- function (x = NULL, gshape = 20, gscale = 2, par,
    linkf = c("logit", "probit", "cloglog"),
    sample.size, group.size, sens = 1, spec = 1,
    sens.ind = NULL, spec.ind = NULL)
{
    # Individual-retest error rates default to the group-test rates.
    if (is.null(sens.ind))
        sens.ind <- sens
    if (is.null(spec.ind))
        spec.ind <- spec
    if (is.null(x)) {
        x <- rgamma(n = sample.size, shape = gshape, scale = gscale)
        X <- cbind(1, x)
    }
    else {
        X <- cbind(1, x)
        sample.size <- nrow(X)
    }
    linkf <- match.arg(linkf)
    # True individual positivity probabilities and statuses.
    pijk <- switch(linkf, logit = plogis(X %*% par),
        probit = pnorm(X %*% par),
        cloglog = 1 - exp(-exp(X %*% par)))
    ind <- rbinom(n = sample.size, size = 1, prob = pijk)
    num.g <- ceiling(sample.size/group.size)
    vec <- 1:sample.size
    groupn <- rep(1:num.g, each = group.size)[vec]
    save.sum <- tapply(ind, groupn, sum)
    save.group <- as.vector(ifelse(save.sum > 0, 1, 0))
    save.obs <- rep(NA, num.g)
    subgroup <- ret <- rep(NA, sample.size)
    for (grn in 1:num.g) {
        vec1 <- vec[groupn == grn]
        gs <- length(vec1)
        # Observed group result subject to sensitivity/specificity.
        save.obs[grn] <- ifelse(save.group[grn] == 1, rbinom(1, 1, sens),
            1 - rbinom(1, 1, spec))
        if (save.obs[grn] == 1) {
            # Halve the group and test each half.
            sub1 <- vec1[1:ceiling(gs/2)]
            sub2 <- vec1[(ceiling(gs/2) + 1):gs]
            tZ1 <- sum(ind[sub1])
            tZ2 <- sum(ind[sub2])
            Z1 <- ifelse(tZ1 == 1, rbinom(1,
                1, sens), 1 - rbinom(1, 1, spec))
            Z2 <- ifelse(tZ2 == 1, rbinom(1,
                1, sens), 1 - rbinom(1, 1, spec))
            # Retest every member of an observed-positive half.
            if (Z1 == 1) {
                for (i1 in sub1) {
                    ret[i1] <- ifelse(ind[i1] == 1, rbinom(1,
                        1, sens.ind), 1 - rbinom(1, 1, spec.ind))
                }
            }
            if (Z2 == 1) {
                for (i1 in sub2) {
                    ret[i1] <- ifelse(ind[i1] == 1, rbinom(1,
                        1, sens.ind), 1 - rbinom(1, 1, spec.ind))
                }
            }
            subgroup[sub1] <- Z1
            subgroup[sub2] <- Z2
        }
    }
    gres <- rep(save.obs, each = group.size)[vec]
    grd <- data.frame(gres = gres, x = x, groupn = groupn, ind = ind,
        retest = ret, subgroup = subgroup)
    # With a multi-column x, rename its columns x1, x2, ... in the output.
    if (ncol(X) > 2)
        for (i in 2:ncol(X))
            colnames(grd)[i] <- paste("x", i - 1, sep="")
    grd
}
|
1834f90b312ddd6bff871494021810049d4ff3f4 | cef3b5e2588a7377281a8f627a552350059ca68b | /cran/paws.application.integration/man/swf_poll_for_decision_task.Rd | c16e34fc4f57260bba4e901857f695a511c8b265 | [
"Apache-2.0"
] | permissive | sanchezvivi/paws | b1dc786a9229e0105f0f128d5516c46673cb1cb5 | 2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05 | refs/heads/main | 2023-02-16T11:18:31.772786 | 2021-01-17T23:50:41 | 2021-01-17T23:50:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,959 | rd | swf_poll_for_decision_task.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swf_operations.R
\name{swf_poll_for_decision_task}
\alias{swf_poll_for_decision_task}
\title{Used by deciders to get a DecisionTask from the specified decision
taskList}
\usage{
swf_poll_for_decision_task(domain, taskList, identity, nextPageToken,
maximumPageSize, reverseOrder)
}
\arguments{
\item{domain}{[required] The name of the domain containing the task lists to poll.}
\item{taskList}{[required] Specifies the task list to poll for decision tasks.
The specified string must not start or end with whitespace. It must not
contain a \code{:} (colon), \code{/} (slash), \code{|} (vertical bar), or any control
characters (\verb{\u0000-\u001f} | \verb{\u007f-\u009f}). Also, it must not \emph{be}
the literal string \code{arn}.}
\item{identity}{Identity of the decider making the request, which is recorded in the
DecisionTaskStarted event in the workflow history. This enables
diagnostic tracing when problems arise. The form of this identity is
user defined.}
\item{nextPageToken}{If \code{NextPageToken} is returned there are more results available. The
value of \code{NextPageToken} is a unique pagination token for each page.
Make the call again using the returned token to retrieve the next page.
Keep all other arguments unchanged. Each pagination token expires after
60 seconds. Using an expired pagination token will return a \code{400} error:
"\verb{Specified token has exceeded its maximum lifetime}".
The configured \code{maximumPageSize} determines how many results can be
returned in a single call.
The \code{nextPageToken} returned by this action cannot be used with
GetWorkflowExecutionHistory to get the next page. You must call
PollForDecisionTask again (with the \code{nextPageToken}) to retrieve the
next page of history records. Calling PollForDecisionTask with a
\code{nextPageToken} doesn't return a new decision task.}
\item{maximumPageSize}{The maximum number of results that are returned per call. Use
\code{nextPageToken} to obtain further pages of results.
This is an upper limit only; the actual number of results returned per
call may be fewer than the specified maximum.}
\item{reverseOrder}{When set to \code{true}, returns the events in reverse order. By default the
results are returned in ascending order of the \code{eventTimestamp} of the
events.}
}
\description{
Used by deciders to get a DecisionTask from the specified decision
\code{taskList}. A decision task may be returned for any open workflow
execution that is using the specified task list. The task includes a
paginated view of the history of the workflow execution. The decider
should use the workflow type and the history to determine how to
properly handle the task.
This action initiates a long poll, where the service holds the HTTP
connection open and responds as soon a task becomes available. If no
decision task is available in the specified task list before the timeout
of 60 seconds expires, an empty result is returned. An empty result, in
this context, means that a DecisionTask is returned, but that the value
of taskToken is an empty string.
Deciders should set their client side socket timeout to at least 70
seconds (10 seconds higher than the timeout).
Because the number of workflow history events for a single workflow
execution might be very large, the result returned might be split up
across a number of pages. To retrieve subsequent pages, make additional
calls to \code{PollForDecisionTask} using the \code{nextPageToken} returned by the
initial call. Note that you do \emph{not} call \code{GetWorkflowExecutionHistory}
with this \code{nextPageToken}. Instead, call \code{PollForDecisionTask} again.
\strong{Access Control}
You can use IAM policies to control this action's access to Amazon SWF
resources as follows:
\itemize{
\item Use a \code{Resource} element with the domain name to limit the action to
only specified domains.
\item Use an \code{Action} element to allow or deny permission to call this
action.
\item Constrain the \code{taskList.name} parameter by using a \code{Condition}
element with the \code{swf:taskList.name} key to allow the action to
access only certain task lists.
}
If the caller doesn't have sufficient permissions to invoke the action,
or the parameter values fall outside the specified constraints, the
action fails. The associated event attribute's \code{cause} parameter is set
to \code{OPERATION_NOT_PERMITTED}. For details and example IAM policies, see
\href{https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html}{Using IAM to Manage Access to Amazon SWF Workflows}
in the \emph{Amazon SWF Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$poll_for_decision_task(
domain = "string",
taskList = list(
name = "string"
),
identity = "string",
nextPageToken = "string",
maximumPageSize = 123,
reverseOrder = TRUE|FALSE
)
}
}
\keyword{internal}
|
0c197efb2e89c913726428892563eca8afa1cb48 | 14fad1c1a177ba6b91b788fd892426d51a2622ab | /code/deprecated/001_fnPopSim.R | a0cef9f9d9f734f476fafdc0d06210caa41d818f | [
"MIT"
] | permissive | Sz-Tim/kelper | 9a29b8f84a305444c755ef57dbf37677ebd176c9 | b3785c71e5b32ad5517f4e650356639f1a475c5f | refs/heads/main | 2023-08-07T12:45:39.837032 | 2023-08-01T13:38:28 | 2023-08-01T13:38:28 | 438,286,692 | 0 | 0 | MIT | 2023-08-01T13:38:29 | 2021-12-14T14:36:51 | R | UTF-8 | R | false | false | 14,563 | r | 001_fnPopSim.R | # KELPER
# Simulation functions
# Tim Szewczyk
# Population simulation functions
#' Simulate one population
#'
#' Runs an annual three-stage (recruit/sub-canopy/canopy) kelp population
#' simulation with growing-season growth, optional harvest, and
#' non-growing-season survival with optional storm effects.
#'
#' @param pars List of parameter values (environment, rates, harvest
#'   settings; see body for the elements that are read)
#' @param N0 Vector of initial density for each stage; defaults to a
#'   multiple of the first-year carrying capacity
#' @param ndraws Number of draws from the posterior distribution to use
#'
#' @return A list with elements N (density by stage/year/season), FAI
#'   (frond area index), harvest, kappa (proportion of K), K_FAI, K_N,
#'   biomass, and PAR (light at depth).
#' @export
#'
#' @examples
simulatePopulation <- function(pars, N0=NULL, ndraws=4e3) {
  library(tidyverse); library(brms)
  
  #---- setup landscape
  # Attenuate surface PAR to the simulated depth (Beer-Lambert).
  env.df <- pars$env %>%
    mutate(PAR_atDepth=PAR * exp(-KD * pars$depth),
           lPAR_atDepth=log(PAR),
           # NOTE(review): lPAR_atDepth is log(PAR), not log(PAR_atDepth);
           # confirm this matches how the regression models were fitted.
           location=NA) %>%
    select(PAR_atDepth, lPAR_atDepth, SST, fetch, location)
  # A single environment row is recycled across all simulated years.
  if(nrow(pars$env) == 1) {
    env.df <- env.df %>% uncount(pars$tmax)
  }
  
  #---- setup parameters
  # Draw per-year vital rates either stochastically (beta/normal around
  # the supplied mean/precision pairs) or as constant repeats of the mean.
  if(pars$stochParams) {
    par.yr <- list(loss=rbeta(pars$tmax, prod(pars$lossRate), (1-pars$lossRate[,1])*pars$lossRate[,2]),
                   settlement=pmax(0, rnorm(pars$tmax, pars$settlementRate[,1], pars$settlementRate[,2])),
                   surv=apply(pars$survRate, 1, function(x) rbeta(pars$tmax, prod(x), (1-x[1])*x[2])),
                   growStipeMax=apply(pars$growthRateStipeMax, 1, function(x) rnorm(pars$tmax, x[1], x[2])),
                   growFrond=apply(pars$growthRateFrond, 1, function(x) rnorm(pars$tmax, x[1], x[2])))
  } else {
    par.yr <- list(loss=rep(pars$lossRate[1], pars$tmax),
                   settlement=rep(pars$settlementRate$mn[1], pars$tmax),
                   surv=apply(pars$survRate, 1, function(x) rep(x[1], pars$tmax)),
                   growStipeMax=apply(pars$growthRateStipeMax, 1, function(x) rep(x[1], pars$tmax)),
                   growFrond=apply(pars$growthRateFrond, 1, function(x) rep(x[1], pars$tmax)))
  }
  
  #---- setup storm effects
  if(is.null(pars$stormIntensity)) {
    par.yr$surv_strm <- par.yr$surv
  } else {
    # vector of storm intensities
    # affects winter survival, loss
    # should depend on depth...
    # BUG FIX: these two expressions previously referenced the undefined
    # global 'pars.sim$storms'; the storm intensities live in
    # pars$stormIntensity (the element this branch is gated on).
    par.yr$loss <- qbeta(pnorm(pars$stormIntensity, 0, 1),
                         prod(pars$lossRate),
                         (1-pars$lossRate[,1])*pars$lossRate[,2])
    par.yr$surv_strm <- apply(pars$survRate, 1,
                              function(x) qbeta(pnorm(-pars$stormIntensity, 0, 1),
                                                prod(x), (1-x[1])*x[2]))
  }
  par.yr$surv <- sqrt(par.yr$surv) # annual rates to 1/2 year rates
  par.yr$surv_strm <- sqrt(par.yr$surv_strm)
  
  #---- global parameters
  ## maxStipeLen = maximum expected canopy height
  ## sizeClassLimits = boundaries between stages
  ## sizeClassMdPts = midpoint per stage
  ## K_N = carrying capacity on abundance / m2
  ## K_FAI = carrying capacity on frond area / m2
  maxStipeLen <- getPrediction(pars$canopyHeight, ndraws,
                               env.df %>% summarise(across(.fns=mean)) %>% mutate(location=NA_character_),
                               pars$sc.df$canopyHeight.lm, "maxStipeLen")
  sizeClassLimits <- maxStipeLen * c(0, 0.333, 0.75, 1.25)#(0:3)/3
  sizeClassMdpts <- (sizeClassLimits+lag(sizeClassLimits))[-1]/2
  # Floor carrying capacities at a small positive value to avoid
  # divide-by-zero in the kappa calculations below.
  K_N <- pmax(1e-2, getPrediction(pars$N_canopy, ndraws, env.df,
                                  pars$sc.df$N_canopy.lm, "N"))
  K_FAI <- pmax(1e-2, getPrediction(pars$FAI, ndraws, env.df,
                                    pars$sc.df$FAI.lm, "FAI"))
  if(is.null(N0)) N0 <- K_N[1] * c(5,2,1)/2
  
  #---- per capita mass, area by stage
  # Allometric predictions (log scale) at each stage midpoint length.
  logWtStipe.stage <- log(sizeClassMdpts) %>%
    map(~getPrediction(pars$lenSt_to_wtSt, ndraws, bind_cols(logLenStipe=.x, env.df),
                       pars$sc.df$lenSt_to_wtSt.lm, "logWtStipe")) %>%
    do.call('cbind', .)
  logWtFrond.stage <- log(sizeClassMdpts) %>%
    map(~getPrediction(pars$lenSt_to_wtFr, ndraws, bind_cols(logLenStipe=.x, env.df),
                       pars$sc.df$lenSt_to_wtFr.lm, "logWtFrond")) %>%
    do.call('cbind', .)
  logAreaFrond.stage <- log(sizeClassMdpts) %>%
    map(~getPrediction(pars$wtFr_to_arFr, ndraws, bind_cols(logWtFrond=.x, env.df),
                       pars$sc.df$wtFr_to_arFr, "logAreaFrond")) %>%
    do.call('cbind', .)
  
  #---- storage & initialization
  ## N[stage,year,season] = density/m2
  ## FAI[stage,year,season] = frond area / m2
  ## harvest[year] = log(grams harvested) / m2
  ## kappa[year,season,FAI|N] = proportion of K
  ## biomass[year,season] = kg / m2
  N <- FAI <- array(dim=c(3, pars$tmax, 3))
  N[,1,1] <- N0
  FAI[,1,1] <- N[,1,1] * exp(logAreaFrond.stage[1,])
  # NOTE(review): 'harvest' is initialized and returned but never updated
  # in the harvest branch below -- confirm whether harvested biomass
  # should be accumulated there.
  harvest <- rep(0, pars$tmax)
  kappa <- array(dim=c(pars$tmax, 3, 2))
  
  #---- transition matrices
  ## A[[growing|non-growing]][stageTo,stageFrom]
  A <- map(1:2, ~matrix(0, 3, 3))
  
  #---- simulation loop
  for(year in 1:pars$tmax) {
    harvestYear <- year %% pars$freqHarvest == 0
    
    #---- growing season:
    season <- 1
    kappa[year,season,] <- pmin(1, c(FAI[3,year,season]/K_FAI[year], N[3,year,season]/K_N[year]))
    # growth: stipe growth slows as canopy frond area approaches K_FAI
    growRate_i <- par.yr$growStipeMax[year,] * (1-kappa[year,season,1]^pars$growthRateDensityShape)
    prGrowToNext <- pmin(1, pmax(0, growRate_i/(sizeClassLimits-lag(sizeClassLimits))[-1]))
    A[[1]][2,1] <- par.yr$surv[year,1]*prGrowToNext[1]
    A[[1]][3,2] <- par.yr$surv[year,2]*prGrowToNext[2]
    # survival: survivors that do not advance stay in their stage
    A[[1]][1,1] <- par.yr$surv[year,1] - A[[1]][2,1]
    A[[1]][2,2] <- par.yr$surv[year,2] - A[[1]][3,2]
    A[[1]][3,3] <- par.yr$surv[year,3]
    # update population
    N[,year,season+1] <- A[[1]] %*% N[,year,season]
    FAI[,year,season+1] <- growFrondArea(FAI[,year,season], N[,year,season],
                                         A[[1]], kappa[year,season,1],
                                         logAreaFrond.stage[year,], par.yr$growFrond[year,])
    
    #---- harvest:
    season <- 2
    kappa[year,season,] <- pmin(1, c(FAI[3,year,season]/K_FAI[year], N[3,year,season]/K_N[year]))
    # harvest: remove a fixed proportion of all stages in harvest years
    if(harvestYear) {
      # update population
      N[,year,season+1] <- (1-pars$prFullHarvest) * N[,year,season]
      FAI[,year,season+1] <- (1-pars$prFullHarvest) * FAI[,year,season]
    } else {
      N[,year,season+1] <- N[,year,season]
      FAI[,year,season+1] <- FAI[,year,season]
    }
    
    #---- non-growing season
    season <- 3
    kappa[year,season,] <- pmin(1, c(FAI[3,year,season]/K_FAI[year], N[3,year,season]/K_N[year]))
    if(year < pars$tmax) {
      # survival (storm-adjusted over winter)
      diag(A[[2]]) <- pmax(0, par.yr$surv_strm[year,])
      # update population
      N[,year+1,1] <- A[[2]] %*% N[,year,season]
      # reproduction: settlement scales with remaining free capacity
      N[1,year+1,1] <- par.yr$settlement[year]*(1-max(kappa[year,season,]))
      FAI[,year+1,1] <- FAI[,year,season] * pmax(0, diag(A[[2]]) - par.yr$loss[year])
    }
  }
  # biomass calculation
  biomass <- calcBiomass(N, FAI, logWtStipe.stage, pars$arFr_to_wtFr,
                         ndraws, env.df, pars$sc.df$arFr_to_wtFr.lm, stages=3)
  return(list(N=N, FAI=FAI, harvest=harvest, kappa=kappa, K_FAI=K_FAI, K_N=K_N,
              biomass=biomass, PAR=env.df$PAR_atDepth))
}
#' Run Matrix Model Finer (handover code)
#'
#' Size-structured matrix model of kelp growth with density-dependent
#' growth and mortality, free-space-limited settlement, and periodic
#' thinning (harvest). Optionally plots parameter curves and outputs.
#'
#' @param inputs Named list of model parameters
#'
#' @return Named list with simulation outputs and key input parameters
#'   (plus a \code{warnings} element when any warning was raised)
#'
runMatrixModelFiner <- function(inputs){
  
  # Unlist inputs
  domainArea <- inputs$domainArea # Area in m^2
  Ntimesteps <- inputs$Ntimesteps
  timestep <- inputs$timestep
  NsizeClasses <- inputs$NsizeClasses
  maxSize <- inputs$maxSize
  settle <- inputs$settle * timestep # Settlement per m^2 free space
  mu0 <- 1 - (1 - inputs$mu0)^timestep # Mortality in open space
  mu1 <- inputs$mu1*mu0
  # Growth (transition rate) in open space
  g1 <- inputs$g1
  allom1 <- inputs$allom1
  # allom2 <- inputs$allom2
  # allom3 <- inputs$allom3
  N0 <- inputs$N0
  k <- inputs$k # Max canopy area allowing enough light for subcanopy growth (proportion)
  plotPars <- inputs$plotPars
  plotOut <- inputs$plotOut
  
  # Warnings are collected here and attached to the output list at the end.
  warn_msgs <- character(0)
  
  classStartSizes <- c(0:(NsizeClasses - 1))*maxSize/NsizeClasses
  classMidPointSizes <- (maxSize/NsizeClasses) * (c(0:(NsizeClasses - 1)) + 0.5)
  classSizes <- classStartSizes
  
  # Work out size dependent growth rate in m/day based on supplied parameters
  growthRateM <- growSize(classSizes, inputs$growthRateM, inputs$growthRateS)
  
  # Derive model transition rates from size growth rates
  g0 <- growthRateM*timestep*NsizeClasses/maxSize
  if (max(g0) > 1) {
    msg <- paste("Growth transition rate > 1",
                 "(number of size classes or timestep too high for given growth rate);",
                 "automatically set to 1")
    warning(msg)
    # BUG FIX: this previously did 'output$warnings <- msg', but 'output'
    # is not created until the end of the function, so reaching this branch
    # raised "object 'output' not found" (and the assignment would have
    # been clobbered by 'output <- list(...)' anyway). Collect the message
    # and attach it after the output list is built.
    warn_msgs <- c(warn_msgs, msg)
    g0 <- g0 / max(g0)
  }
  
  # Sort out initial population, which can either be a vector with length NsizeClasses,
  # or single value (assumed all in first size class)
  if (length(N0)==1) {
    N0 <- c(N0, rep(0, NsizeClasses-1))
  }
  
  sizeClassHoldfastArea <- pi*(classSizes/2)^2 # Species specific allometry
  sizeClassCanopyArea <- allom1*sizeClassHoldfastArea # Species specific allometry
  
  if (plotPars && plotOut) {
    par(mfrow=c(2,3))
  } else {
    par(mfrow=c(1,3))
  }
  
  # If a constant growth rate (over size) has been specified, replace with a vector covering all size classes
  if (length(g0)==1){
    print("Single (constant) growth rate defined")
    g0 <- array(g0, dim=NsizeClasses)
  }
  
  # Plot the functions to have a look at them
  xrange <- seq(0, 1, by=0.05)
  growthDensExample <- sapply(xrange, growDens, g0[1], g1, 1)
  mortLinearExample <- sapply(xrange, mortLinDens, mu0, mu1)
  mortOptimumExample <- sapply(xrange, mortOptDens, mu0, mu1)
  if (plotPars){
    plot(classSizes, growthRateM, type="l",
         main="", xlab='Holdfast diameter (m)', ylab="Growth rate (m/day)",
         ylim=c(0, growthRateM[1]))
    plot(NA, NA,
         main="", xlab='Proportion canopy filled', ylab="Growth rate",
         xlim=c(0, 1), ylim=c(0, .1))
    for(i in 1:NsizeClasses) {
      lines(xrange, sapply(xrange, growDens, g0[i], g1, 1), col=i)
    }
    if(inputs$mortalityLinearDecline) {
      plot(xrange, mortLinearExample ,type="l",
           main="", xlab='Proportion canopy filled', ylab="Mortality rate",
           ylim=c(0, abs(mu0) + abs(mu1)))
    } else {
      plot(xrange, mortOptimumExample, type="l", col="red",
           main="", xlab='Proportion canopy filled', ylab="Mortality rate",
           ylim=c(0, abs(mu0) + abs(mu1)))
    }
  }
  
  # Set up matrix to store population stage (rows) structure over time (cols)
  Nt <- matrix(0, ncol=Ntimesteps, nrow=NsizeClasses)
  Nt[,1] <- N0
  AreaHoldfast <- matrix(0, ncol=Ntimesteps, nrow=NsizeClasses)
  AreaCanopy <- matrix(0, ncol=Ntimesteps, nrow=NsizeClasses)
  yield <- array(0, dim=Ntimesteps)
  A.ar <- array(0, dim=c(NsizeClasses, NsizeClasses, Ntimesteps))
  
  for (step in 1:(Ntimesteps-1)) {
    #Lt <- max(sum(AreaCanopy),0) # Calculate canopy area
    #print(paste("AreaCanopy = ",max(sum(AreaCanopy[,step]),0)))
    A <- matrix(0, nrow=NsizeClasses, ncol=NsizeClasses)
    for (i in 1:(NsizeClasses - 1)) {
      # Proportion of occupied space: asymmetric = plants larger than i
      if (inputs$interactType == "Asymmetric") {
        propOccupiedSpace <- sum(AreaCanopy[(i+1):NsizeClasses,step])/domainArea
      } else {
        propOccupiedSpace <- sum(AreaCanopy[,step])/domainArea
      }
      mortalityRate <- mortDens(propOccupiedSpace, mu0, mu1,
                                inputs$mortalityLinearDecline)
      # Sub-diagonal entries: proportion that move to next class
      # NOTE(review): growDens is called here with the full g0 vector and
      # index i, but above with a scalar g0[i] and literal 1 -- confirm
      # growDens's expected signature; only one form can be right.
      A[i+1,i] <- growDens(propOccupiedSpace, g0, g1, i)*(1 - mortalityRate)
      # Diagonal entries: proportion that stay in the same size class
      A[i,i] <- max(0, 1 - mortalityRate - A[i+1,i])
    }
    # Canopy (last stage) mortality
    A[NsizeClasses,NsizeClasses] <- max(0, 1 - mu0)
    A.ar[,,step] <- A
    
    Nt[,step+1] <- A %*% Nt[,step] # Apply mortality and development
    Ft <- max(0, domainArea - sum(AreaHoldfast[,step])) # Calculate free space
    # Number of new recruits - assumed independent of population at present
    Nt[1,step+1] <- Nt[1,step+1] + settle*Ft
    
    # Thin stand of plants once every inputs$thinDuration
    if ((step*timestep) %% inputs$thinDuration < timestep) {
      # NOTE(review): yield is computed from the ALREADY-thinned Nt, giving
      # thin*(1-thin)*N rather than thin*N -- confirm intended ordering.
      Nt[,step+1] <- Nt[,step+1]*(1 - inputs$thin)
      yield[step + 1] <- yield[step] + sum(Nt[,step+1]*inputs$thin*sizeClassCanopyArea)
    } else {
      yield[step + 1] <- yield[step]
    }
    
    AreaHoldfast[,step+1] <- sizeClassHoldfastArea*Nt[,step+1]
    AreaCanopy[,step+1] <- sizeClassCanopyArea*Nt[,step+1]
  }
  # Add the final step areas to the matrices
  AreaHoldfast[,Ntimesteps] <- sizeClassHoldfastArea*Nt[,Ntimesteps]
  AreaCanopy[,Ntimesteps] <- sizeClassCanopyArea*Nt[,Ntimesteps]
  
  # Transpose Nt for plotting
  TNt <- t(Nt)
  if (plotOut) {
    image(log10(TNt + 1), main = "Number per size class",
          xlab = expression(paste("Time (years)")), ylab = "Holdfast diameter (m)",
          axes = F)
    axis(1, at = seq(0, 1, length = Ntimesteps),
         labels = sprintf("%.2f", c(1:Ntimesteps)*timestep/365),
         lwd = 0, pos = 0
    )
    axis(2, at = seq(0, 1, length = NsizeClasses),
         labels = sprintf("%.2f", c(1:NsizeClasses)*maxSize/NsizeClasses),
         lwd = 0, pos = 0
    )
  }
  
  TAh <- t(AreaHoldfast)
  TAc <- t(AreaCanopy)
  if (plotOut) {
    x <- c(1:Ntimesteps)*timestep / 365
    matplot(x, cbind(TAc, rowSums(TAc))/domainArea,
            main = "Canopy area (size classes)",
            xlab = "Time", ylab = expression(paste("Area (m" ^ "2", ")")),
            ylim = c(0, 1), type = "l"
    )
    lines(x, rowSums(TAc)/domainArea, lwd = 2)
  }
  
  if (plotOut) {
    x <- c(1:Ntimesteps)*timestep/365
    plot(x, yield/domainArea,
         main = "Yield",
         xlab = "Time", ylab = expression(paste("Area (m" ^ "2", ")")),
         type = "l"
    )
  }
  
  output <- list(
    TNt=TNt,
    TAh=TAh,
    TAc=TAc,
    yield=yield,
    transitionMatrix=A,
    densrange=xrange,
    growVsSize=growthRateM,
    growVsDens=growthDensExample,
    classSizes=classSizes,
    sizeClassHoldfastArea=sizeClassHoldfastArea,
    sizeClassCanopyArea=sizeClassCanopyArea,
    domainArea=domainArea,
    NsizeClasses=NsizeClasses,
    Ntimesteps=Ntimesteps,
    maxSize=maxSize,
    timestep=timestep,
    mu0=mu0,
    mu1=mu1
  )
  if (inputs$mortalityLinearDecline){
    output$mortVsDens <- mortLinearExample
  } else {
    output$mortVsDens <- mortOptimumExample
  }
  # Attach any warnings collected during the run (see BUG FIX above).
  if (length(warn_msgs) > 0) {
    output$warnings <- warn_msgs
  }
  return(output)
}
|
61022aab987d26ed51aa77a6f9e0eda4934da5e0 | 6464efbccd76256c3fb97fa4e50efb5d480b7c8c | /paws/man/workspaces_copy_workspace_image.Rd | dca9266bad09e935e48a2cf410e290ad64c051ec | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | johnnytommy/paws | 019b410ad8d4218199eb7349eb1844864bd45119 | a371a5f2207b534cf60735e693c809bd33ce3ccf | refs/heads/master | 2020-09-14T23:09:23.848860 | 2020-04-06T21:49:17 | 2020-04-06T21:49:17 | 223,286,996 | 1 | 0 | NOASSERTION | 2019-11-22T00:29:10 | 2019-11-21T23:56:19 | null | UTF-8 | R | false | true | 1,014 | rd | workspaces_copy_workspace_image.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspaces_operations.R
\name{workspaces_copy_workspace_image}
\alias{workspaces_copy_workspace_image}
\title{Copies the specified image from the specified Region to the current
Region}
\usage{
workspaces_copy_workspace_image(Name, Description, SourceImageId,
SourceRegion, Tags)
}
\arguments{
\item{Name}{[required] The name of the image.}
\item{Description}{A description of the image.}
\item{SourceImageId}{[required] The identifier of the source image.}
\item{SourceRegion}{[required] The identifier of the source Region.}
\item{Tags}{The tags for the image.}
}
\description{
Copies the specified image from the specified Region to the current
Region.
}
\section{Request syntax}{
\preformatted{svc$copy_workspace_image(
Name = "string",
Description = "string",
SourceImageId = "string",
SourceRegion = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
2ca4c07a3aefdf016e7d5bef4157245ae08eb23c | 898abf9bf3ea541c2e70ce3a97d648f02643bd96 | /bubble_chart.R | 484c4ea322aa281c5792c32920fb0ff25627a149 | [] | no_license | 59161008/testGit | 027e68b5d15438cbae9a87b797178de45a7c6937 | d27f21f0ef2739850f82babcfda426094635d299 | refs/heads/master | 2020-05-23T02:32:28.441653 | 2019-05-14T11:25:45 | 2019-05-14T11:25:45 | 186,606,979 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,839 | r | bubble_chart.R | library(dplyr)
library(ggplot2)
library(plotly)
library(ggpubr)
library(RMySQL)
# Bubble chart: total sale price vs. total profit per product; bubble
# size encodes the number of order lines (n) for each product.
product_summary <- SourceData %>%
  select(Product, SalePrice, Profit) %>%
  group_by(Product) %>%
  count(SalePrice = sum(SalePrice), Profit = sum(Profit))

bubble_plot <- plot_ly(
  product_summary,
  x = ~SalePrice, y = ~Profit,
  type = 'scatter', mode = 'markers',
  color = ~Product,
  colors = c('#FFCC99','#FFCC33','#FF3366','#CCFF66','#9933FF'),
  marker = list(size = ~n, line = list(width = 1, color = '#FFFFFF')),
  text = ~paste('Product Name:', Product, '<br>SalePrice:', SalePrice,
                '<br>Profit($):', Profit, '<br>N=', n)
) %>%
  layout(
    title = 'SalePrice and Profit Correlated by Products',
    xaxis = list(showgrid = FALSE, title = 'SalePrice'),
    yaxis = list(showgrid = FALSE, title = 'Profit'),
    showlegend = FALSE
  )
bubble_plot
#****************************************** bubble / stacked-bar section **********************************************
# Count how many respondents gave each rating (1-5) on each of the five
# feedback questions in FeedbackQa.
newdata = select(FeedbackQa, Question1, Question2, Question3,Question4,Question5)
data1 = newdata %>%
  group_by(Question1) %>%
  count(Question1)
data2 = newdata %>%
  group_by(Question2) %>%
  count(Question2)
data3 = newdata %>%
  group_by(Question3) %>%
  count(Question3)
data4 = newdata %>%
  group_by(Question4) %>%
  count(Question4)
data5 = newdata %>%
  group_by(Question5) %>%
  count(Question5)
data = data.frame(q1 = data1$n, q2 = data2$n, q3 = data3$n, # join the per-question counts into one frame (translated; original Thai note referenced joining by a country column from another script)
                  q4 = data4$n, q5 = data5$n) # combine the datasets (translated from Thai)
data = t(data)
point = c("1 point","2 point","3 point","4 point","5 point")
colnames(data)= point
# NOTE(review): everything computed above (data1..data5 and `data`) is
# discarded here -- `data` is rebuilt below from hard-coded percentages
# (x1..x5), and the bar-segment annotations further down also hard-code
# the same numbers. Confirm whether the chart should instead be driven
# by the FeedbackQa counts.
y <- c('The course was effectively<br>organized',
       'The course developed my<br>abilities and skills for<br>the subject', 
       'The course developed my<br>ability to think critically about<br>the subject', 
       'I would recommend this<br>course to a friend')
x1 <- c(21, 24, 27, 29)
x2 <-c(30, 31, 26, 24)
x3 <- c(21, 19, 23, 15)
x4 <- c(16, 15, 11, 18)
x5 <- c(12, 11, 13, 14)
data <- data.frame(y, x1, x2, x3, x4, x5)

top_labels <- c('Strongly<br>agree', 'Agree', 'Neutral', 'Disagree', 'Strongly<br>disagree')
# Horizontal stacked bar chart: one bar per survey statement, one segment
# per Likert level, with percentage labels centered in each segment.
p <- plot_ly(data, x = ~x1, y = ~y, type = 'bar', orientation = 'h',
             marker = list(color = 'rgba(38, 24, 74, 0.8)',
                           line = list(color = 'rgb(248, 248, 249)', width = 1))) %>%
  add_trace(x = ~x2, marker = list(color = 'rgba(71, 58, 131, 0.8)')) %>%
  add_trace(x = ~x3, marker = list(color = 'rgba(122, 120, 168, 0.8)')) %>%
  add_trace(x = ~x4, marker = list(color = 'rgba(164, 163, 204, 0.85)')) %>%
  add_trace(x = ~x5, marker = list(color = 'rgba(190, 192, 213, 1)')) %>%
  layout(xaxis = list(title = "",
                      showgrid = FALSE,
                      showline = FALSE,
                      showticklabels = FALSE,
                      zeroline = FALSE,
                      domain = c(0.15, 1)),
         yaxis = list(title = "",
                      showgrid = FALSE,
                      showline = FALSE,
                      showticklabels = FALSE,
                      zeroline = FALSE),
         barmode = 'stack',
         paper_bgcolor = 'rgb(248, 248, 255)', plot_bgcolor = 'rgb(248, 248, 255)',
         margin = list(l = 120, r = 10, t = 140, b = 80),
         showlegend = FALSE) %>%
  # labeling the y-axis
  add_annotations(xref = 'paper', yref = 'y', x = 0.14, y = y,
                  xanchor = 'right',
                  text = y,
                  font = list(family = 'Arial', size = 12,
                              color = 'rgb(67, 67, 67)'),
                  showarrow = FALSE, align = 'right') %>%
  # labeling the percentages of each bar (x_axis)
  # each label sits at the cumulative start of its segment plus half its width
  add_annotations(xref = 'x', yref = 'y',
                  x = x1 / 2, y = y,
                  text = paste(data[,"x1"], '%'),
                  font = list(family = 'Arial', size = 12,
                              color = 'rgb(248, 248, 255)'),
                  showarrow = FALSE) %>%
  add_annotations(xref = 'x', yref = 'y',
                  x = x1 + x2 / 2, y = y,
                  text = paste(data[,"x2"], '%'),
                  font = list(family = 'Arial', size = 12,
                              color = 'rgb(248, 248, 255)'),
                  showarrow = FALSE) %>%
  add_annotations(xref = 'x', yref = 'y',
                  x = x1 + x2 + x3 / 2, y = y,
                  text = paste(data[,"x3"], '%'),
                  font = list(family = 'Arial', size = 12,
                              color = 'rgb(248, 248, 255)'),
                  showarrow = FALSE) %>%
  add_annotations(xref = 'x', yref = 'y',
                  x = x1 + x2 + x3 + x4 / 2, y = y,
                  text = paste(data[,"x4"], '%'),
                  font = list(family = 'Arial', size = 12,
                              color = 'rgb(248, 248, 255)'),
                  showarrow = FALSE) %>%
  add_annotations(xref = 'x', yref = 'y',
                  x = x1 + x2 + x3 + x4 + x5 / 2, y = y,
                  text = paste(data[,"x5"], '%'),
                  font = list(family = 'Arial', size = 12,
                              color = 'rgb(248, 248, 255)'),
                  showarrow = FALSE) %>%
  # labeling the first Likert scale (on the top)
  # positions use the first row's percentages (21, 30, 21, 16, 12) hard-coded
  add_annotations(xref = 'x', yref = 'paper',
                  x = c(21 / 2, 21 + 30 / 2, 21 + 30 + 21 / 2, 21 + 30 + 21 + 16 / 2,
                        21 + 30 + 21 + 16 + 12 / 2),
                  y = 1.15,
                  text = top_labels,
                  font = list(family = 'Arial', size = 12,
                              color = 'rgb(67, 67, 67)'),
                  showarrow = FALSE)
p
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.