blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6681d6cb89f140be9e421139abe00e5364e1bf3 | 4ffcffc8b4892779f90f2eddf3fd99f8ec0b46a6 | /man/theme_stata.Rd | bdc140efb3007dc1585e39af63f4000b174abe7c | [] | no_license | peterdalle/surveyutils | 16e35d904d4af595a86fc7a2322b3dd94bf03f75 | a97cf386ec66add507958153b292b5ce013c7d3f | refs/heads/master | 2021-07-13T07:44:43.178435 | 2020-09-18T13:22:09 | 2020-09-18T13:22:09 | 208,145,587 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 320 | rd | theme_stata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot2_themes.r
\name{theme_stata}
\alias{theme_stata}
\title{Stata style ggplot2 theme}
\usage{
theme_stata(font_size = 12, lines = TRUE, legend = TRUE, ...)
}
\arguments{
\item{font_size}{Base font size (default 12).}
\item{lines}{Logical; controls drawing of plot lines.}
\item{legend}{Logical; controls display of the legend.}
\item{...}{Further arguments passed on to the underlying theme functions.}
}
\value{
A ggplot2 theme object.
}
\description{
Stata style ggplot2 theme
}
|
526cf0e5119c62ceafaf11b60eff07c36e89f3dc | 9aed022c11d38072eba77cd6bfde2e08907dd95b | /deliverables/Scripts/plots.r | ba61fadf3fe6efc67b38d878254a24d13ae2882c | [] | no_license | tds-andre/pricing-challenge | 9488118898a1ccec534438675b5d68f82c090a46 | 16f4330e86ed8866ac8f0b97eed454d7f3cbf2ea | refs/heads/master | 2021-01-13T09:21:45.513854 | 2016-10-01T17:58:44 | 2016-10-01T17:58:44 | 69,754,148 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,312 | r | plots.r | ###################################################################################################
# Exploratory plots for the pricing-challenge data: pulls sales/price tables
# from a local MySQL database (schema "b2w2") and draws histograms, boxplots,
# scatter plots and (cross-)correlation functions for price/volume series.
library(RMySQL)
library(vars)
library(forecast)
# SECURITY NOTE(review): database credentials are hard-coded; move them to
# environment variables or a config file before sharing this script.
db = dbConnect(MySQL(), user='root', password='root', dbname='b2w2', host='localhost')
###################################################################################################
# BOXES & HISTOGRAMS
# Fetch the full sales table (n=-1 retrieves all pending rows of the result).
sales = fetch(dbSendQuery(db, "select * from sales"),n=-1)
prices = list()
prods = sort(unique(sales$product))
i = 1
# One price histogram per product on a 3x3 grid.
par(mfrow=c(3,3))
for(product in prods){
# NOTE(review): column 5 is selected by position (5:5); selecting by column
# name would be more robust to schema changes.
prices[[i]] = sales[sales$product==product,5:5]
hist(prices[[i]], main=product, freq=FALSE)
i = i + 1
}
par(mfrow=c(1,1))
boxplot(prices)
###################################################################################################
# SCATTERS
# Volume-vs-price scatter per product, with a fitted regression line.
daily = fetch(dbSendQuery(db, "select product,volume,price from daily_summary"),n=-1)
par(mfrow=c(3,3))
for(product in prods){
sub = daily[daily$product==product,2:3]
plot(sub$volume,sub$price,main=product, sub=NULL, xlab=NULL, ylab=NULL)
# NOTE(review): the variable `lm` shadows stats::lm here; a different name
# (e.g. `fit`) would be clearer.
lm = lm(sub$volume~sub$price)
abline(lm, col="red")
}
###################################################################################################
# ACFs & CCFs
# Focus on product P2 (competitor C1) for the correlation analysis.
par(mfrow=c(1,1))
# NOTE(review): `prices` is re-bound here from the earlier list of vectors to
# a data frame; the boxplot data above is no longer available after this line.
prices = fetch(dbSendQuery(db, "select * from sales_and_prices where product = 'P2' and competitor = 'C1' order by price_at"),n=-1)
prices2 = fetch(dbSendQuery(db, "select min(min_price) as min, avg(avg_price) as avg, max(max_price) as max, my_base_price, volume from sales_and_prices where product = 'P2' group by price_at order by price_at"),n=-1)
volumes = fetch(dbSendQuery(db, "select * from daily_summary where product = 'P2'"),n=-1)
ccf(prices$volume, prices$avg_price, main ='Volume x C1 Price Cross Correlation')
ccf(prices$volume, prices$my_base_price, main ='Volume xPrice Cross Correlation')
acf(prices$volume, main='Volume Autocorrelation', lag = 100)
acf(prices$my_base_price, main='Price Autocorrelation', lag = 100)
plot(prices$my_base_price, prices$min_price)
plot(prices$my_base_price, prices$avg_price)
plot(prices$my_base_price, prices$max_price)
plot(prices2$my_base_price, prices2$min)
plot(prices2$my_base_price, prices2$max)
plot(prices2$my_base_price, prices2$avg)
# NOTE(review): this mixes columns from `prices2` and `prices`, which come
# from different queries and may have different row counts -- confirm intended.
plot(prices2$min, prices$volume)
plot(prices2$volume, prices2$max)
plot(prices2$volume, prices2$avg)
|
63f8c08ad150399af3f0584c8c0ed9718c648d93 | 18df0ee04b5654c30475fabbb669cff7e112b98b | /man/unite_ex_data_3.Rd | 14fc6fe3d8dfa60033af004f2605584045c7acac | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | seninp/metacoder | fa7a84787fafb9d67aef5226b0b9e17c5defd654 | a0685c540fec9955bc2a068cc7af46b5172dcabe | refs/heads/master | 2020-06-10T20:44:04.208387 | 2016-09-27T21:59:15 | 2016-09-27T21:59:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,102 | rd | unite_ex_data_3.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset_documentation.R
\docType{data}
\name{unite_ex_data_3}
\alias{unite_ex_data_3}
\title{Example of UNITE fungal ITS data}
\format{An object of type \code{\link{taxmap}}}
\source{
\url{https://unite.ut.ee/}
}
\usage{
unite_ex_data_3
}
\description{
A dataset containing information from 500 sequences from the UNITE reference database.
}
\examples{
\dontrun{
file_path <- system.file("extdata", "unite_general_release.fasta", package = "metacoder")
sequences <- ape::read.FASTA(file_path)
unite_ex_data_3 <- extract_taxonomy(sequences,
regex = "^(.*)\\\\|(.*)\\\\|(.*)\\\\|.*\\\\|(.*)$",
key = c(seq_name = "obs_info", seq_id = "obs_info",
other_id = "obs_info", "class"),
class_regex = "^(.*)__(.*)$",
class_key = c(unite_rank = "taxon_info", "name"),
class_sep = ";")
}
}
\keyword{datasets}
|
dffaf731d565396b90620021189829f65e05ec1c | 91ad89718692642bb2ff3682533a42dd300ac913 | /plot3.R | 96650d87b21748900e6825a91ea8c1c554d2ef73 | [] | no_license | blueMarvin42/Expdata-project2 | dd8f34a24290948a4853c093bf46dccf3157f84d | fa70bf7b54ae777b05ef128150a8ce2483735860 | refs/heads/master | 2020-05-30T23:14:08.929688 | 2014-05-16T02:35:35 | 2014-05-16T02:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 520 | r | plot3.R | NEI <- readRDS("summarySCC_PM25.rds")
# Course project plot 3: total PM2.5 emissions in Baltimore City (fips 24510)
# per year, faceted by emission source type.
# NOTE(review): this chunk assumes `NEI` (read from summarySCC_PM25.rds) is
# already in the workspace.
SCC <- readRDS("Source_Classification_Code.rds")
# Convert type/year to factors so they are treated as discrete in the plot.
data<-transform(NEI,type=factor(type),year=factor(year))
# Keep only Baltimore City records.
data2<-data[data$fips=="24510",]
library("plyr")
library("ggplot2")
# Sum emissions per year and source type.
plotdata3<-ddply(data2,.(year,type),summarize,sum=sum(Emissions))
png("plot3.png")
gplot<-ggplot(plotdata3,aes(year,sum))
gplot+geom_point()+facet_grid(.~type)+labs(title="PM2.5 Emission in Baltimore city",
                                           y="total PM2.5 emission each year")
dev.off()
# NOTE(review): `SCC` is loaded above but not used in this chunk.
|
b8f20ece0072ea9a43abef08d3a0c0f1fb8503b7 | b0fde103411363294569369c3a624cf0c54788ef | /Dec 2018/VH ratio_secondexp.R | 7ea3e97b929833d556824540bf8c875c4eadf4c4 | [] | no_license | kaye11/Postdoc-R | e66dfe054a275e886f5ac0af0bc30f120597a15c | 1b5600c8ea0d2243c2d567487c92f4ef30533e50 | refs/heads/master | 2021-11-25T06:55:20.086146 | 2021-11-24T22:28:04 | 2021-11-24T22:28:04 | 151,623,130 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,363 | r | VH ratio_secondexp.R |
library(readxl)
host <- read_excel("Postdoc-R/Exported Tables/SecondExp_sytox.xlsx")
virbac <- read_excel("Postdoc-R/Exported Tables/SecondExp_virbac.xlsx")
require(ggplot2)
require(Rmisc)
require (plotly)
source("theme_Publication.R")
require(reshape2)
source("resizewin.R")
require(dplyr)
resize.win (12,9)
require (tidyr)
#extract cell and viral count
cell <- host %>% filter(stain %in% c("countperml"))
ehv <- virbac %>% filter(cell %in% c("EhV"))
cell <- cell%>% arrange (factor(maingroup, c("still-control", "still-infected",
"turbulent-control", "turbulent-infected",
"still-viral particles", "turbulent-viral particles")))
ehv <- ehv%>% arrange (factor(maingroup, c("still-control", "still-infected",
"turbulent-control", "turbulent-infected",
"still-viral particles", "turbulent-viral particles")))
cell.ehv <- cbind(cell [c(5:6)], ehv [c(6, 8:13)])
cell.ehv$VH <- cell.ehv$count/cell.ehv$value
cell.ehv$VHdiv <- cell.ehv$VH/10^3
cell.ehv.dropvp <- cell.ehv[! cell.ehv$group2=="viralparticles", ]
ggplotly(ggplot(data=cell.ehv.dropvp, aes(x=time, y=VHdiv, colour=group2)) +geom_boxplot() +
facet_grid(~group1, scales="free")+ geom_point()+ theme_bw())
sum.all <- summarySE(cell.ehv.dropvp, measurevar = "VHdiv",
groupvars = c("maingroup", "group1", "group2", "time"))
#combined
resize.win (9, 9)
ggplot(data=sum.all, aes(x=time, y=VHdiv, colour=maingroup, shape=maingroup, linetype=maingroup)) +
geom_point(size=5) +
geom_errorbar(aes(ymin=VHdiv-se, ymax=VHdiv+se, width=5)) +
geom_smooth(method="loess") +
labs(y= expression("EhV:Ehux"~ scriptstyle(x)~"10"^~3), x= "hours post-infection") +
scale_x_continuous(breaks=c(0, 24, 48, 72, 96, 120)) +
scale_color_manual(values = rep(c("#e41a1c", "#e41a1c", "#377eb8", "#377eb8"), times = 2)) +
scale_linetype_manual(values = rep(c("solid", "longdash"), times = 4)) +
scale_shape_manual(values = rep(16:17, 2)) +
theme_Publication() +
theme(legend.key.width=unit(3,"line"), legend.title = element_blank())
#boxplots: time should be a factor, geom_smooth: time should be numeric
cell.ehv.dropvp$timef <- as.factor(cell.ehv.dropvp$time)
ggplot(data=cell.ehv.dropvp, aes(x=timef, y=VHdiv, colour=group2)) +
geom_boxplot() +
labs(y= expression("EhV:Ehux"~ scriptstyle(x)~"10"^~3), x= "hours post-infection") +
scale_x_discrete(breaks=c(0, 24, 48, 72, 96, 120)) +
scale_color_manual(values = c("#e41a1c", "#377eb8")) +
facet_grid(~group1, scales="free") + geom_point() +
theme_Publication() +
theme(legend.title = element_blank())
#notcombined
ggplot(data=sum.all, aes(x=time, y=VHdiv, colour=group2)) +
geom_point(size=5) +
geom_errorbar(aes(ymin=VHdiv-se, ymax=VHdiv+se, width=5)) +
geom_smooth(method="loess") +
labs(y= expression("EhV:Ehux"~ scriptstyle(x)~"10"^~3), x= "hours post-infection") +
scale_x_continuous(breaks=c(0, 24, 48, 72, 96, 120)) +
scale_color_manual (values = c(control="lightcoral", viralparticles="seagreen3", infected="steelblue2")) +
theme_Publication() +
facet_grid(~group1)+ theme(legend.title=element_blank())
|
d0f6b79072282c34df41cd3fe8141e5879bab774 | 7b072a9b73414dbaeb09e0ff6fefac717c7b9eb5 | /scripts/RIN.R | ff722502a792eb76d2ef5f69b0442a7f5df18d5e | [] | no_license | EugeniaRadulescu/Isoform_BrainSpan | b8be74d791bd1644f38aa5a4c1ded472944d4abc | c77bb9205f0a60182c5b7e96dea529e40c717e9a | refs/heads/master | 2023-06-17T08:05:10.093729 | 2021-07-11T22:22:29 | 2021-07-11T22:22:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 870 | r | RIN.R | library(tidyverse)
# Compare RNA Integrity Number (RIN) distributions of BrainSpan samples
# before and after sample filtering; saves a boxplot of the post-filter RIN.
# (library(tidyverse) is loaded at the top of this script.)
prefilter_metadata <- read_tsv("data/source/brainSpan.phenotype.meta.final.tsv")
metadata <- read_csv("data/brainspan_metadata.csv")
# Boxplot of RIN for the filtered metadata (single, unlabeled x category).
plt <- ggplot(
  data = metadata,
  mapping = aes(
    x = "", y = RIN
  )
) +
  geom_boxplot() +
  theme_bw() +
  theme(
    text = element_text(size = 20),
    axis.ticks.x = element_blank(),
    axis.text.x = element_blank(),
    axis.title.x = element_blank()
  )
ggsave(filename = "data/figures/BrainSpanRIN.pdf",
       plot = plt,
       device = "pdf",
       width = 4, height = 3)
# Summary statistics of the post-filter RIN (printed only when run interactively).
mean(metadata$RIN)
median(metadata$RIN)
# Pre- vs post-filter RIN comparison.
# NOTE(review): a top-level ggplot object is not auto-printed when this file
# is source()d; wrap in print() if the plot must always appear.
ggplot(
  data = bind_rows(
    prefilter_metadata %>%
      mutate(Filter = "Pre-Filter"),
    metadata %>%
      mutate(Filter = "Post-Filter")
  ),
  mapping = aes(
    x = Filter, y = RIN
  )
) +
  geom_boxplot()
|
310bf9d8bdd2bb98668c5af0de58330e26f38358 | 0b8f47f43cf95f54f5c4a026788d08347abb74ec | /R/breakpointManagement.R | d32bc44efef8c5e1103d8ae38689ef8a566fa3e4 | [
"MIT"
] | permissive | tdeenes/vscDebugger | 2676e9a13567ec2459d9602ad89aa98a2296b302 | 4596c4577629217eeb8cb8b9ac9f912aaf01d698 | refs/heads/master | 2023-03-19T22:54:05.311790 | 2020-05-29T18:06:46 | 2020-05-29T18:06:46 | 270,424,310 | 0 | 0 | MIT | 2020-06-07T20:29:25 | 2020-06-07T20:29:24 | null | UTF-8 | R | false | false | 4,346 | r | breakpointManagement.R |
# Funtions to manage breakpoints from inside the R package
# Is necessary e.g. to use .vsc.debugSource() without specifying the breaklines on each call
# Is probably a bit over-complilcated for the current use cases.
# Might be necessary in more complex cases:
# - Adding/removing individual breakpoints during debugging (without resetting all other bps)
# - Verifying breakpoints during runtime (after function definition etc.)
# - Conditional breakpoints?
# - Setting/Getting breakpoints by line-range
# The breakpoints are actually set by .vsc.setBreakpoints() in ./breakpoints.R
# Structure of breakpoints is:
# interface srcBreakpoint {
# file: string;
# breakpoints: breakpoint[];
# includePackages: boolean;
# }
# interface breakpoint {
# requestedLine?: number;
# line?: number; //ignore if verified==false
# maxOffset?: number;
# id?: number;
# attempted: boolean; //default false
# verified: boolean; //default false
# message?: string;
# rFunction?: rFunction; //only in R: function that contains the bp
# rAt?: number[][]; //only in R: step that contains the bp
# }
.packageEnv$breakpoints <- list()
#' Re-apply every breakpoint stored in the package environment.
#'
#' Loops over all stored srcBreakpoint entries and calls
#' \code{.vsc.setBreakpoints()} for each file, e.g. after a file has been
#' sourced again and its breakpoints must be re-armed without the client
#' resending them.
#' NOTE(review): the result is assigned to a field of the loop-local copy
#' \code{sbp} and then discarded; the call is made for its side effects only.
#' @export
.vsc.setStoredBreakpoints <- function() {
for (sbp in .packageEnv$breakpoints) {
sbp$bps <- .vsc.setBreakpoints(sbp$file, sbp$breakpoints, includePackages = sbp$includePackages)
}
}
#' Get the line numbers of the breakpoints registered for a file.
#'
#' @param file Path of the source file whose breakpoints are queried.
#' @param getActualLines If \code{TRUE}, return the (verified) actual lines;
#'   if \code{FALSE} (default), return the lines as requested by the client.
#' @return A vector of line numbers (possibly empty).
#' @export
.vsc.getBreakpointLines <- function(file, getActualLines = FALSE) {
  fileBreakpoints <- .vsc.getBreakpoints(file)
  summarized <- summarizeLists(fileBreakpoints)
  # `line` is only meaningful once a breakpoint has been verified;
  # `requestedLine` is always present.
  if (getActualLines) {
    summarized$line
  } else {
    summarized$requestedLine
  }
}
#' Return the complete list of stored source breakpoints.
#'
#' @return The list of srcBreakpoint entries currently held in the package
#'   environment; see the structure sketch at the top of this file.
#' @export
.vsc.getAllBreakpoints <- function() {
return(.packageEnv$breakpoints)
}
#' Get all breakpoints registered for a single file.
#'
#' Collects every stored srcBreakpoint entry whose \code{file} field matches,
#' merges them into a single entry, and returns that entry's breakpoint list.
#'
#' @param file Path of the source file.
#' @return A list of breakpoint records; an empty list if none are stored.
#' @export
.vsc.getBreakpoints <- function(file) {
  allBps <- .packageEnv$breakpoints
  isForFile <- lapply(allBps, function(sbp) sbp$file) == file
  matchingBps <- allBps[which(isForFile)]
  # Guard clause: nothing stored for this file.
  if (length(matchingBps) == 0) {
    return(list())
  }
  merged <- mergeSrcBreakpoints(matchingBps)
  merged[[1]]$breakpoints
}
#' Register breakpoints for a file at the given lines.
#'
#' Builds one unattempted/unverified breakpoint record per requested line and
#' stores them (via \code{.vsc.addBreakpoint}) as a single srcBreakpoint
#' entry for \code{file}.
#'
#' @param file Path of the source file the breakpoints belong to.
#' @param lines Requested line numbers (vector or list; coerced to a list).
#' @param maxOffset Maximum number of lines a breakpoint may be shifted to
#'   find a settable location.
#' @param ids Breakpoint ids: either one id per line, or a single id (or
#'   NULL, which becomes id 0) that is recycled for all lines.
#' @param includePackages Whether breakpoints should also apply to package code.
#' @export
.vsc.addBreakpoints <- function(file = '', lines = list(), maxOffset = 0, ids = NULL, includePackages = FALSE) {
if (!is.list(lines)) {
lines <- as.list(lines)
}
# Default id 0 when none supplied; recycle a single id across all lines.
if (length(ids) == 0) {
ids <- list(0)
}
if (length(ids) == 1) {
ids <- lapply(lines, function(x) ids[[1]])
}
# One breakpoint record per (line, id) pair, initially neither attempted
# nor verified.
bps <- mapply(function(line, id) list(
requestedLine = line,
id = id,
maxOffset = maxOffset,
attempted = FALSE,
verified = FALSE
), lines, ids, SIMPLIFY = FALSE, USE.NAMES = FALSE)
sbp <- list(
file = file,
breakpoints = bps,
includePackages = includePackages
)
.vsc.addBreakpoint(sbp)
}
#' Add a single breakpoint, or update fields of a given srcBreakpoint entry.
#'
#' Starts from an existing srcBreakpoint entry (or builds a fresh one),
#' applies the supplied field overrides to the entry and to its first
#' breakpoint record, fills in the \code{attempted}/\code{verified} defaults,
#' stores the entry, and re-merges the stored list so that entries for the
#' same file are combined.
#'
#' @param sbp An optional srcBreakpoint entry to start from.
#' @param file,line,maxOffset,id,message,includePackages Optional overrides;
#'   each is applied only when non-NULL.
#' @export
.vsc.addBreakpoint <- function(sbp = NULL, file = NULL, line = NULL, maxOffset = NULL, id = NULL, message = NULL, includePackages = NULL) {
  if (length(sbp) == 0) {
    sbp <- list()
  }
  # Robustness fix: `sbp$breakpoints[[1]]` raises "subscript out of bounds"
  # when `breakpoints` is NULL or empty (e.g. when called with the default
  # sbp = NULL); start from an empty record in that case.
  if (length(sbp$breakpoints) >= 1) {
    bp <- sbp$breakpoints[[1]]
  } else {
    bp <- list()
  }
  if (is.null(bp)) {
    bp <- list()
  }
  # Apply only the explicitly supplied overrides.
  if (!is.null(file)) sbp$file <- file
  if (!is.null(line)) bp$requestedLine <- line
  if (!is.null(maxOffset)) bp$maxOffset <- maxOffset
  if (!is.null(id)) bp$id <- id
  if (!is.null(message)) bp$message <- message
  # Defaults: a new breakpoint has not yet been attempted or verified.
  if (is.null(bp$attempted)) bp$attempted <- FALSE
  if (is.null(bp$verified)) bp$verified <- FALSE
  sbp$breakpoints[[1]] <- bp
  addSrcBreakpoint(sbp)
  .packageEnv$breakpoints <- mergeSrcBreakpoints(.packageEnv$breakpoints)
}
#' Remove all stored breakpoints.
#' @export
.vsc.clearAllBreakpoints <- function() {
.packageEnv$breakpoints <- list()
}
#' Remove all stored breakpoints belonging to one file.
#'
#' @param file Path whose srcBreakpoint entries should be dropped.
#' @export
.vsc.clearBreakpointsByFile <- function(file = '') {
whichBreakpoints <- which(lapply(.packageEnv$breakpoints, function(bp) bp$file) == file)
# Assigning NULL to list elements removes them from the list.
.packageEnv$breakpoints[whichBreakpoints] <- NULL
}
# Append srcBreakpoint entries to the stored list (internal helper; no
# merging or de-duplication is done here -- see mergeSrcBreakpoints()).
addSrcBreakpoints <- function(sbps = list()) {
.packageEnv$breakpoints <- c(.packageEnv$breakpoints, sbps)
}
# Append a single srcBreakpoint entry (wrapper around addSrcBreakpoints()).
addSrcBreakpoint <- function(sbp = NULL) {
addSrcBreakpoints(list(sbp))
}
# Merge srcBreakpoint entries so there is exactly one entry per file:
# entries are grouped by their `file` field (project helper lGroupBy) and
# each group is collapsed by mergeSrcBreakpointList().
mergeSrcBreakpoints <- function(sbps) {
sbpList <- lGroupBy(sbps, item = 'file')
mergedBps <- lapply(sbpList, mergeSrcBreakpointList)
sbps <- mergedBps
return(sbps)
}
# Collapse a list of srcBreakpoint entries (assumed to share one file) into
# a single entry: the first entry's metadata is kept, and its breakpoint
# records are replaced by the de-duplicated union of all entries' records.
mergeSrcBreakpointList <- function(sbpList) {
  # Nothing to merge: return the (empty) input unchanged.
  if (length(sbpList) == 0) {
    return(sbpList)
  }
  # Pool the breakpoint records of all entries, dropping exact duplicates.
  pooled <- unlist(lapply(sbpList, function(sbp) sbp$breakpoints),
                   recursive = FALSE)
  merged <- sbpList[[1]]
  merged$breakpoints <- unique(pooled)
  merged
}
|
6c588cec8897f4e6f372efd05f377ef335363e50 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612746794-test.R | 23dcc266100ffcd92975b9506911cae418e85dfc | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,777 | r | 1612746794-test.R | testlist <- list(x = c(NaN, NaN, NaN, 1.79404028452292e-226, 1.01639411703201e+218, 3.91565326463495e-109, 5.97161285020362e+218, NaN, 7.34681306403572e-223, -2.1147142951537e-106, 1.80331570633778e-130, -Inf, NaN, NaN, 5.3687921901861e-222, 2.37340362775785e-308, 8.97030895528791e-227, NaN, -Inf, NaN, NaN, 5.53290466281806e-222, 1.28257300625062e+219, 1.29849269277858e+219, 1.29849269277858e+219, 1.80122446398248e-226, 1.87978485692413e-226, 5.36000192277546e-222, 1.28257300625062e+219, 1.29849269277858e+219, 1.30956542524369e-306, 1.3031952186927e-307, 1.298492407607e+219, 1.10313090231045e+217, 1.10313068039846e+217, 0, 1.79486475154086e-226, 1.8010707924096e-226, 1.79981002528112e-226, 7.31489600618897e-304, 1.24103971499798e+217, -6.45770588427103e+305, 9.53303727566826e-227, NaN, 1.00255192262223e-226, 1.80107573659442e-226, NaN, NaN, NaN, -1.17043173257834e+304, NaN, NaN, -5.33131728833908e-108, 1.06559615820403e-255, -5.46635800110799e-108, -5.46354690059085e-108, NaN, NaN, -9.25783436608935e+303, 1.79489223360303e-226, NaN, NaN, NaN, NaN, NaN, 1.29849307827433e+219, 1.29849269277858e+219, 1.29849240497697e+219, 1.35531044963981e-224, 1.2984926918297e+219, 1.29849269277858e+219, -5.53534454886927e-108, NaN, -3.33546468003376e-111, -5.46354694348484e-108, -9.01049622743489e+306, NaN, NaN, NaN, NaN, 3.23785921002061e-319, 1.00255192367797e-226, NA, -6.25903895935894e+303, 0), y = 
c(1.29849240497697e+219, NaN, NaN, 1.42448667381132e-226, 1.80107573659442e-226, 4.57678595793046e-246, 1.53021088457174e-226, 4.72720804480433e-225, -3.94692725621111e-302, 1.37418044009098e-226, NaN, Inf, 1.0751196885036e-298, 2.00551490523401e-226, 1.7940418947626e-226, NaN, 0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) |
db909a2fb3fd37d46e1bd702ffe32e04cf447da9 | 2a65a26f2e5ff9a0aa65539bd3680386cbcf0b95 | /plots1.R | 20bdf154ae227c173d984cde356490cba988109a | [] | no_license | danielbenson/FBDATA | 2b47a64b3e5510a19cc32a6b7c7eccf09bf402d7 | 362ed87842a89a42e69e5c6ae1572c5615a2e91d | refs/heads/master | 2021-09-01T02:02:01.538685 | 2017-12-24T09:30:21 | 2017-12-24T09:30:21 | 115,251,934 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,914 | r | plots1.R | ## Examining a CSV generated from Facebook advertisers with my contact
## information after compiling country and industry information for each.
## Produces two bar charts: percent of advertisers by country and by industry.
# Load required libraries
library(ggplot2)
library(dplyr)
library(tidyr)
library(stringr)
# Read in data
FB_Advertisers <- read.csv("Assume an Honest Facebook2.csv")
# Inspect Data
str(FB_Advertisers)
# NOTE(review): levels() on a whole data frame returns NULL; it only makes
# sense on a single factor column such as FB_Advertisers$Origin.
levels(FB_Advertisers)
summary(FB_Advertisers)
head(FB_Advertisers)
# Rows with any missing values.
FB_Advertisers[!complete.cases(FB_Advertisers),]
unique(FB_Advertisers$Origin)
# Rename Columns to Something R Likes
# NOTE(review): "Company Name" contains a space, so it needs backticks for
# `$` access; a name like Company_Name would be easier to work with.
colnames(FB_Advertisers) <- c("ID_Number", "Company Name", "Category", "Origin")
# Split out "Origin" from the data frame, determine proportion, and generate a
# simple plot.
PCT_Origin <- as.data.frame(prop.table(table(FB_Advertisers$Origin))*100)
PCT_Origin
colnames(PCT_Origin) <- c("Origin", "Percent of Advertisers")
PCT_Origin
# NOTE(review): referencing columns as PCT_Origin$`...` inside aes() works
# here, but passing bare column names is the idiomatic (and facet-safe) form.
P1 <- ggplot(PCT_Origin, aes(y = PCT_Origin$`Percent of Advertisers`,
x = PCT_Origin$Origin)) +
labs(y = "Percent of Advertisers", x = "Country") +
geom_bar(stat = "identity", fill = "blue") +
ggtitle("Percent of Advertisers by Country")
P1
# Split out "Category" from the data frame, determine proportion, and generate a
# simple plot.
PCT_Cat <- as.data.frame(prop.table(table(FB_Advertisers$Category))*100)
PCT_Cat
colnames(PCT_Cat) <- c("Industry", "Percent of Advertisers")
PCT_Cat
P2 <- ggplot(PCT_Cat, aes(y = PCT_Cat$`Percent of Advertisers`,
x = PCT_Cat$Industry)) +
labs(y = "Percent of Advertisers", x = "Industry") +
geom_bar(stat = "identity", fill = "green") +
ggtitle("Percent of Advertisers by Industry") +
# Wrap long industry names and rotate them so the x axis stays readable.
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1.0))
P2
## End
|
6defa6aa38ddafb10c89d51a6c9bd99a79b7d002 | 951d7d4e5d0b60cf158d6857ed51fd07699473ad | /labs/1_unit_intro_to_r_bioconductor/render_and_remove_answers.R | ef1df7539819184aa4ee51593c4e1e8998d070bd | [] | no_license | uashogeschoolutrecht/ABDS_2019 | d892e49983a1f211fc1e16fbcd6ed9bc31260a1d | 7b6355e0495ff2ba75d6fd52aefaec8600e16536 | refs/heads/master | 2020-05-09T10:56:35.469415 | 2019-12-16T12:51:02 | 2019-12-16T12:51:02 | 181,061,708 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,444 | r | render_and_remove_answers.R | ## renders all Rmd in the current folder
# Renders every .Rmd in the folder of the currently open script, then moves
# the exercise .Rmd files (which contain answers) into the ANSWERS folder,
# recording their new locations in a CSV.
library(tidyverse)
library(filesstrings)
# Folder of the script currently open in RStudio (requires the RStudio API).
own_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
rmd_files <- list.files(path = own_dir, pattern = "\\.Rmd",
full.names = TRUE) %>%
as.list()
rmd_files
# Render every Rmd file.
# NOTE(review): mapping over a single element inside a for loop is
# redundant; purrr::walk(rmd_files, rmarkdown::render) would do the same.
for(i in seq_along(rmd_files)){
purrr::map(rmd_files[[i]], rmarkdown::render)
}
## remove the Rmd files (exercises only) that contain the
## answers to the exercises and puts them in the "/answers folder
## put the /answers folder in gitignore
## TODO: write a function that puts them back in a lab, on the basis
## of a lab name
#library(tidyverse)
#library(filesstrings)
own_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
rmd_files <- list.files(path = own_dir, pattern = "\\.Rmd",
full.names = TRUE)
rmd_files_df <- rmd_files %>%
enframe(name = NULL)
rmd_files_df <- rmd_files_df %>%
mutate(file_name = basename(value))
rmd_files_df
# Exercise files are identified by "_exercise_" in the file name.
ind <- str_detect(string = rmd_files_df$file_name,
pattern = "._exercise_.")
exercises <- rmd_files_df[ind, "value"] %>%
mutate(file_name = basename(value))
exercises
destination <- here::here("ANSWERS")
# NOTE(review): the hard-coded indices 2:3 skip the first exercise file --
# confirm this is intentional.
rmd_copied_to <- file.path(destination, exercises$file_name[2:3]) %>%
enframe(name = NULL)
## save rmd new locations
write_csv(rmd_copied_to, path = file.path(own_dir, "rmd_copied_to.csv"))
# NOTE(review): map() over a data frame iterates over its COLUMNS, so this
# calls file.move() on both `value` and `file_name`; presumably only the
# full paths in `value` were intended -- verify.
map(exercises, file.move, destinations = destination)
|
8a37bb64726f77fef68f5101f7be67374d22c982 | e722110d8ccac3ed5e23dd8f57ff8c398fcdb790 | /Chap_5_Kokko_P_G.R | 7e1639da822b7c9ea3579d64064773b91e1df4ae | [] | no_license | aszejner/first_R_model | b993b95fbf10ece5815f5803aa3fab9c6c505c77 | c8cec5e650549feaa7465716785a6aad9f8a784f | refs/heads/main | 2023-05-15T18:47:10.175841 | 2021-06-06T11:50:20 | 2021-06-06T11:50:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,135 | r | Chap_5_Kokko_P_G.R | # Hanna Kokko's book "Modelling for field biologists..." Chapter 5
#---------------R code----------------
## dmax = probability of death per time unit if you're very heavy
## dmin = probability of death per time unit if you're very lean
## c = rate of consuming resources
## f = feeding efficiency
## maxt = maximum time (i.e. number of time units the day is divided into)
## maxc = maximum condition (i.e. number of different condition units)
## The output is the ForageRule matrix, with 1 denoting foraging, and 0 denoting resting.
## Stochastic dynamic programming model of the foraging/resting decision
## (Kokko ch. 5): works backwards from a terminal reward equal to final
## condition, computing for every (condition, time) state whether foraging
## or resting yields the higher expected reward.
## NOTE(review): parameter `f` (feeding efficiency) is never used in the body.
forage <- function(dmin, dmax, c, f, maxt, maxc) {
ForageRule <- matrix(nrow=maxc+1, ncol=maxt)
## Reminder: rows indicate condition, columns indicate time.
## Rows are chosen like this:
## dead=row 1, condition 1=row 2, condition 2=row 3, etc
## This means that best condition is maxc but this is found at row maxc+1
## Terminal reward increases with condition
## so we already know the values for the last (i.e. maxt+1st) row
Reward <- matrix(nrow=maxc+1, ncol=maxt+1)
Reward[,maxt+1] <- 0:maxc
## then, probability of death increases linearly with body weight
d <- c(0, seq(dmax, dmin, length.out=maxc))
## NOTE(review): this assignment overwrites the argument `c` with a vector
## (and masks base::c as a variable in this scope); calls like c(...) still
## resolve to the function, but a different name would be much clearer.
c <- c(0, seq(0.1, 0.4, length.out=maxc))
G <- c(0, seq(0.2, 0.8, length.out=maxc))
## anyone who is alive can either improve or then not...
P_supervivencia <- (1 - d)
P_comida <- (0.5 + c)
Size <- G
## ...except those who already are in top condition
## cannot improve so they get different values here
## NOTE(review): Ptop_eat_same and Ptop_eat_dead are computed here but never
## used anywhere below.
Ptop_eat_same <- 1 - d[length(d)]
Ptop_eat_dead <- d[length(d)]
## we start from the end of the day and continue backwards
for (t in maxt:1) {
## individuals who are dead have index 1
## individuals who are in top condition have index maxc+1
## Rules for updating fitness values:
## first everyone except those who already are dead, or in top condition
## We wish to compare two benefits: the expected reward
## from now onwards if one forages, and if one rests
RewardIfForage <- matrix(nrow=maxc+1, ncol=maxt)
RewardIfRest <- matrix(nrow=maxc+1, ncol=maxt)
for (i in 2:maxc) {
RewardIfForage[i,t] <- P_supervivencia[i+1] * Reward[i,t+1] +
P_comida[i+1] * Reward[i,t+1] + Size[i] * Reward[i,t+1]
RewardIfRest[i,t] <- P_supervivencia[i] * Reward[i,t+1] +
P_comida[i] * Reward[i,t+1] + Size[i+1] * Reward[i, t+1]
}
## Now the special cases
## dead ones don't get any rewards at all
RewardIfForage[1,t] <- 0
RewardIfRest[1,t] <- 0
## top ones can't improve their condition
## NOTE(review): `i` here still holds its last loop value (maxc), so these
## four lines read row maxc of Reward and -- crucially -- the last statement
## assigns RewardIfRest[i,t] (row maxc) instead of RewardIfRest[maxc+1,t].
## Row maxc+1 of RewardIfRest is therefore never filled and stays NA, which
## propagates NAs into Reward and ForageRule; a likely cause of the blank
## (uncoloured) levelplot reported below in this file.
RewardIfForage[maxc+1,t] <- P_supervivencia[i+1] * Reward[i,t+1] +
P_comida[i+1] * Reward[i,t+1] + Size[i] * Reward[i,t+1]
RewardIfRest[i,t] <- P_supervivencia[i] * Reward[i,t+1] +
P_comida[i] * Reward[i,t+1] + Size[i+1] * Reward[i, t+1]
## Calculate the best foraging rule. This makes clever use
## of matrix notation as well as of boolean values:
## if the statement is true, the value becomes 1,
## and zero otherwise.
ForageRule[,t] <- RewardIfForage[,t] > RewardIfRest[,t]
## Update Reward by assuming individuals use the
## better of the two behavioural options in each case.
## The ! means 'not'.
Reward[,t] <- ForageRule[,t] * RewardIfForage[,t] +
as.numeric(!ForageRule[,t]) * RewardIfRest[,t]
}
## Now some graphical procedures. Each state is represented as a rectangle
## that will be coloured blue or white depending on whether one forages or not.
## This plots coloured squares in the correct position on a graph.
colour <- c("white", "blue")
require(lattice)
require(grid)
mypanel <- function(x, y, z, ...) {
panel.levelplot(x, y, z, ...)
grid.rect(x=x, y=y, width=1, height=1, default.units="native")
}
print(levelplot(t(ForageRule),
scales=list(tck=0,
x=list(at=1:maxt,labels=1:maxt),
y=list(at=1:(maxc+1),labels=0:maxc)),
colorkey=FALSE, col.regions=colour, aspect="fill",
xlab="Time", ylab="Condition", panel=mypanel))
return(list(ForageRule=ForageRule))
}
t <- 5
#------------Parameters-----------------
#This script is added to save parameter values used
# Parameter values for a single run of forage() (recorded for reproducibility).
library(grid)
library(lattice)
dmax = 0.3 #probability of death per time unit if you're very heavy
dmin = 0.1 #probability of death per time unit if you're very lean
# NOTE(review): `c` and `t` (assigned above) mask base::c/base::t as
# variables in the global environment; calls like c(...) and t(...) still
# find the functions, but other names would avoid confusion.
c = 0.4 #rate of consuming resources
f = 0.8 # feeding efficiency
maxt = 5 #maximum time (i.e. number of time units the day is divided into)
maxc = 6 #maximum condition (i.e. number of different condition units)
#The output is the ForageRule matrix, with 1 denoting foraging, and 0 denoting resting.
forage(dmin, dmax, c, f, maxt, maxc) #The plot doesn't work?
# Calling this function I only get the matrix in the console, the grid
# appears but with no colors in it.Where is the problem?
#---------------Plots-----------------------
# To obtain the next grids, you must run the lines inside the "forage" function.
# This is strange, and when you want to change the parameters, you have to
# run again the function and the lines inside it. After doing this, you can run
# the lines to obtain the graphs.It is weird, and I don't really know how
# could I solve it in the way it was planned... :/
# Grid 1
library('plot.matrix')
par(mar=c(5.1, 4.1, 4.1, 4.1))
plot(
ForageRule,
y = NULL,
breaks = NULL,
col = colour,
na.col = NULL,
na.cell = TRUE,
na.print = TRUE,
digits = NA,
fmt.cell = NULL,
fmt.key = NULL,
polygon.cell = NULL,
polygon.key = NULL,
text.cell = NULL,
key = list(side = 4, las = 1),
axis.col = maxt,
axis.row = NULL,
axis.key = NULL,
max.col = 70,
ylab = "Fitness",
xlab = "time step",
main = "Decision matrix"
)
axis(2, at=1:7, labels=seq(0,6,1))
# Grid 2
library(reshape2)
library(ggplot2)
ggplot(melt(ForageRule), aes(x=Var2, y=Var1, fill=value)) + geom_tile() +
scale_fill_viridis_d(name = "Action", labels = c("Rest", "Forage"), alpha = 0.5) +
scale_y_discrete(name = "Fitness", breaks = c(1,2,3,4,5,6,7),
labels = c("0","1","2","3","4","5","6"), limit = c(1,2,3,4,5,6,7)) +
scale_x_continuous(name="Time step", limits=c(0.5, 5.5)) +
geom_segment(aes(x = 0.5, y = 0.5, xend = 5.5, yend = 0.5)) +
geom_segment(aes(x = 0.5, y = 1.5, xend = 5.5, yend = 1.5)) +
geom_segment(aes(x = 0.5, y = 2.5, xend = 5.5, yend = 2.5)) +
geom_segment(aes(x = 0.5, y = 3.5, xend = 5.5, yend = 3.5)) +
geom_segment(aes(x = 0.5, y = 4.5, xend = 5.5, yend = 4.5)) +
geom_segment(aes(x = 0.5, y = 5.5, xend = 5.5, yend = 5.5)) +
geom_segment(aes(x = 0.5, y = 6.5, xend = 5.5, yend = 6.5)) +
geom_segment(aes(x = 0.5, y = 7.5, xend = 5.5, yend = 7.5)) +
geom_segment(aes(x = 0.5, y = 0.5, xend = 0.5, yend = 7.5)) +
geom_segment(aes(x = 1.5, y = 0.5, xend = 1.5, yend = 7.5)) +
geom_segment(aes(x = 2.5, y = 0.5, xend = 2.5, yend = 7.5)) +
geom_segment(aes(x = 3.5, y = 0.5, xend = 3.5, yend = 7.5)) +
geom_segment(aes(x = 4.5, y = 0.5, xend = 4.5, yend = 7.5)) +
geom_segment(aes(x = 5.5, y = 0.5, xend = 5.5, yend = 7.5))
# This is not the optimum way to do this, but the other options I
#have considered are worse than this one
# Model parameters for the dynamic-programming foraging example.
dmax = 0.3 #probability of death per time unit if you're very heavy
dmin = 0.1 #probability of death per time unit if you're very lean
# CAUTION: this assignment masks base::c() for the rest of the session;
# any later call like c(1, 2) in this environment would fail.
c = 0.4 #rate of consuming resources
f = 0.8 # feeding efficiency
maxt = 50 #maximum time (i.e. number of time units the day is divided into)
maxc = 10 #maximum condition (i.e. number of different condition units)
#The output is the ForageRule matrix, with 1 denoting foraging, and 0 denoting resting.
library(reshape2)
library(ggplot2)

# Cell borders for the 50 x 11 decision matrix (maxt = 50 time steps,
# maxc + 1 = 11 condition levels), at *.5 positions between tile centers.
h_borders2 <- seq(0.5, 11.5, by = 1)
v_borders2 <- seq(0.5, 50.5, by = 1)

# BUG FIX: the original grid lines were copy-pasted from the 5 x 7 plot
# (x only up to 5.5, y only up to 7.5), so the cell borders covered just a
# corner of this 50 x 11 plot. annotate() also draws each border once
# instead of once per row of the melted data (the aes()-constant pitfall).
ggplot(melt(ForageRule), aes(x = Var2, y = Var1, fill = value)) +
  geom_tile() +
  scale_fill_viridis_d(name = "Action", labels = c("Rest", "Forage"), alpha = 0.5) +
  scale_y_discrete(name = "Fitness", breaks = seq(1,11,1),
                   labels = c("0","1","2","3","4","5","6","7","8","9","10"),
                   limit = c(1,2,3,4,5,6,7,8,9,10,11)) +
  scale_x_continuous(name = "Time step", limits = c(0.5, 50.5)) +
  annotate("segment", x = 0.5, xend = 50.5, y = h_borders2, yend = h_borders2) +
  annotate("segment", x = v_borders2, xend = v_borders2, y = 0.5, yend = 11.5)
|
30cd785f7325ff56e526e35bb40d87ba572e7b2a | 2f3265080ddf6bfbfe9b0926f078d74b8d021abb | /tests/testthat/test-calc.R | 078f11e336ec8194b98c08409cba7574a39e27b7 | [] | no_license | mnel/ggthemes | 543b6f4f3876416847a6a0447ca71b5fa15aa2e0 | 04d7bf7f962a14e60c31b5e193746029adfdb873 | refs/heads/master | 2020-03-21T05:28:50.735532 | 2018-06-19T05:47:50 | 2018-06-19T05:47:50 | 138,163,188 | 0 | 0 | null | 2018-06-21T11:46:24 | 2018-06-21T11:46:23 | null | UTF-8 | R | false | false | 380 | r | test-calc.R | context("calc")
test_that("calc_shape_pal works", {
  # The palette constructor should return a function carrying a "max_n" cap.
  palette_fn <- calc_shape_pal()
  expect_is(palette_fn, "function")
  expect_is(attr(palette_fn, "max_n"), "integer")
  # Requesting n shapes yields n negative integer codes.
  n_requested <- 5L
  result <- palette_fn(n_requested)
  expect_is(result, "integer")
  expect_length(result, n_requested)
  expect_true(all(result < 0))
})
test_that("calc_shape_pal raises warning for large n", {
  # Asking for more shapes than the palette defines should warn.
  oversized_request <- 100
  expect_warning(calc_shape_pal()(oversized_request))
})
|
31ccee71c481133eb3f503d6df79b7494d033982 | fbd59e28fdc8300cf9692118e831a9d290c5d3ff | /postdoc/combine_SEs_test.R | 9d26d05a1d428630dcf886a72fa6ba3c3e16f399 | [] | no_license | NikVetr/minor_scripts | 2868e2dbf487df4edbafd62a2b0349a9e7b600fb | 7e55526ff2bdf495af4025650a29b9f209be85ae | refs/heads/master | 2023-04-13T00:47:51.825522 | 2023-04-04T19:31:34 | 2023-04-04T19:31:34 | 216,924,041 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 448 | r | combine_SEs_test.R | var2 <- function(x, bessel = T) sum((x - mean(x))^2) / (length(x) - ifelse(bessel, 1, 0))
# Simulate two independent standard-normal samples and compare a hand-rolled
# two-sample t-test p-value against stats::t.test().
#
# n1, n2: sample sizes of the two groups.
# Returns: named numeric vector c(my = ..., tt = ...) holding the manual
#   p-value and the t.test() p-value for the same draw.
# NOTE: the manual version uses df = n1 + n2 - 2, whereas t.test()'s default
# (Welch) uses the Satterthwaite df approximation, so the two p-values need
# not agree exactly -- that discrepancy is what the surrounding script plots.
foo <- function(n1, n2){
  x1 <- rnorm(n1)
  x2 <- rnorm(n2)
  dx <- (mean(x1) - mean(x2))
  # Standard error from the Bessel-corrected variances; TRUE spelled out
  # instead of the reassignable shorthand T.
  tx <- dx / (sqrt(var2(x1, TRUE) / n1 + var2(x2, TRUE) / n2))
  pval <- (1 - pt(abs(tx), n1 + n2 - 2)) * 2
  return(c(my = pval, tt = t.test(x1, x2)$p.value))
}
# Compare the hand-rolled p-value against t.test() across 100 simulations.
n1 <- 10
n2 <- 15
# Each replicate returns c(my, tt); transpose so columns are the two methods.
out <- t(replicate(100, foo(n1, n2)))
# Points would fall on the y = x line if the two p-values agreed exactly;
# they deviate slightly because of the degrees-of-freedom approximation.
plot(out); abline(0,1)
# Largest disagreement between the two p-values over all replicates.
max(abs(out[,1] - out[,2]))
|
d11cd5474af4b153bfb8b25ab1123b385f6c7b69 | e0eecd9df16b38b33878fa4c57ef7cc20c839c4d | /doe.preprocess.R | 4d52fc117887f6ce60a0c025921d3a4d25d2cbde | [] | no_license | HopeMuller/nychomeless | 6633549fa0a0e7dcb6fca3e17eb54bda0461e829 | b3c9bb66e5283e716b89e5eb3c3399b896a4377d | refs/heads/main | 2023-04-19T12:25:37.321216 | 2021-05-10T01:37:18 | 2021-05-10T01:37:18 | 358,924,492 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,325 | r | doe.preprocess.R | # load libraries
# Load libraries.
# NOTE: plyr is attached BEFORE tidyverse on purpose -- loading it afterwards
# would mask dplyr verbs (rename/summarise). The script still writes
# dplyr::rename / dplyr::summarise in several places as extra insurance.
library(plyr)
library(tidyverse)
library(foreach)
library(lubridate)
# setwd to doe folder (machine-specific; uncomment the line that applies)
# Kenny: setwd("~/RANYCS/sasdata/development/kmt")
# Hope:
setwd("/Users/Home/mnt/sasdata/development/kmt")
# load in data
# doe data: one student-level CSV per school year, stacked row-wise.
# plyr's rbind.fill pads columns missing in a given year with NA.
raw.student <- foreach(year=2013:2019, .combine='rbind.fill') %do% {
filename <- paste0('student_',year, '.csv')
this.data <- read_csv(filename)
this.data
}
# load nsc college attendance data
raw.nsc <- read_csv('nsc_all.csv')
# read in school-level data (machine-specific path)
# Hope:
sch.doe <- read.csv("/Users/Home/Documents/MessyData/finalproj/DOE_schooldata.csv")
# Kenny: sch.doe <- read_csv("/Users/kennymai/Documents/nychomeless/DOE_schooldata.csv")
# assign student-level data to new name
doe.full <- raw.student
# assign nsc data to new name
nsc <- raw.nsc
# rename school column for merging (DBN -> mod.sch to match the student data)
sch.doe <- sch.doe %>%
  rename(mod.sch = DBN)
# clean the student-level data
doe.full <- doe.full %>%
  # rename raw administrative column names to short analysis names
  dplyr::rename(id = RANYCSID,
                pov = ANYPOV,
                hmls = STHFLG,
                shlt = SHLFLG,
                iep = IEPSPDBIO,
                ell = LEPBIO,
                year = YEAR,
                hlang = HLANG,
                bplace = BPLACE,
                gen = GENCAT,
                eth = ETHCAT,
                dob = DOB,
                grade = DOEGLVOCT,
                sch.fall = DBNOCT,
                status.fall = AGDDCATOCT,
                sch.spr = DBNJUN,
                status.spr = AGDDCATJUN,
                abs = ABSTOT,
                sus = SUSTOT,
                sus.days = SUSTOTDAYS)
# delete ela and math scores, too many are missing
doe.full <- doe.full %>%
  select(-ELASSC, - MTHSSC) %>%
  # subset data to high-school grade levels (stored as zero-padded strings)
  filter(grade == "09" | grade == "10" | grade == "11" | grade == "12") %>%
  # # filter out suspensions listed with more days than school year
  # filter(sus.days < 183 | is.na(sus.days)) %>%
  # change grades to numeric values
  mutate(grade = as.numeric(grade),
         # recode gender male as 0
         gen = ifelse(gen == 2, 0, 1),
         # recode shelter NAs as 0
         shlt = ifelse(is.na(shlt), 0, shlt),
         # combine absent days and suspended days as days missing from school
         # (NOTE: `abs` here is the renamed column, not base::abs)
         missed = abs + sus.days,
         # fraction of the 182-day school year missed, capped at 1
         per.missed = round(missed/182,2),
         per.missed = ifelse(per.missed > 1, 1, per.missed),
         # parse birth year to new column (`year(...)` is lubridate's function,
         # which still resolves correctly despite the `year` column above)
         birth.yr = year(mdy(dob)),
         # create column to show if they moved mid-year
         mvd.mid = case_when(sch.fall != sch.spr ~ 1,
                             sch.fall == sch.spr ~ 0)) %>%
  # filter to only keep students who will graduate by or before 2019
  filter((grade == 12 & year == 2019) |
           (grade >= 11 & year == 2018) |
           (grade >= 10 & year == 2017) |
           (grade >= 9 & year == 2016) |
           (year < 2016))
# filter out students who didn't attend 9th grade in a DOE
doe.full <- doe.full %>%
  group_by(id) %>%
  filter(min(grade) == 9)
# column for total grades completed within NYC DOE (grades 9 - 12): visualization variable
# Pattern used throughout: summarise per student, then right_join back onto
# the full data so every row carries the per-student value.
doe.full <- doe.full %>%
  group_by(id) %>%
  dplyr::summarise(comp.grades = n_distinct(grade)) %>%
  ungroup() %>%
  right_join(doe.full)
# report final school attended (grade 9 - 12): visualization variable
doe.full <- doe.full %>%
  group_by(id) %>%
  select(sch.spr, year) %>%
  rename(final.sch = sch.spr) %>%
  filter(year == max(year)) %>%
  select(-year) %>%
  right_join(doe.full)
# report final status as of 2019: outcome variable
doe.full <- doe.full %>%
  group_by(id) %>%
  dplyr::summarise(final.status = last(status.spr)) %>%
  ungroup() %>%
  right_join(doe.full)
# create graduation variable (final.status == 2 means graduated)
# NOTE(review): this mutates the full table and then right-joins it onto
# itself using all shared columns as keys; verify no duplicate rows exist,
# otherwise the self-join could fan out.
doe.full <- doe.full %>%
  group_by(id) %>%
  mutate(graduate = ifelse(final.status == 2, 1, 0)) %>%
  ungroup() %>%
  right_join(doe.full)
# filter out students whose final status is moving since we don't have the outcome we need
doe.full <- doe.full %>%
  filter(final.status != 4)
# take out grades 11 and 12 for predictive variables below -----------------------------------
doe.full <- doe.full %>%
  filter(grade < 11)
# add "any" flags: 1 if the condition was ever observed for the student
doe.full <- doe.full %>%
  group_by(id) %>%
  mutate(any.pov = as.numeric(pov > 0),
         any.shlt = as.numeric(shlt > 0),
         any.iep = as.numeric(iep > 0),
         any.ell = as.numeric(ell > 0),
         any.shlt = ifelse(is.na(any.shlt) == T, 0, any.shlt),
         any.mvd = max(mvd.mid)
  )
# add total count of schools each student attended
# (distinct fall/spring school pairs per student)
doe.full <- doe.full %>%
  group_by(id) %>%
  dplyr::summarise(num.schools = n_distinct(interaction(sch.fall, sch.spr))) %>%
  ungroup() %>%
  right_join(doe.full)
# calculate mean percentage and total days missed (absent and suspended)
# calculate mean number of suspensions per year
doe.full <- doe.full %>%
  group_by(id) %>%
  dplyr::summarise(mn.days.miss = round(mean(missed, na.rm=T)),
                   av.per.miss = round(mean(per.missed, na.rm=T),2),
                   mn.num.sus = round(mean(sus, na.rm=T))) %>%
  ungroup() %>%
  right_join(doe.full)
# create column for freshman year
# NOTE(review): the second assignment (frsh = min(year)) overwrites the
# case_when result on every row -- confirm the first line is intentional.
doe.full <- doe.full %>%
  group_by(id) %>%
  mutate(frsh = case_when(grade == 9 ~ year - 1),
         frsh = min(year)) %>%
  ungroup() %>%
  right_join(doe.full)
# create column of age difference between NYC mandated school-age entry and grade 9-age entry
doe.full <- doe.full %>%
  mutate(age.diff = frsh - birth.yr - 14) %>%
  # filter out students listed as starting their freshman year at age 19 or after
  filter(age.diff < 7)
# indicate flag if student repeated a grade, in grade 9 or 10
# add_count(id) adds a per-student row count `n`; a repeat shows up as more
# observed years than distinct grades.
doe.full <- doe.full %>%
  add_count(id) %>%
  group_by(id) %>%
  arrange(id) %>%
  mutate(any.repeats = case_when(n > 1 & (n_distinct(year) != n_distinct(grade)) ~ 1,
                                 n == 1 | (n_distinct(year) == n_distinct(grade)) ~ 0)) %>%
  select(-n)
# report final school attended (grade 9 and 10) -- joins later to school data
doe.full <- doe.full %>%
  group_by(id) %>%
  select(sch.spr, year) %>%
  rename(mod.sch = sch.spr) %>%
  filter(year == max(year)) %>%
  select(-year) %>%
  right_join(doe.full)
# change all NaN values to 0 (NaN arises from mean() over empty groups)
doe.full <- doe.full %>%
  mutate(mn.days.miss = ifelse(is.nan(mn.days.miss) == T, 0, mn.days.miss),
         av.per.miss = ifelse(is.nan(av.per.miss) == T, 0, av.per.miss),
         mn.num.sus = ifelse(is.nan(mn.num.sus) == T, 0, mn.num.sus))
# clean up columns: drop the raw per-year fields now that per-student
# summaries exist
doe.simp <- doe.full %>%
  select(-pov, -hmls, -shlt, -iep, -ell, -abs, -sus, -sus.days, -missed,
         -per.missed, -dob,-sch.fall, -sch.spr, -status.fall, -status.spr,
         -birth.yr, -mvd.mid)
# collapsing rows, so that there is only one row per student
# (keeps each student's most recent year)
doe.simp <- doe.simp %>%
  group_by(id) %>%
  filter(year == max(year))
# Recode missing demographic fields to explicit "unknown" codes.
# Direct NA-indexed assignment replaces the original redundant
# `ifelse(is.na(x) == T, ...)` calls (`== T` is a no-op on a logical vector,
# and T is a reassignable shorthand); behavior is identical on these columns.
doe.simp$eth[is.na(doe.simp$eth)] <- 6          # 6: unknown or unspecified ethnicity
doe.simp$hlang[is.na(doe.simp$hlang)] <- "CE"   # CE: unknown home language
doe.simp$bplace[is.na(doe.simp$bplace)] <- "ZZ" # ZZ: birthplace not available
# checkpoint, for troubleshooting
backup <- doe.simp
doe.simp <- backup
# add school level columns and nsc first year college columns
doe.sch <- doe.simp %>%
  left_join(sch.doe)
# College attendance data:
# rename id and year variables to match the student table
nsc <- nsc %>%
  dplyr::rename(id = RANYCSID,
                year = YEAR)
doe.all <- doe.sch %>%
  left_join(nsc, by = "id")
# create college column: 1 if enrolled in the spring term after graduation
# NOTE(review): NSCSTRSPR appears on BOTH sides of the `|` -- was the first
# term meant to be NSCSTRFAL (fall enrollment)? Confirm against the NSC
# codebook before trusting this flag.
doe.all <- doe.all %>%
  mutate(college = ifelse((NSCSTRSPR > 1 | NSCSTRSPR > 1), 1, 0)) %>%
  # filter out students that went to college, but, did not graduate in a NYC DOE school,
  # and did not list their moving with the DOE
  filter((comp.grades !=2 & college == 1) | is.na(college)) %>%
  # hand-picked ids removed for the same reason (documented data anomalies)
  filter(id != "977E041DEE77") %>%
  filter(id != "6C5ECB1DF612") %>%
  filter(id != "05396D1EEDC5") %>%
  filter(id != "046FBDE32012") %>%
  # students with no NSC record are coded as not attending college
  mutate(college = ifelse(is.na(college) == T, 0, college)) %>%
  select(-year.y, -NSCSTAFAL, -NSCNAMFAL, -NSCSTRFAL, -NSCSTASPR,
         -NSCNAMSPR, -NSCSTRSPR, -NSCANYFAL, -NSC4YRFAL, -NSC2YRFAL,
         -NSCANYSPR, -NSC4YRSPR, -NSC2YRSPR)
|
e9aae272cf613d8d0983da11aeb4559e3925f768 | 8a483632aada1fea716ed7ddab9ef42b113c413e | /code/scenarios/80_10/run_all.R | 1d067a0ae74d274e5aecada68642c8169fea4ea6 | [] | no_license | ben-williams/parallel_diverge | ea54ca6caee59d321412e088ae57f920850d4464 | 9a0fd91a8e2418bbb0b1f0e7f37ca9b8c66acd7c | refs/heads/master | 2020-07-06T12:56:27.404297 | 2018-08-06T18:44:12 | 2018-08-06T18:44:12 | 66,984,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 493 | r | run_all.R | # rm(list=ls())
# source('code/scenarios/80_10/status_quo.R')
# rm(list=ls())
# source('code/scenarios/80_10/state_llp_super_x.R')
# rm(list=ls())
# source('code/scenarios/80_10/state_llp_small_vessel.R')
# rm(list=ls())
# source('code/scenarios/80_10/state_llp_equal_catch_share.R')
# rm(list=ls())
# source('code/scenarios/80_10/community_quota_open_access.R')
rm(list=ls())
source('code/scenarios/80_10/community_quota_fed_only.R')
rm(list=ls())
source('code/80_10/scenarios/fed_ifq.R') |
710a62066d09785a47dff9bdac7da3ffebc0ea93 | e97dd4bea5b9a53197b3ee29402f45725abfadb7 | /man/randcorr.sample.sink.Rd | c36126567e80aec14c551a28cd5e33d481e48547 | [] | no_license | cran/randcorr | 4b4bba95197387034c847768e718a8ba4128ece1 | 9c44d079c29fcd89dc43b510dbd890658ea6dd1e | refs/heads/master | 2020-04-06T23:57:55.442875 | 2018-11-16T14:30:03 | 2018-11-16T14:30:03 | 157,886,501 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,236 | rd | randcorr.sample.sink.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randcorr.R
\name{randcorr.sample.sink}
\alias{randcorr.sample.sink}
\title{Sample from the (unnormalized) distribution sin(x)^k, 0 < x < pi, k >= 1}
\usage{
randcorr.sample.sink(k)
}
\arguments{
\item{k}{The \code{k} parameter of the distribution. If this is a vector, the function draws a random variate for every entry in \code{k}.}
}
\value{
A vector of samples with length equal to the length of \code{k}
}
\description{
Sample from the (unnormalized) distribution sin(x)^k, 0 < x < pi, k >= 1
}
\section{Details}{
This code generates samples from the sin(x)^k distribution using the specified vector \code{k}.
}
\examples{
# -----------------------------------------------------------------
# Example 1: Draw a random variate from sin(x), 0<x<pi
x = randcorr.sample.sink(1)
# Example 2: Draw a million random variates from sin^3(x), 0<x<pi
x = randcorr.sample.sink( matrix(3, 1e6,1) )
mean(x)
var(x)
}
\references{
Enes Makalic and Daniel F. Schmidt
An efficient algorithm for sampling from sin^k(x) for generating random correlation matrices,
arXiv:1809.05212, 2018.
}
\seealso{
\code{\link{randcorr}}
}
|
0ae3ddcdf7332bfdbdc180ae6882d805e053c07f | cc2bd8bb7a92aad4b7b4186c58a3ce0a00aa9f97 | /man/get_question.Rd | 5a41b6e26ee0767c479ae60920e37cd3685386fd | [
"MIT"
] | permissive | d-edison/RSOQuestions | 419bb486bd9579a49513e0a8e5db60939df18c8f | 32d1e72d8e99d2c80d2f00101025f49abaf2d791 | refs/heads/master | 2020-05-07T15:19:35.266164 | 2019-04-10T17:47:04 | 2019-04-10T17:47:04 | 180,631,894 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 492 | rd | get_question.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-question.R
\name{get_question}
\alias{get_question}
\title{Get A Question by ID}
\usage{
get_question(id = get_recent_ids()[1])
}
\arguments{
\item{id}{The ID of the question, which can be found in the URL (defaults to
the most recent question on the \link{r} tag on stackoverflow).}
}
\value{
An object of class \code{SOQuestion}
}
\description{
Get A Question by ID
}
\examples{
q <- get_question(54028838)
}
|
f33dc7cbf321a859662e18d0cf42d8612ec043c9 | ea0904825812f1c80bedb575cb5bb5b7da7ec9c0 | /man/FuzzyPairwiseComparisonMatrix-class.Rd | f2421bcd35662d5a80980ccdb2f8bd202ea0e073 | [] | no_license | cran/FuzzyAHP | f1cebe1e55d01956d04010250b1d189ae4e8167c | 1e2015389867bdab2351fe7ba9a34e2b534ae331 | refs/heads/master | 2021-01-10T13:17:36.203197 | 2019-12-06T15:40:02 | 2019-12-06T15:40:02 | 55,608,658 | 0 | 3 | null | null | null | null | UTF-8 | R | false | true | 711 | rd | FuzzyPairwiseComparisonMatrix-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-FuzzyPairwiseComparisonMatrix.R
\docType{class}
\name{FuzzyPairwiseComparisonMatrix-class}
\alias{FuzzyPairwiseComparisonMatrix-class}
\title{Class "FuzzyPairwiseComparisonMatrix"}
\description{
An S4 class to represent a fuzzy pairwise comparison matrix.
}
\section{Slots}{
\describe{
\item{\code{fnMin}}{A matrix of minimal values of fuzzy preferences.}
\item{\code{fnModal}}{A matrix of modal values of fuzzy preferences.}
\item{\code{fnMax}}{A matrix of maximal values of fuzzy preferences.}
\item{\code{variableNames}}{Names of variables in the pairwise comparison matrix, obtained either from the column names or the row names.}
}}
|
abfa88d0d361b1ed8e6d837ca1e4a06c021ef9dc | f08c0e77a55ff1f07be5950e64a6eb41cde4ff35 | /Classification - Logistic Regression/Classification -Logistic Regression.R | 566b0dd0f98a8cbdef0453c1c62b52ac46aed508 | [] | no_license | Bharat05/R | f2f943e188ac486965d4476f94c4da038ec36c50 | 69d740bba198abe47270d5777dccc6ee14cdedd4 | refs/heads/main | 2023-05-09T22:48:19.169079 | 2021-06-07T23:05:27 | 2021-06-07T23:05:27 | 339,182,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,111 | r | Classification -Logistic Regression.R | ##################################################
### Assignment 4 Classification ##
##################################################
##################################################
# Written by Bharat Thakur ##
#
##################################################
### Basic Set Up ##
##################################################
# Clear plots
if(!is.null(dev.list())) dev.off()
# Clear console
cat("\014")
# Clean workspace
rm(list=ls())
#Set work directory
setwd("C:/Users/Kala/Google Drive/Conestoga/Data Analysis/Assignment 4 Classification/")
options(scipen=9)
##################################################
### Install Libraries ##
##################################################
#If the library is not already downloaded, download it
if(!require(pROC)){install.packages("pROC")}
library(pROC)
if(!require(klaR)){install.packages("klaR")}
library("klaR")
# For LDA
if(!require(MASS)){install.packages("MASS")}
library("MASS")
##################################################
### Read data and do preliminary data checks    ##
##################################################
# Read "comma separated value" files (".csv")
# Tumor data set
Tumor_BT <- read.csv("Tumor_20F.csv", header = TRUE, sep = ",")
head(Tumor_BT,5) #Print a Few Observations to Verify
#Rename for easier interpretation
names(Tumor_BT) <- c("Outcome_BT", "Age_BT", "Sex_BT", "Bone_Density_BT", "Bone_Marrow_BT", "Lung_Spot_BT", "Pleura_BT",
                     "Liver_Spot_BT", "Brain_Scan_BT", "Skin_Lesions_BT", "Stiff_Neck_BT", "Supraclavicular_BT",
                     "Axillar_BT", "Mediastinum_BT")
names(Tumor_BT)
str(Tumor_BT)
summary(Tumor_BT)
# Shift every column from {1,2} coding to {0,1} coding. Data frames support
# column-wise arithmetic directly, so the original per-column for-loop
# (Tumor_BT[i] <- Tumor_BT[i] - 1) is unnecessary; the result is identical.
Tumor_BT <- Tumor_BT - 1
Tumor_BT$Age_BT <- Tumor_BT$Age_BT - 1 #Age took values {2,3}
#So that 0 means No and 1 means Yes consistently throughout the dataset
Tumor_BT$Supraclavicular_BT <- ifelse(Tumor_BT$Supraclavicular_BT == 0, 1, 0)
Tumor_BT$Axillar_BT <- ifelse(Tumor_BT$Axillar_BT == 0, 1, 0)
head(Tumor_BT,5) #Print a Few Observations to Verify
##################################################
### Descriptive Analysis                        ##
##################################################
summary(Tumor_BT)
# Helper: tabulate one binary/categorical column, attach readable level
# names, draw its bar plot, and return the table invisibly so every
# *_table_BT object below remains available for later inspection.
# This replaces fourteen copy-pasted table/names/barplot triples.
make_bar_BT <- function(column, level_names, title) {
  freq_tbl <- table(column)
  names(freq_tbl) <- level_names
  barplot(freq_tbl, main = title)
  invisible(freq_tbl)
}
par(mfrow=c(3,5))
Out_table_BT        <- make_bar_BT(Tumor_BT$Outcome_BT,         c('Not Present', 'Present'), "Tumor Outcome")
Age_table_BT        <- make_bar_BT(Tumor_BT$Age_BT,             c('Younger', 'Older'),       "Age")
Sex_table_BT        <- make_bar_BT(Tumor_BT$Sex_BT,             c('Female', 'Male'),         "Sex")
Bone_Den_table_BT   <- make_bar_BT(Tumor_BT$Bone_Density_BT,    c('Good', 'Bad'),            "Bone Density")
Bone_Mar_table_BT   <- make_bar_BT(Tumor_BT$Bone_Marrow_BT,     c('Good', 'Bad'),            "Bone Marrow")
Lung_Spot_table_BT  <- make_bar_BT(Tumor_BT$Lung_Spot_BT,       c('No', 'Yes'),              "Lung Spot")
Pleura_table_BT     <- make_bar_BT(Tumor_BT$Pleura_BT,          c('No', 'Yes'),              "Pleura")
Liver_Spot_table_BT <- make_bar_BT(Tumor_BT$Liver_Spot_BT,      c('No', 'Yes'),              "Liver Spot")
Brain_table_BT      <- make_bar_BT(Tumor_BT$Brain_Scan_BT,      c('No', 'Yes'),              "Brain Scan")
Skin_table_BT       <- make_bar_BT(Tumor_BT$Skin_Lesions_BT,    c('No', 'Yes'),              "Skin Lesions")
Neck_table_BT       <- make_bar_BT(Tumor_BT$Stiff_Neck_BT,      c('No', 'Yes'),              "Stiff Neck")
Supra_table_BT      <- make_bar_BT(Tumor_BT$Supraclavicular_BT, c('No', 'Yes'),              "Supraclavicular")
Axil_table_BT       <- make_bar_BT(Tumor_BT$Axillar_BT,         c('No', 'Yes'),              "Axillar")
Media_table_BT      <- make_bar_BT(Tumor_BT$Mediastinum_BT,     c('No', 'Yes'),              "Mediastinum")
par(mfrow=c(1,1))
##################################################
### Exploratory Analysis                        ##
##################################################
# Spearman rank correlation is appropriate here since every column is a
# 0/1 indicator at this point.
Tumor_Corr_BT <- cor(Tumor_BT, method="spearman")
round(Tumor_Corr_BT, 2)
######## Contingency table for Mediastinum_BT variable
Tbl_Media_BT <- table(Tumor_BT$Outcome_BT, Tumor_BT$Mediastinum_BT, dnn=list("Outcome", "Mediastinum"))
Tbl_Media_BT
prop.table(Tbl_Media_BT, 2) # col percentages
#Check the Chi Squared Test - NOTE Removal of Yate's Continuity Correction
chisq_Media_BT <- chisq.test(Tumor_BT$Outcome_BT, Tumor_BT$Mediastinum_BT, correct=FALSE)
chisq_Media_BT
chisq_Media_BT$observed # What we observed
chisq_Media_BT$expected # If there were no relationship
######## Contingency table for Sex_BT variable
Tbl_Sex_BT <- table(Tumor_BT$Outcome_BT, Tumor_BT$Sex_BT, dnn=list("Outcome", "Sex"))
Tbl_Sex_BT
prop.table(Tbl_Sex_BT, 2) # col percentages
# 47% of females had tumors compared to men who had tumors 75% of the time.
#Check the Chi Squared Test - NOTE Removal of Yate's Continuity Correction
chisq_Sex_BT <- chisq.test(Tumor_BT$Outcome_BT, Tumor_BT$Sex_BT, correct=FALSE)
chisq_Sex_BT
chisq_Sex_BT$observed # What we observed
chisq_Sex_BT$expected # If there were no relationship
# If there were no relationship around 60% of both men and women should have tumors, but this is not what
# we observed; that suggests that outcome and sex are correlated.
#Mediastinum Bar Chart (column proportions, stacked by outcome)
barplot(prop.table(Tbl_Media_BT,2), xlab='Mediastinum',ylab='Outcome',main="Outcome by Mediastinum",
        col=c("darkblue","darkred")
        ,legend=rownames(Tbl_Media_BT), args.legend = list(x = "topleft"))
#Sex Bar Chart (column proportions, stacked by outcome)
barplot(prop.table(Tbl_Sex_BT,2), xlab='Sex',ylab='Outcome',main="Outcome by Sex",
        col=c("darkblue","darkred"),legend=rownames(Tbl_Sex_BT), args.legend = list(x = "topleft"))
##################################################
### Building the Model                          ##
##################################################
# Full logistic regression on all 13 predictors, then backward/forward
# stepwise selection by AIC.
Out_glm_BT = glm(Outcome_BT ~ Age_BT + Sex_BT + Bone_Density_BT + Bone_Marrow_BT +
                   Lung_Spot_BT + Pleura_BT + Liver_Spot_BT + Brain_Scan_BT +
                   Skin_Lesions_BT + Stiff_Neck_BT + Supraclavicular_BT+
                   Axillar_BT + Mediastinum_BT,
                 family="binomial", data=Tumor_BT, na.action=na.omit)
stp_Out_glm_BT <- step(Out_glm_BT)
summary(stp_Out_glm_BT)
# Coefficient signs match the correlation coefficients above.
#UserModel 1 (Dropping Brain_Scan_BT)
Out_UM_1_BT = glm(Outcome_BT ~ Sex_BT + Bone_Density_BT + Skin_Lesions_BT +
                    Stiff_Neck_BT + Supraclavicular_BT+ Axillar_BT + Mediastinum_BT,
                  family="binomial", data=Tumor_BT, na.action=na.omit)
summary(Out_UM_1_BT)
#UserModel 2 (Dropping Supraclavicular_BT); fit is timed for comparison
start_time <- Sys.time()
Out_UM_2_BT = glm(Outcome_BT ~ Sex_BT + Bone_Density_BT + Brain_Scan_BT + Skin_Lesions_BT +
                    Stiff_Neck_BT + Axillar_BT + Mediastinum_BT,
                  family="binomial", data=Tumor_BT, na.action=na.omit)
end_time <- Sys.time()
UM2_time_BT = end_time - start_time
summary(Out_UM_2_BT)
## Check the User Models
#Confusion Matrix User Model 1
resp_UM_1_BT <- predict(Out_UM_1_BT, type="response") # creates probabilities
head(resp_UM_1_BT,20)
Class_UM_1_BT <- ifelse(resp_UM_1_BT > 0.5,1,0) # Classifies probabilities (>50% -> predicted tumor present)
head(Class_UM_1_BT)
True_log_BT <- Tumor_BT$Outcome_BT #Creates a vector of the true outcomes
T1_BT <- table(True_log_BT, Class_UM_1_BT, dnn=list("Act Outcome","Predicted") ) # Creates a Contingency Table
T1_BT
#Confusion Matrix User Model 2
resp_UM_2_BT <- predict(Out_UM_2_BT, type="response") # creates probabilities
head(resp_UM_2_BT,20)
Class_UM_2_BT <- ifelse(resp_UM_2_BT > 0.5,1,0) # Classifies probabilities (>50% -> predicted tumor present)
head(Class_UM_2_BT)
T2_BT <- table(True_log_BT, Class_UM_2_BT, dnn=list("Act Outcome","Predicted") ) # Creates a Contingency Table
T2_BT
#ROC Curve (and Area Under the Curve) for User Model 1
plot(roc(Tumor_BT$Outcome_BT,resp_UM_1_BT, direction="<"),
     col="red", lwd=2, main='ROC Curve for Logistic Regression - Outcome')
auc(Tumor_BT$Outcome_BT, resp_UM_1_BT)
# Both models do better than random chance; the trade-off between TP and FP
# rates decides which is preferable at a given cut-off.
#ROC Curve (and Area Under the Curve) for User Model 2, overlaid on the same plot
plot(roc(Tumor_BT$Outcome_BT,resp_UM_2_BT, direction="<"),
     col="blue", lwd=2, main='ROC Curve for Logistic Regression - Outcome', add = TRUE)
auc(Tumor_BT$Outcome_BT, resp_UM_2_BT)
# Legend distinguishing the two overlaid ROC curves
legend(1, .97, legend=c("User Model 1", "User Model 2"),
       col=c("red", "blue"), lty=1:2, cex=0.8)
### SECOND PART ####
########################################
### 2. Logistic Regression - Stepwise  #
########################################
# Refit the full model and rerun stepwise selection, this time timing it so
# it can be compared against Naive Bayes and LDA below.
start_time_BT <- Sys.time()
Out_glm_BT = glm(Outcome_BT ~ Age_BT + Sex_BT + Bone_Density_BT + Bone_Marrow_BT +
                   Lung_Spot_BT + Pleura_BT + Liver_Spot_BT + Brain_Scan_BT +
                   Skin_Lesions_BT + Stiff_Neck_BT + Supraclavicular_BT+
                   Axillar_BT + Mediastinum_BT,
                 family="binomial", data=Tumor_BT, na.action=na.omit)
stp_Out_glm_BT <- step(Out_glm_BT)
end_time_BT <- Sys.time()
# Calculate the model fitting time
# NOTE: difftime picks its units (secs/mins) automatically; comparisons with
# the other timings below assume the units match.
sw_time_BT <- end_time_BT - start_time_BT
summary(stp_Out_glm_BT)
#confusion matrix
resp_SW_BT <- predict(stp_Out_glm_BT, type="response") # creates probabilities
head(resp_SW_BT,20)
Class_SW_BT <- ifelse(resp_SW_BT > 0.5,1,0) # Classifies probabilities (>50% -> predicted tumor present)
head(Class_SW_BT)
#Creates Confusion Matrix
CF_SW_BT <- table(True_log_BT, Class_SW_BT, dnn=list("Act Outcome","Predicted") ) # Creates a Contingency Table
CF_SW_BT
##################################
# 3. Naive-Bayes Classification  #
##################################
# klaR::NaiveBayes needs a factor response, so convert Outcome_BT first.
str(Tumor_BT)
Tumor_BT$Outcome_BT <- as.factor(Tumor_BT$Outcome_BT)
str(Tumor_BT)
start_time_BT <- Sys.time()
Tumor_Naive_BT <- NaiveBayes(Outcome_BT ~ Age_BT + Sex_BT + Bone_Density_BT + Bone_Marrow_BT +
                               Lung_Spot_BT + Pleura_BT + Liver_Spot_BT + Brain_Scan_BT +
                               Skin_Lesions_BT + Stiff_Neck_BT + Supraclavicular_BT+
                               Axillar_BT + Mediastinum_BT,
                             data = Tumor_BT, na.action=na.omit)
end_time_BT <- Sys.time()
NB_Time_BT <- end_time_BT - start_time_BT
# Classify the training data (in-sample predictions)
pred_bay_BT <- predict(Tumor_Naive_BT,Tumor_BT)
#Creates Confusion Matrix
CF_NB_BT <- table(Actual=Tumor_BT$Outcome_BT, Predicted=pred_bay_BT$class)
CF_NB_BT
##################################
## 4. LDA                        #
##################################
start_time_BT <- Sys.time()
Tumor_Discrim_BT <- lda(Outcome_BT ~ Age_BT + Sex_BT + Bone_Density_BT + Bone_Marrow_BT +
                          Lung_Spot_BT + Pleura_BT + Liver_Spot_BT + Brain_Scan_BT +
                          Skin_Lesions_BT + Stiff_Neck_BT + Supraclavicular_BT+
                          Axillar_BT + Mediastinum_BT,
                        data = Tumor_BT, na.action=na.omit)
end_time_BT <- Sys.time()
LDA_Time_BT <- end_time_BT - start_time_BT
# Classify (in-sample).
# NOTE(review): predict.lda takes `newdata=`, not `data=`; the `data=`
# argument here is silently ignored and the training data is used by
# default, which happens to be what is wanted.
pred_dis_BT <- predict(Tumor_Discrim_BT, data=Tumor_BT)
#head(pred_dis$posterior,20)
#Confusion Matrix
CF_LDA_BT <- table(Actual=Tumor_BT$Outcome_BT, Predicted=pred_dis_BT$class)
# Compare all three classifiers:
# in-sample confusion matrices
CF_NB_BT
CF_LDA_BT
CF_SW_BT
# model fitting times (difftime objects; units may differ between runs)
NB_Time_BT
LDA_Time_BT
sw_time_BT
|
483c23d4bf7e26a408ca7c882cf90e16fc2dde45 | 246d3cc2ca6435ddf0608ea173d43c2828c10332 | /man/plot.walking_distance.Rd | 3655590fd9a8d2fd086775c5914101a18a8aa377 | [] | no_license | kuzmenkov111/cholera | 62dbca487d0d5443ba24df02907d306e5533ed0d | 8b46d011be758a5d694e1ca5ea4038078275f203 | refs/heads/master | 2020-03-08T00:28:35.561563 | 2018-03-31T23:16:13 | 2018-03-31T23:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 643 | rd | plot.walking_distance.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/walkingDistance.R
\name{plot.walking_distance}
\alias{plot.walking_distance}
\title{Plot the walking distance between cases and/or pumps.}
\usage{
\method{plot}{walking_distance}(x, zoom = TRUE, radius = 0.5, ...)
}
\arguments{
\item{x}{An object of class "walking_distance" created by walkingDistance().}
\item{zoom}{Logical.}
\item{radius}{Numeric. Controls the degree of zoom.}
\item{...}{Additional plotting parameters.}
}
\value{
A base R plot.
}
\description{
Plot the walking distance between cases and/or pumps.
}
\examples{
# plot(walkingDistance(1))
}
|
ac63e274daf4a06f828e2f043e08babc3cde048a | 8308c107fe3b74f70e114db69eb59591a9029bd6 | /R/loadFiles.R | 2bbcb0db05f757f52db61cb129cdff14564aaf5d | [] | no_license | asakellariou/git-git.bioconductor.org-packages-mAPKL | 6fbcb95e90c10f7fc338476e42082b1d335c3912 | eeb2c34da54a369ff065c61f5b5ce012eeec34bb | refs/heads/master | 2020-05-19T13:45:16.185107 | 2019-05-24T18:02:51 | 2019-05-24T18:02:51 | 185,047,329 | 0 | 0 | null | 2019-05-24T18:02:52 | 2019-05-05T15:13:21 | R | UTF-8 | R | false | false | 1,875 | r | loadFiles.R | loadFiles <-
function(filesPath, trainFile, labelsFile, validationFile=NULL,
    validationLabels=NULL) {
    ## Read a tab-delimited training expression matrix plus its 0/1 class
    ## labels (and optionally a validation pair) from `filesPath`, and wrap
    ## them as ExpressionSet objects inside a "DataLD" object.
    ##
    ## filesPath        directory holding the files; used as a raw path prefix,
    ##                  so it should end with the path separator.
    ## trainFile        file name of the training expression matrix.
    ## labelsFile       file name of the training class labels (0 = Control,
    ##                  1 = Treatment, judging by the counts computed below).
    ## validationFile   optional validation expression matrix file name.
    ## validationLabels optional validation labels file name.
    ## Returns: the populated "DataLD" object (project-defined S4 class).
    dataObj <- new("DataLD")
    # NOTE(review): changing the working directory is a side effect the caller
    # may not expect, and the fully-prefixed paths built below make it look
    # redundant -- confirm before relying on it.
    setwd(filesPath)
    # sprintf("%s", x) is a no-op here; the paths are just filesPath+fileName.
    expfile1 <- paste(filesPath, sprintf("%s", trainFile), sep="")
    trainset <- read.delim(expfile1, TRUE, row.names=1)
    intensTrain <- data.matrix(trainset)
    expfile2 <- paste(filesPath, sprintf("%s", labelsFile), sep="")
    classL <- read.delim(expfile2, row.names = 1, header=TRUE)
    phenoData <- new("AnnotatedDataFrame", data=classL)
    dataObj@trainObj <- ExpressionSet(assayData=intensTrain,
                                      phenoData=phenoData)
    # Labels are 0/1, so their sum is the number of Treatment samples.
    Treatment <- sum(classL)
    Control <- length(classL[,1]) - Treatment
    samples <- sprintf("Number of Control samples=%d and Treatment samples=%d",
                       Control, Treatment)
    message(samples)
    # NOTE(review): `classL` is a data.frame and decreasing="FALSE" is a
    # character string rather than logical FALSE; both rely on implicit
    # coercion inside order() -- verify this behaves as intended.
    idx <- order(classL, decreasing="FALSE")
    ordCls <- classL[,1][idx]
    # If the samples are already sorted 0s-then-1s, report where the
    # Treatment block sits.
    if(identical(classL[,1],ordCls)) {
        message("Samples are ordered according to '0' and '1' labels")
        startidx <- Control + 1
        endidx <- Control + Treatment
        # The string literal below intentionally contains an embedded newline.
        dim_disease <- sprintf("The Treatment samples range between columns:
                            %d to %d", startidx, endidx)
        message(paste(strwrap(dim_disease, exdent=2), collapse="\n"))
    }
    else message("Samples are not ordered according to labels '0' and '1'")
    # Optionally load the validation set the same way into the @valObj slot.
    if(!is.null(validationFile)) {
        expfile <- paste(filesPath, sprintf("%s", validationFile), sep="")
        testset <- read.delim(expfile, TRUE, row.names=1)
        intensTest <- data.matrix(testset)
        expfile3 <- paste(filesPath,sprintf("%s", validationLabels), sep="")
        valClassL <- read.delim(expfile3, row.names = 1, header=TRUE)
        phenoData <- new("AnnotatedDataFrame", data=valClassL)
        dataObj@valObj <- ExpressionSet(assayData=intensTest,
                                        phenoData=phenoData)
    }
    # Return the populated container (last expression; no explicit return()).
    dataObj
}
|
3d298436dcca9b09c2fb6521e925a6a21e8b5d53 | 6d8572fb50a9ba39e6372ff0de70aac877d50ec7 | /man/plot_not_na.Rd | 9f0321a39188ef44a83dbece0b2ec2307450cb46 | [] | no_license | erikerhardt/isogasex | aed346bf689f28dce3d8500dc799e80b7354c037 | 2e3fc9c21c1d3d8e2348b7bff28954b5a169b0e8 | refs/heads/master | 2020-05-22T00:32:30.670300 | 2019-07-16T04:43:20 | 2019-07-16T04:43:20 | 186,173,267 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 667 | rd | plot_not_na.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_not_na.R
\name{plot_not_na}
\alias{plot_not_na}
\title{if no data to plot, plot a dummy box (for when certain Licor columns are not collected)}
\usage{
plot_not_na(x_time, y_var, pch = 20, type, cex = 0.1, xlab = "",
ylab, main)
}
\arguments{
\item{x_time}{xxxPARAMxxx}
\item{y_var}{xxxPARAMxxx}
\item{pch}{xxxPARAMxxx}
\item{type}{xxxPARAMxxx}
\item{cex}{xxxPARAMxxx}
\item{xlab}{xxxPARAMxxx}
\item{ylab}{xxxPARAMxxx}
\item{main}{xxxPARAMxxx}
}
\value{
plot_not_na_val xxxRETURNxxx
}
\description{
If no data to plot, returns a "no data" (0,0) point as plot place holder.
}
|
86de64af19977c0965faf7692431636b8a531548 | 63e1231faa30a4cea6dd9f25e87c2372383aa2f4 | /man/Hist-class.Rd | 9a2010096e081c0047e106cc413c0fb806f37979 | [] | no_license | cran/MSEtool | 35e4f802f1078412d5ebc2efc3149c46fc6d13a5 | 6b060d381adf2007becf5605bc295cca62f26770 | refs/heads/master | 2023-08-03T06:51:58.080968 | 2023-07-19T22:10:23 | 2023-07-20T01:47:18 | 145,912,213 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,159 | rd | Hist-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class_definitions.R
\docType{class}
\name{Hist-class}
\alias{Hist-class}
\title{Class \code{'Hist'}}
\description{
An object for storing information generated by the end of the historical simulations
}
\section{Slots}{
\describe{
\item{\code{Data}}{The Data object at the end of the historical period}
\item{\code{OMPars}}{A numeric data.frame with nsim rows with sampled Stock, Fleet,
Obs, and Imp parameters.}
\item{\code{AtAge}}{A named list with arrays of dimensions: \code{c(nsim, maxage+1, nyears+proyears)} or
\code{c(nsim, maxage+1, nyears, nareas)}
\itemize{
\item Length: Length-at-age for each simulation, age, and year
\item Weight: Weight-at-age for each simulation, age, and year
\item Select: Selectivity-at-age for each simulation, age, and year
\item Retention: Retention-at-age for each simulation, age, and year
\item Maturity: Maturity-at-age for each simulation, age, and year
\item N.Mortality: Natural mortality-at-age for each simulation, age, and year
\item Z.Mortality: Total mortality-at-age for each simulation, age, year and area
\item F.Mortality: Fishing mortality-at-age for each simulation, age, year and area
\item Fret.Mortality: Fishing mortality-at-age for retained fish for each
simulation, age, year and area
\item Number: Total numbers by simulation, age, year and area
\item Biomass: Total biomass by simulation, age, year and area
\item VBiomass: Vulnerable biomass by simulation, age, year and area
\item SBiomass: Spawning biomass by simulation, age, year and area
\item Removals: Removals (biomass) by simulation, age, year and area
\item Landings: Landings (biomass) by simulation, age, year and area
\item Discards: Discards (biomass) by simulation, age, year and area
}}
\item{\code{TSdata}}{A named list with population and fleet dynamics:
\itemize{
\item Number: Total numbers; array dimensions \code{c(nsim, nyears, nareas)}
\item Biomass: Total biomass; array dimensions \code{c(nsim, nyears, nareas)}
\item VBiomass: Vulnerable biomass; array dimensions \code{c(nsim, nyears, nareas)}
\item SBiomass: Spawning Biomass; array dimensions \code{c(nsim, nyears, nareas)}
\item Removals: Removals (biomass); array dimensions \code{c(nsim, nyears, nareas)}
\item Landings: Landings (biomass); array dimensions \code{c(nsim, nyears, nareas)}
\item Discards: Discards (biomass); array dimensions \code{c(nsim, nyears, nareas)}
\item Find: Historical fishing mortality (scale-free); matrix dimensions \code{c(nsim, nyears)}
\item RecDev: Recruitment deviations (historical and projection); matrix dimensions \code{c(nsim, nyears+proyears+maxage)}
\item SPR: Named list with Equilibrium and Dynamic SPR (both matrices iwth dimensions \code{c(nsim, nyears)})
\item Unfished_Equilibrium: A named list with unfished equilibrium numbers and biomass-at-age
}}
\item{\code{Ref}}{A named list with biological reference points:
\itemize{
\item ByYear: A named list with asymptotic reference points (i.e., calculated annually without recruitment deviations) all matrices with dimensions \code{nsim} by \code{nyears+proyears}:
\itemize{
\item N0: Asymptotic unfished total number
\item SN0: Asymptotic unfished spawning number
\item B0: Asymptotic unfished total biomass
\item SSB0: Asymptotic unfished spawning biomass
\item VB0: Asymptotic unfished vulnerable biomass
\item MSY: Asymptotic MSY
\item FMSY: Fishing mortality corresponding with asymptotic MSY
\item SSBMSY: Spawning stock biomass corresponding with asymptotic MSY
\item BMSY: total biomass corresponding with asymptotic MSY
\item VBMSY: Vulnerable biomass corresponding with asymptotic MSY
\item F01: Fishing mortality where the change in yield per recruit is 10\% of that at F = 0
\item Fmax: Fishing mortality that maximizes yield per recruit
\item F_SPR: Fishing mortality corresponding to spawning potential ratio of 20 - 60\% in increments of 5\%; array dimensions \code{c(nsim, 9, nyears+proyears)}
\item Fcrash: Fishing mortality corresponding to the recruits-per-spawner at the origin of the stock-recruit relationship
\item Fmed: Fishing mortality corresponding to the median recruits-per-spawner in the historical period
\item SPRcrash: SPR corresponding to the recruits-per-spawner at the origin of the stock-recruit relationship
}
\item Dynamic_Unfished: A named list with dynamic unfished reference points for each simulation and year:
\itemize{
\item N0: Unfished total numbers
\item B0: Unfished total biomass
\item SN0: Unfished spawning numbers
\item SSB0: Unfished spawning biomass
\item VB0: Unfished vulnerable biomass
\item Rec: Unfished recruitment
}
\item ReferencePoints: A data.frame with \code{nsim} rows with with biological reference points
calculated as an average over age-of-maturity \code{ageM} years around the
current year (i.e. \code{nyears}):
\itemize{
\item N0: Average unfished numbers
\item B0: Average unfished biomass
\item SSB0: Average unfished spawning biomass (used to calculate depletion)
\item SSN0: Average unfished spawning numbers
\item VB0: Average unfished vulnerable biomass (used to calculate depletion if \code{cpar$control$D='VB'})
\item MSY: Average maximum sustainable yield (equilibrium)
\item FMSY: Average fishing mortality corresponding with MSY
\item SSBMSY: Average spawning stock biomass corresponding with MSY
\item BMSY: Average total biomass corresponding with MSY
\item VBMSY: Average vulnerable biomass corresponding with MSY
\item UMSY: Average exploitation rate corresponding with MSY
\item FMSY_M: Average FMSY/M ratio
\item SSBMSY_SSB0: Average ratio of SSBMSY to SSB0
\item BMSY_B0: Average ratio of BMSY to B0
\item VBMSY_VB0: Average ratio of VBMSY to VB0
\item RefY: Maximum yield obtained in forward projections with a fixed F
}
}}
\item{\code{SampPars}}{A named list with all sampled Stock, Fleet, Obs, and Imp parameters}
\item{\code{OM}}{The \code{OM} object (without cpars)}
\item{\code{Misc}}{A list for additional information}
}}
\author{
A. Hordyk
}
\keyword{classes}
|
d1fe08630bc63d6e4ee1a774fd86316460768037 | 62caa74246fd1c213ffcfa336c42be2d612ab668 | /Model Test1.R | 2fa85e0e7291e701d903d25e834a6c08c9ae3b5d | [] | no_license | lmdaros/TestEcoscope | 0bec7133220cc1d2303c1e5464247ab7107a354e | 85b1dc9f8be3e8086c2c48f71b67ea94c1b15c33 | refs/heads/master | 2020-12-11T04:10:25.352664 | 2016-04-27T22:53:43 | 2016-04-27T22:53:43 | 57,253,641 | 0 | 0 | null | 2016-04-27T22:43:20 | 2016-04-27T22:43:19 | R | UTF-8 | R | false | false | 23 | r | Model Test1.R | lm(mpg~wt, data=mtcars) |
9529b90ca95407d8cdfd8c68dfeeb27d01d8933a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/astrolibR/examples/helio.Rd.R | c88b56ab81daa7f44657e1ec8326ed973f516998 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 514 | r | helio.Rd.R | library(astrolibR)
### Name: helio
### Title: Compute (low-precision) heliocentric coordinates for the planets
### Aliases: helio
### Keywords: misc
### ** Examples
# (1) Find the current heliocentric positions of all the planets
jd_today <- 2456877.5
helio(jd_today,seq(1,9))
# (2) Find heliocentric position of Mars on August 23, 2000
# Result: hrad = 1.6407 AU hlong = 124.3197 hlat = 1.7853
# For comparison, the JPL ephemeris gives hrad = 1.6407 AU hlong = 124.2985 hlat = 1.7845
helio(2451779.5,4)
|
a55819b52e134ba71498e219d6502b369f293420 | f9b73300fa533c16813e072b50ae84643d9fbd5a | /src/analysis_3_habitat_correlates_of_snail_density/R/Support/1_5_expand_dat_for_missing_na.R | 721d6318e84c0649136152f069536ff788d41af1 | [] | no_license | dondealban/Wood_et_al_2019_PNAS | 8f986b2dab25d557a8eba082495b30ad8b8ca569 | a9dffb85306fb05ad14dea3bbafcecf825ca66ee | refs/heads/master | 2020-09-16T18:52:15.085809 | 2019-11-23T19:21:03 | 2019-11-23T19:21:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 347 | r | 1_5_expand_dat_for_missing_na.R | # Function to expand density data for additional hmm likelihood statements. Fills 1:0 expanded grid for each row of na categorical variables
# Expand density rows for NA categorical variables (see the file's header
# comment: "Fills 1:0 expanded grid for each row of na categorical variables").
#
# data  a list with (at least) elements `density_data` (a data.frame) and
#       `individual_data`.
#
# NOTE(review): the loop body in the original only copied `density_data`
# into a temporary and discarded it, so the expansion itself is not
# implemented yet; the function returns NULL (invisibly), and this version
# deliberately preserves that behavior.
expand_rows_for_na_cat_vars <- function( data ){
  density_data <- data$density_data
  individual_data <- data$individual_data
  # FIX: use seq_len() rather than 1:nrow(); with a zero-row data.frame,
  # 1:nrow() evaluates to c(1, 0) and iterates twice instead of not at all.
  for (i in seq_len(nrow(density_data))) {
    row_sub <- density_data
  }
}
|
12c1408829337b0097129440405f06671a318749 | b0f969833005451be905f4983481a6748b5c830c | /man/tauWt.Rd | 69935891a28e3b197432572f916e72da8abdb08d | [] | no_license | mariev/PF | 44c4c54d19a30213099c3b836c2b336365807ecb | 1f3014540b57de6199a5cf3eae6382741b281a94 | refs/heads/master | 2020-08-08T02:09:53.055371 | 2019-08-05T23:17:23 | 2019-08-05T23:17:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,971 | rd | tauWt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tauWt.r
\name{tauWt}
\alias{tauWt}
\title{Binomial dispersion: intra-cluster correlation parameter.}
\usage{
tauWt(fit, subset.factor = NULL, fit.only = TRUE, iter.max = 12,
converge = 1e-06, trace.it = FALSE)
}
\arguments{
\item{fit}{A \code{\link{glm}} object.}
\item{subset.factor}{Factor for estimating tau by subset.}
\item{fit.only}{Return only the final fit? If FALSE, also returns the weights and tau estimates.}
\item{iter.max}{Maximum number of iterations.}
\item{converge}{Convergence criterion: difference between model degrees of freedom and Pearson's chi-square. Default 1e-6.}
\item{trace.it}{Display print statments indicating progress}
}
\value{
A list with the following elements.
\item{fit}{the new model fit, updated by the estimated weights}
\item{weights}{vector of weights}
\item{phi}{vector of phi estimates}
}
\description{
MME estimates of binomial dispersion parameter tau (intra-cluster correlation).
}
\details{
Estimates binomial dispersion parameter \eqn{\tau} by the method of moments. Iteratively refits the model by the Williams
procedure, weighting the observations by \eqn{1/\phi_{ij}}{1/\phi_ij},
where \eqn{\phi_{ij}=1+\tau _j(n_{ij}-1)}{\phi_ij=1+\tau_j(n_ij - 1)},
\eqn{j} indexes the subsets, and \eqn{i} indexes the observations.
}
\examples{
birdm.fit <- glm(cbind(y,n-y)~tx-1, binomial, birdm)
RRor(tauWt(birdm.fit))
# 95\% t intervals on 4 df
#
# PF
# PF LL UL
# 0.489 -0.578 0.835
#
# mu.hat LL UL
# txcon 0.737 0.944 0.320
# txvac 0.376 0.758 0.104
#
}
\references{
Williams DA, 1982. Extra-binomial variation in logistic linear models. \emph{Applied Statistics} 31:144-148.
\cr Wedderburn RWM, 1974. Quasi-likelihood functions, generalized linear models, and the Gauss-Newton method. \emph{Biometrika} 61:439-447.
}
\seealso{
\code{\link{phiWt}}, \code{\link{RRor}}.
}
\author{
\link{PF-package}
}
|
7015335e3c36d4f6cca2a18b2bb06b0d44a268e5 | 3456c4248cc7cf37d5e0d38fb0f35614b9975831 | /BasicPokernowStats.R | bacb9689f5cf09e2419200db35ef02df07563885 | [] | no_license | Jamyduff/PokerNowBasicStats | 4724521a60308ac24785666b9306871c1b71fffe | c63ab3f87f2cdaf290fbae2db4f6553ae2d0b233 | refs/heads/main | 2023-03-01T05:47:15.125002 | 2021-02-13T14:20:43 | 2021-02-13T14:20:43 | 338,584,563 | 1 | 0 | null | null | null | null | ISO-8859-13 | R | false | false | 11,129 | r | BasicPokernowStats.R | getwd()
##############################################################################
# BasicPokernowStats: parse a PokerNow log CSV and report, per player,
#   VPIP (voluntarily put money in pot), PFR (pre-flop raise %), and
#   AF   (aggression factor = (bets + raises) / calls).
##############################################################################
#You need to set your own working directory or simply save the log file in your default working directory
#setwd("C:/Users/XXX/Documents/R/Directory")
x <- read.csv("poker_now_log_16.csv") #Update this to whatever you name your poker.now log file.
D <- x$entry # sets the main column of data as a vector "D"
#Reverse vector D so that the action occurs chronologically from top to bottom
D <- rev(D)
# Replace the card-suit symbols with letters (h/d/c/s).  Not needed for the
# stats below, but makes a manually exported log readable.
# NOTE(review): these patterns look like a mojibake'd encoding of the suit
# glyphs; they must match the bytes actually present in the log file.
D <- gsub("āT„", "h", D)
D <- gsub("āT¦", "d", D)
D <- gsub("āT£", "c", D)
D <- gsub("āT", "s", D)
# Every player produces one "joined the game with a stack" line; use the
# unique such lines to recover the player names.
P <- grep("joined the game with a stack", D)
PL <- D[c(P)]
#removes duplicates
PL <- unique(PL)
#install.packages("qdapRegex")
library(qdapRegex)
# Keep only the text between the opening quote and " @ " (the player name).
Players <- rm_between(PL, '\"', ' @ ', extract=TRUE)#[[1]]
print(Players)
#******************************************************************************
#1. VPIP -- voluntarily put money in pot: the percentage of dealt hands a
#   player does NOT fold before the flop.
#******************************************************************************
# Each hand begins with a "Player stacks" line listing everyone dealt in;
# count appearances there to get hands played per player.
GP <- grep("Player stacks", D)
GPD <- D[c(GP)]
# Players within a "Player stacks" line are separated by |.
GPDS <- unlist(strsplit(GPD, "\\|"))
library(qdapRegex)
#Creates a list of every time a player is named as starting a hand
GPDST <- ex_between(GPDS, '\"', "@")
#install.packages("plyr")
library(plyr)
v <- unlist(GPDST, use.names=FALSE)
#Count each time a name is listed as starting a hand
cv <- plyr::count(v)
#install.packages("sjmisc")
library(sjmisc)
# Accumulator for matched action lines, seeded with a dummy "Header" row
# (dropped later because ex_between() turns it into NA).
df <- data.frame("Header")
names(df)<-c("Header")
# Search strings used by the log scan below.
fold <- "folds"
stack <- "Player Stack"
flop <- "flop"
end1 <- "ending"
# Scan the log: within each hand (from its "Player Stack" line until either
# the flop is dealt or the hand ends), record every "folds" action line.
i <- 1
while (D[i] != "") {
  if (str_contains(D[i],stack, ignore.case = TRUE)) {
    while ((!str_contains(D[i],end1, ignore.case = TRUE)) & (!str_contains(D[i],flop, ignore.case = TRUE))) {
      if (str_contains(D[i],fold, ignore.case = TRUE)) {
        de<-data.frame(D[i])
        names(de)<-c("Header")
        df <- rbind(df, de)
      }
      i = i + 1
    }
  }
  i = i + 1
}
# Collapse the matched lines to player names and count pre-flop folds.
vdf <- unlist(df, use.names=FALSE)
PLFLD <- ex_between(vdf, '"', "@")
vplfld <- unlist(PLFLD, use.names=FALSE)
cvplfld <- plyr::count(vplfld)
#install.packages("dplyr")
library(dplyr)
# Join fold counts onto hand counts; players with no folds come out as NA.
vpip <- left_join(cv, cvplfld, by = c("x" = "x"))
vpip[,4] <- vpip[,3] / vpip[,2]
vpip[,5] <- 1 - vpip[,4]
names(vpip)[names(vpip) == "x"] <- "Player Name"
names(vpip)[names(vpip) == "freq.x"] <- "Hands Played"
names(vpip)[names(vpip) == "freq.y"] <- "Hands Folded Pre-Flop"
names(vpip)[names(vpip) == "V4"] <- "% Folded"
names(vpip)[names(vpip) == "V5"] <- "VPIP"
# No recorded folds means 0% folded and a VPIP of 1.
vpip[, 4][is.na(vpip[, 4])] <- 0
vpip[, 5][is.na(vpip[, 5])] <- 1
vpip[, 3][is.na(vpip[, 3])] <- 0
# funs() is deprecated in dplyr >= 0.8 -- use a formula lambda instead.
vpip <- vpip %>% mutate_at(vars("% Folded", "VPIP"), ~ round(., 3))
#******************************************************************************
#2. PFR -- preflop raise percentage: the percentage of dealt hands a player
#   raises before the flop.
#******************************************************************************
# Reset the accumulator and rescan the log, this time collecting pre-flop
# "raise" action lines.
df <- data.frame("Header")
names(df)<-c("Header")
fold <- "folds"
stack <- "Player Stack"
flop <- "flop"
end1 <- "ending"
raise <- "raise"
i <- 1
while (D[i] != "") {
  if (str_contains(D[i],stack, ignore.case = TRUE)) {
    while ((!str_contains(D[i],end1, ignore.case = TRUE)) & (!str_contains(D[i],flop, ignore.case = TRUE))) {
      if (str_contains(D[i],raise, ignore.case = TRUE)) {
        de<-data.frame(D[i])
        names(de)<-c("Header")
        df <- rbind(df, de)
      }
      i = i + 1
    }
  }
  i = i + 1
}
vdf <- unlist(df, use.names=FALSE)
PLRS <- ex_between(vdf, '"', "@")
vplrs <- unlist(PLRS, use.names=FALSE)
cvplrs <- plyr::count(vplrs)
library(dplyr)
#join the number of games played to the raises table
pfr <- left_join(cv, cvplrs, by = c("x" = "x"))
pfr[,4] <- pfr[,3] / pfr[,2]
names(pfr)[names(pfr) == "x"] <- "Player Name"
names(pfr)[names(pfr) == "freq.x"] <- "Hands Played"
names(pfr)[names(pfr) == "freq.y"] <- "Hands Raised Pre-Flop"
names(pfr)[names(pfr) == "V4"] <- "PFR"
# BUGFIX: the original also ran `pfr[, 5][is.na(pfr[, 5])] <- 1` here (copy-
# pasted from the 5-column VPIP table), but pfr only has 4 columns, so
# reading column 5 fails with "undefined columns selected".  Line removed.
pfr[, 4][is.na(pfr[, 4])] <- 0
pfr[, 3][is.na(pfr[, 3])] <- 0
pfr <- pfr %>% mutate_at(vars("PFR"), ~ round(., 3))
#******************************************************************************
#3. AF -- Aggression Factor, per the PokerTracker definition:
#   ( Total Times Bet + Total Times Raised ) / Total Times Called.
#******************************************************************************
dfr <- data.frame("Header")
names(dfr)<-c("Header")
fold <- "folds"
stack <- "Player Stack"
flop <- "flop"
end1 <- "ending"
raise <- "raise"
bet <- "bets"
call <- "calls"
# Pass 1: record every "raise" action over the whole hand (not just pre-flop).
i <- 1
while (D[i] != "") {
  if (str_contains(D[i],stack, ignore.case = TRUE)) {
    while (!str_contains(D[i],end1, ignore.case = TRUE)) {
      if (str_contains(D[i],raise, ignore.case = TRUE)) {
        de<-data.frame(D[i])
        names(de)<-c("Header")
        dfr <- rbind(dfr, de)
      }
      i = i + 1
    }
  }
  i = i + 1
}
# Pass 2: append every "bets" action to the same accumulator.
i <- 1
while (D[i] != "") {
  if (str_contains(D[i],stack, ignore.case = TRUE)) {
    while (!str_contains(D[i],end1, ignore.case = TRUE)) {
      if (str_contains(D[i],bet, ignore.case = TRUE)) {
        de<-data.frame(D[i])
        names(de)<-c("Header")
        dfr <- rbind(dfr, de)
      }
      i = i + 1
    }
  }
  i = i + 1
}
# Collapse raises+bets to player names and count per player.
vdfr <- unlist(dfr, use.names=FALSE)
PLRS <- ex_between(vdfr, '"', "@")
vplrs <- unlist(PLRS, use.names=FALSE)
cvplrs <- plyr::count(vplrs)
# Pass 3: count "calls" actions into a separate accumulator.
i <- 1
dfc <- data.frame("Header")
names(dfc)<-c("Header")
while (D[i] != "") {
  if (str_contains(D[i],stack, ignore.case = TRUE)) {
    while (!str_contains(D[i],end1, ignore.case = TRUE)) {
      if (str_contains(D[i],call, ignore.case = TRUE)) {
        de<-data.frame(D[i])
        names(de)<-c("Header")
        dfc <- rbind(dfc, de)
      }
      i = i + 1
    }
  }
  i = i + 1
}
vdfc <- unlist(dfc, use.names=FALSE)
PLC <- ex_between(vdfc, '"', "@")
vplc <- unlist(PLC, use.names=FALSE)
cvplc <- plyr::count(vplc)
# Drop the NA rows produced by the seed "Header" row / unmatched lines.
cvplc <- cvplc[complete.cases(cvplc), ]
cvplrs <- cvplrs[complete.cases(cvplrs), ]
library(dplyr)
AF <- left_join(cvplc, cvplrs, by = c("x" = "x"))
AF[,4] <- AF[,3] / AF[,2]
names(AF)[names(AF) == "x"] <- "Player Name"
names(AF)[names(AF) == "freq.x"] <- "Calls"
names(AF)[names(AF) == "freq.y"] <- "Raises + Bets"
names(AF)[names(AF) == "V4"] <- "Aggression Factor"
# BUGFIX: as in the PFR section, the original `AF[, 5][is.na(AF[, 5])] <- 1`
# read the non-existent column 5 of the 4-column AF table and errored with
# "undefined columns selected".  Line removed.
AF[, 4][is.na(AF[, 4])] <- 0
AF[, 3][is.na(AF[, 3])] <- 0
AF <- AF %>% mutate_at(vars("Aggression Factor"), ~ round(., 3))
# Combine the three stat tables and keep the headline columns.
FindT <- left_join(vpip, AF, by = c("Player Name" = "Player Name"))
FindT <- left_join(FindT, pfr, by = c("Player Name" = "Player Name"))
Find <- subset(FindT, select = c("Player Name", "VPIP", "PFR", "Aggression Factor"))
print(Find)
library(ggplot2)
# NOTE(review): the plots below are base-graphics barplot() calls (ggplot2 is
# loaded but unused); `name=` is a unique partial match for barplot's
# `names.arg` argument.
barplot(Find$VPIP, main="VPIP for each player", xlab = "Player Names", ylab="VPIP", name=Find[,1], col = rainbow(25),las=2, cex.names=.8, ylim=c(0.0,1.00))
barplot(Find$PFR, main="PFR for each player", xlab = "Player Names", ylab="PFR", name=Find[,1], col = rainbow(25), las=2, cex.names=.8, ylim=c(0.0,0.35))
barplot(Find$`Aggression Factor`, main="AF for each player", xlab = "Player Names", ylab="AF", name=Find[,1], col = rainbow(25), las=2, cex.names=.8, ylim=c(0.0,2.0))
print(vpip)
print(pfr)
print(AF)
print(Find)
b584fca349175335bb3ddd67057a3e10fcc384f5 | 844b5558ae4dcbe0607fcab5e1fafb9f1f4de4f8 | /plot1.R | bb4774e5cc57539f12d45c8830086d5099316b08 | [] | no_license | rathimala/ExData_CourseProject1 | b381074c969a244c703aa0963026646b61097998 | c863a78fdf01ad157c3990ef3873bd943eaa322b | refs/heads/master | 2021-01-19T03:38:27.261394 | 2015-02-08T09:18:09 | 2015-02-08T09:18:09 | 30,485,176 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,113 | r | plot1.R | #### Exploratory Data Analysis - Course Project 1 - Plot1.R
# Plot 1: histogram of Global Active Power over 2007-02-01 .. 2007-02-02.
#load data set - household_power_consumption
# NOTE(review): the file uses "?" for missing values, which is why every
# column is read as a factor below; reading with na.strings="?" would give
# numeric columns directly.
pcData <- as.data.frame(read.csv("household_power_consumption.txt", header = TRUE, sep = ";"))
str(pcData)
# 'data.frame':  2075259 obs. of 9 variables:
# $ Date : Factor w/ 1442 levels "1/1/2007","1/1/2008",..: 342 342 342 342 342 342 342 342 342 342 ...
# $ Time : Factor w/ 1440 levels "00:00:00","00:01:00",..: 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 ...
# $ Global_active_power : Factor w/ 4187 levels "?","0.076","0.078",..: 2082 2654 2661 2668 1807 1734 1825 1824 1808 1805 ...
# $ Global_reactive_power: Factor w/ 533 levels "?","0.000","0.046",..: 189 198 229 231 244 241 240 240 235 235 ...
# $ Voltage : Factor w/ 2838 levels "?","223.200",..: 992 871 837 882 1076 1010 1017 1030 907 894 ...
# $ Global_intensity : Factor w/ 222 levels "?","0.200","0.400",..: 53 81 81 81 40 36 40 40 40 40 ...
# $ Sub_metering_1 : Factor w/ 89 levels "?","0.000","1.000",..: 2 2 2 2 2 2 2 2 2 2 ...
# $ Sub_metering_2 : Factor w/ 82 levels "?","0.000","1.000",..: 3 3 14 3 3 14 3 3 3 14 ...
# $ Sub_metering_3 : num 17 16 17 17 17 17 17 17 17 16 ...
datetime <- paste0(pcData$Date, " ", pcData$Time) #concatenate Date and Time variables
head(datetime)
# [1] "16/12/2006 17:24:00" "16/12/2006 17:25:00" "16/12/2006 17:26:00" "16/12/2006 17:27:00"
# [5] "16/12/2006 17:28:00" "16/12/2006 17:29:00"
datetime <- strptime(datetime, "%d/%m/%Y %H:%M:%S")
head(datetime)
# [1] "2006-12-16 17:24:00 MYT" "2006-12-16 17:25:00 MYT" "2006-12-16 17:26:00 MYT"
# [4] "2006-12-16 17:27:00 MYT" "2006-12-16 17:28:00 MYT" "2006-12-16 17:29:00 MYT"
class(datetime) #change the datetime variable into date() format
# [1] "POSIXlt" "POSIXt"
# Converting to Date drops the time-of-day component; only the calendar day
# is needed for the two-day subset below.
datetime <- as.Date(datetime)
class(datetime)
# [1] "Date"
head(datetime) # change the data format to date() format
# [1] "2006-12-16" "2006-12-16" "2006-12-16" "2006-12-16" "2006-12-16" "2006-12-16"
pcData$datetime <- datetime # add a datetime variable (date type) to data frame.
head(pcData)
# Date Time Global_active_power Global_reactive_power Voltage Global_intensity
# 1 16/12/2006 17:24:00 4.216 0.418 234.840 18.400
# 2 16/12/2006 17:25:00 5.360 0.436 233.630 23.000
# 3 16/12/2006 17:26:00 5.374 0.498 233.290 23.000
# 4 16/12/2006 17:27:00 5.388 0.502 233.740 23.000
# 5 16/12/2006 17:28:00 3.666 0.528 235.680 15.800
# 6 16/12/2006 17:29:00 3.520 0.522 235.020 15.000
# Sub_metering_1 Sub_metering_2 Sub_metering_3 datetime
# 1 0.000 1.000 17 2006-12-16
# 2 0.000 1.000 16 2006-12-16
# 3 0.000 2.000 17 2006-12-16
# 4 0.000 1.000 17 2006-12-16
# 5 0.000 1.000 17 2006-12-16
# 6 0.000 2.000 17 2006-12-16
# Keep only the two target days.  The character bounds are coerced to Date
# for the comparison, so this works as intended.
Data_2days <- subset(pcData, datetime >= "2007-02-01" & datetime <= "2007-02-02")
str(Data_2days)
#
# 'data.frame': 2880 obs. of 10 variables:
# $ Date : Factor w/ 1442 levels "1/1/2007","1/1/2008",..: 16 16 16 16 16 16 16 16 16 16 ...
# $ Time : Factor w/ 1440 levels "00:00:00","00:01:00",..: 1 2 3 4 5 6 7 8 9 10 ...
# $ Global_active_power : Factor w/ 4187 levels "?","0.076","0.078",..: 127 127 126 126 125 124 124 124 124 82 ...
# $ Global_reactive_power: Factor w/ 533 levels "?","0.000","0.046",..: 44 45 46 47 45 43 43 43 44 2 ...
# $ Voltage : Factor w/ 2838 levels "?","223.200",..: 1823 1840 1859 1898 1824 1737 1754 1771 1778 1797 ...
# $ Global_intensity : Factor w/ 222 levels "?","0.200","0.400",..: 8 8 8 8 8 8 8 8 8 6 ...
# $ Sub_metering_1 : Factor w/ 89 levels "?","0.000","1.000",..: 2 2 2 2 2 2 2 2 2 2 ...
# $ Sub_metering_2 : Factor w/ 82 levels "?","0.000","1.000",..: 2 2 2 2 2 2 2 2 2 2 ...
# $ Sub_metering_3 : num 0 0 0 0 0 0 0 0 0 0 ...
# $ datetime : Date, format: "2007-02-01" "2007-02-01" "2007-02-01" ...
# NOTE(review): object_size() comes from the pryr (or lobstr) package, which
# is never loaded in this script -- this call fails unless it is attached.
object_size(Data_2days)
# 543 kB
head(Data_2days)
# Date Time Global_active_power Global_reactive_power Voltage Global_intensity
# 66637 1/2/2007 00:00:00 0.326 0.128 243.150 1.400
# 66638 1/2/2007 00:01:00 0.326 0.130 243.320 1.400
# 66639 1/2/2007 00:02:00 0.324 0.132 243.510 1.400
# 66640 1/2/2007 00:03:00 0.324 0.134 243.900 1.400
# 66641 1/2/2007 00:04:00 0.322 0.130 243.160 1.400
# 66642 1/2/2007 00:05:00 0.320 0.126 242.290 1.400
# Sub_metering_1 Sub_metering_2 Sub_metering_3 datetime
# 66637 0.000 0.000 0 2007-02-01
# 66638 0.000 0.000 0 2007-02-01
# 66639 0.000 0.000 0 2007-02-01
# 66640 0.000 0.000 0 2007-02-01
# 66641 0.000 0.000 0 2007-02-01
# 66642 0.000 0.000 0 2007-02-01
tail(Data_2days)
# Date Time Global_active_power Global_reactive_power Voltage Global_intensity
# 69511 2/2/2007 23:54:00 3.696 0.226 240.710 15.200
# 69512 2/2/2007 23:55:00 3.696 0.226 240.900 15.200
# 69513 2/2/2007 23:56:00 3.698 0.226 241.020 15.200
# 69514 2/2/2007 23:57:00 3.684 0.224 240.480 15.200
# 69515 2/2/2007 23:58:00 3.658 0.220 239.610 15.200
# 69516 2/2/2007 23:59:00 3.680 0.224 240.370 15.200
# Sub_metering_1 Sub_metering_2 Sub_metering_3 datetime
# 69511 0.000 1.000 17 2007-02-02
# 69512 0.000 1.000 18 2007-02-02
# 69513 0.000 2.000 18 2007-02-02
# 69514 0.000 1.000 18 2007-02-02
# 69515 0.000 1.000 17 2007-02-02
# 69516 0.000 2.000 18 2007-02-02
## Plot1.R
class(Data_2days$Global_active_power)
# [1] "factor"
#convert the data type from factor to numeric
# (as.character first, because as.numeric on a factor would return the
# underlying level codes, not the stored values)
Data_2days$Global_active_power <- as.numeric(as.character(Data_2days$Global_active_power))
summary(Data_2days$Global_active_power)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.220 0.320 1.060 1.213 1.688 7.482
# Write the histogram to a 480x480 PNG, as the assignment requires.
png(file="plot1.png", width=480, height=480, units="px")
hist(Data_2days$Global_active_power,col="red", main = " Global Active Power",
     xlab = " Global Active Power (Kilowatts)" )
dev.off()
# RStudioGD
# 2
|
b2da9bd9ff10dec6d8546165a0de4cea9613fedb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/verification/examples/conditional.quantile.Rd.R | 8e1bc7411510b4707a3f692d486f5bab51a55cc2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 722 | r | conditional.quantile.Rd.R | library(verification)
### Name: conditional.quantile
### Title: Conditional Quantile Plot
### Aliases: conditional.quantile
### Keywords: file
### ** Examples
# Build a synthetic forecast/observation pair: both series are drawn
# around the same slowly increasing mean `m`, so they are correlated,
# then rounded to whole units.
set.seed(10)
m<- seq(10, 25, length = 1000)
frcst <- round(rnorm(1000, mean = m, sd = 2) )
obs<- round(rnorm(1000, mean = m, sd = 2 ))
# Bin edges for the forecast axis: 0, 1, ..., 30.
bins <- seq(0, 30,1)
thrs<- c( 10, 20) # number of obs needed in a bin before a statistic is printed (first value: 1st/4th quartiles; second value: 2nd/3rd quartiles)
conditional.quantile(frcst, obs, bins, thrs, main = "Sample Conditional Quantile Plot")
#### Or plot a ``cont.cont'' class object produced by verify().
obs<- rnorm(100)
pred<- rnorm(100)
baseline <- rnorm(100, sd = 0.5)
A<- verify(obs, pred, baseline = baseline, frcst.type = "cont", obs.type = "cont")
plot(A)
|
67b62c3691f127b188d0317f68cac4447d6be0a7 | e7c7e8b21ab45ccf91c01f8faa4d11641606ba12 | /R/20200511/metabolome/data_preparation_metabolome.R | 071193827ff87d39e7aa245bd048c5cf7c2b7882 | [
"Apache-2.0"
] | permissive | mohanbabu29/precision_exposome | 09120274ebb7103ca3c73c002497406709a372e3 | 600c20db7eff1ddfc7b2656ddc538153b1044961 | refs/heads/main | 2023-03-18T02:42:36.202085 | 2021-03-11T17:24:59 | 2021-03-11T17:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,723 | r | data_preparation_metabolome.R | sxtTools::setwd_project()
# --- Setup: working directory and cached input objects -------------------
library(tidyverse)
setwd("data_20200511/metabolome/")
# NOTE(review): rm(list = ls()) wipes the global environment; acceptable in
# a standalone script, but surprising if sourced from an interactive session.
rm(list = ls())
load("clinic_data")
load("met_data")
load("met_tag")
# Keep only subject 69-001's samples collected between 2016-01-12 and
# 2016-03-03 (bounds are ISO-formatted date strings).
met_data <-
  met_data %>%
  dplyr::filter(SubjectID == "69-001") %>%
  dplyr::filter(CollectionDate >= "2016-01-12",
                CollectionDate <= "2016-03-03")
variable_info <- met_tag
expression_data <- met_data
# Sample metadata: one row per sample with subject, collection date and
# the CL1-CL4 labels.
sample_info <-
  expression_data %>%
  dplyr::select(sample_id = SampleID,
                subject_id = SubjectID,
                CollectionDate,
                CL1, CL2, CL3, CL4)
# Drop the metadata columns so only metabolite intensity columns remain.
expression_data <-
  expression_data %>%
  dplyr::select(-c(SampleID, SubjectID,
                   CollectionDate,
                   CL1, CL2, CL3, CL4))
# Transpose to the conventional omics layout: rows = features (peaks),
# columns = samples.
expression_data <-
  t(expression_data) %>%
  as.data.frame()
colnames(expression_data) <- sample_info$sample_id
rownames(expression_data)
# De-duplicate features, keep only those present in expression_data, and
# reorder variable_info to match the expression_data row order exactly.
variable_info <-
  variable_info %>%
  dplyr::distinct(Compounds_ID, .keep_all = TRUE)
variable_info <-
  variable_info %>%
  dplyr::filter(Compounds_ID %in% rownames(expression_data))
variable_info <-
  variable_info[match(rownames(expression_data), variable_info$Compounds_ID),]
# Sanity check (printed when run interactively): should be all TRUE.
variable_info$Compounds_ID == rownames(expression_data)
variable_info <-
  variable_info %>%
  dplyr::select(Compounds_ID, everything()) %>%
  dplyr::rename(peak_name = Compounds_ID)
# Persist the three aligned tables with save() for downstream scripts.
save(variable_info, file = "variable_info")
save(sample_info, file = "sample_info")
save(expression_data, file = "expression_data")
# --- Export the same three tables to a single Excel workbook -------------
library(openxlsx)
wb = createWorkbook()
modifyBaseFont(wb, fontSize = 12, fontName = "Arial Narrow")
addWorksheet(wb, sheetName = "Sample information", gridLines = TRUE)
addWorksheet(wb, sheetName = "Variable information", gridLines = TRUE)
addWorksheet(wb, sheetName = "Expression data", gridLines = TRUE)
# Freeze the header row (and the first column for the two metadata sheets).
freezePane(wb, sheet = 1, firstRow = TRUE, firstCol = TRUE)
freezePane(wb, sheet = 2, firstRow = TRUE, firstCol = TRUE)
freezePane(wb, sheet = 3, firstRow = TRUE, firstCol = FALSE)
writeDataTable(wb, sheet = 1, x = sample_info,
               colNames = TRUE, rowNames = FALSE)
writeDataTable(wb, sheet = 2, x = variable_info,
               colNames = TRUE, rowNames = FALSE)
writeDataTable(wb, sheet = 3, x = expression_data,
               colNames = TRUE, rowNames = FALSE)
saveWorkbook(wb, "metabolome_data.xlsx", overwrite = TRUE)
# clinic_data <-
#   clinic_data %>%
#   dplyr::filter(SubjectID == "69-001") %>%
#   dplyr::rename(subject_id = SubjectID,
#                 sample_id = SampleID)
# save(clinic_data, file = "clinic_data")
load("clinic_data")
###match using database from Peng
# variable_info_pos <-
# variable_info %>%
# dplyr::filter(stringr::str_detect(peak_name, "p")) %>%
# dplyr::select(peak_name, Mass) %>%
# dplyr::mutate(rt = stringr::str_split(peak_name, "\\_") %>%
# lapply(function(x)x[3]) %>%
# unlist()
# ) %>%
# dplyr::mutate(rt = as.numeric(rt) * 60) %>%
# dplyr::rename(name = peak_name, mz = Mass)
#
# variable_info_pos$mz <-
# stringr::str_split(variable_info_pos$mz, pattern = "\\_") %>%
# lapply(function(x){x[1]}) %>% unlist() %>% as.numeric()
#
# variable_info_neg <-
# variable_info %>%
# dplyr::filter(stringr::str_detect(peak_name, "n")) %>%
# dplyr::select(peak_name, Mass) %>%
# dplyr::mutate(rt =
# stringr::str_extract(peak_name,
# "[0-9]{1}\\.[0-9]{1}")) %>%
# dplyr::mutate(rt = as.numeric(rt) * 10) %>%
# dplyr::rename(name = peak_name, mz = Mass)
#
# variable_info_neg$mz <-
# stringr::str_split(variable_info_neg$mz, pattern = "\\_") %>%
# lapply(function(x){x[1]}) %>% unlist() %>% as.numeric()
#
# variable_info_pos_hilic <-
# variable_info_pos %>%
# dplyr::filter(stringr::str_detect(name, "HILIC"))
#
# variable_info_pos_rplc <-
# variable_info_pos %>%
# dplyr::filter(stringr::str_detect(name, "RPLC"))
#
# variable_info_neg_hilic <-
# variable_info_neg %>%
# dplyr::filter(stringr::str_detect(name, "HILIC"))
#
# variable_info_neg_rplc <-
# variable_info_neg %>%
# dplyr::filter(stringr::str_detect(name, "RPLC"))
#
# write.csv(variable_info_pos, "variable_info_pos.csv", row.names = FALSE)
# write.csv(variable_info_pos_hilic, "variable_info_pos_hilic.csv", row.names = FALSE)
# write.csv(variable_info_pos_rplc, "variable_info_pos_rplc.csv", row.names = FALSE)
#
# write.csv(variable_info_neg, "variable_info_neg.csv", row.names = FALSE)
# write.csv(variable_info_neg_hilic, "variable_info_neg_hilic.csv", row.names = FALSE)
# write.csv(variable_info_neg_rplc, "variable_info_neg_rplc.csv", row.names = FALSE)
#
#
# library(metID)
#
# result1_pos <- identify_metabolites(ms1.data = "variable_info_pos.csv",
# polarity = "positive", ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "list1_ms1_database")
#
#
# result2_pos <- identify_metabolites(ms1.data = "variable_info_pos.csv",
# polarity = "positive", ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "list2_ms1_database")
#
# result3_pos <- identify_metabolites(ms1.data = "variable_info_pos.csv",
# polarity = "positive",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "nonspecific_biomarkers_ms1_database")
#
# result4_pos <- identify_metabolites(ms1.data = "variable_info_pos.csv",
# polarity = "positive",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "select_exposome_ms1_database")
# # load("select_exposome")
# # result4_pos <- mz_match(ms1.table = variable_info_pos,
# # database = select_exposome,
# # mz.error.tol = 25)
#
# result5_pos <- identify_metabolites(ms1.data = "variable_info_pos.csv",
# polarity = "positive",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "t3db_ms1_database")
#
# result6_pos <- identify_metabolites(ms1.data = "variable_info_pos.csv",
# polarity = "positive",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "specific_biomarker_ms1_database")
#
# result7_pos <- identify_metabolites(ms1.data = "variable_info_pos.csv",
# polarity = "positive",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "hmdbMS1Database0.0.1")
#
#
# result8_pos <- identify_metabolites(ms1.data = "variable_info_pos_hilic.csv",
# polarity = "positive",
# ce = 'all',
# rt.match.tol = 30,
# column = "hilic",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "msDatabase_hilic0.0.2")
#
# result9_pos <- identify_metabolites(ms1.data = "variable_info_pos_rplc.csv",
# polarity = "positive",
# ce = 'all',
# rt.match.tol = 30,
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "msDatabase_rplc0.0.2")
#
#
# annotation_table_pos1 <-
# get_identification_table(result1_pos,
# result2_pos,
# result4_pos,
# result5_pos,
# result6_pos,
# result7_pos,
# type = "old",
# candidate.num = 1)
#
#
# annotation_table_pos1$Identification <-
# annotation_table_pos1$Identification %>%
# lapply(function(x){
# if(is.na(x)){
# return(NA)
# }else{
# x <- stringr::str_split(x, "\\{\\}")[[1]]
# x <- grep("\\(M\\+H\\)|\\(2M|H2O", x, value = TRUE)
# if(length(x) == 0){
# return(NA)
# }else{
# paste(x, collapse = "{}")
# }
# }
# }) %>%
# unlist()
#
# annotation_table_pos1 <-
# metID::trans2newStyle(identification.table = annotation_table_pos1)
#
# annotation_table_pos1 <-
# annotation_table_pos1 %>%
# dplyr::filter(!is.na(Compound.name))
#
# annotation_table_pos2 <-
# get_identification_table(result8_pos,
# type = "old",
# candidate.num = 1)
#
# annotation_table_pos2$Identification <-
# annotation_table_pos2$Identification %>%
# lapply(function(x){
# if(is.na(x)){
# return(NA)
# }else{
# x <- stringr::str_split(x, "\\{\\}")[[1]]
# x <- grep("\\(M\\+H\\)|\\(2M|H2O", x, value = TRUE)
# if(length(x) == 0){
# return(NA)
# }else{
# paste(x, collapse = "{}")
# }
# }
# }) %>%
# unlist()
#
# annotation_table_pos2 <-
# metID::trans2newStyle(identification.table = annotation_table_pos2)
#
# annotation_table_pos2 <-
# annotation_table_pos2 %>%
# dplyr::filter(!is.na(Compound.name))
#
#
# annotation_table_pos3 <-
# get_identification_table(result9_pos,
# type = "old",
# candidate.num = 1)
#
#
# annotation_table_pos3$Identification <-
# annotation_table_pos3$Identification %>%
# lapply(function(x){
# if(is.na(x)){
# return(NA)
# }else{
# x <- stringr::str_split(x, "\\{\\}")[[1]]
# x <- grep("\\(M\\+H\\)|\\(2M|H2O", x, value = TRUE)
# if(length(x) == 0){
# return(NA)
# }else{
# paste(x, collapse = "{}")
# }
# }
# }) %>%
# unlist()
#
# annotation_table_pos3 <-
# metID::trans2newStyle(identification.table = annotation_table_pos3)
#
#
# annotation_table_pos3 <-
# annotation_table_pos3 %>%
# dplyr::filter(!is.na(Compound.name))
#
#
# # result4_pos <-
# # result4_pos %>%
# # dplyr::filter(!is.na(Compound.name))
# #
# # annotation_table_pos1 <-
# # annotation_table_pos1 %>%
# # dplyr::filter(!name %in% result4_pos$name)
# #
# #
# # annotation_table_pos1 <-
# # rbind(annotation_table_pos1, result4_pos)
#
# annotation_table_pos1 <-
# annotation_table_pos1 %>%
# dplyr::filter(!name %in% annotation_table_pos2$name) %>%
# dplyr::filter(!name %in% annotation_table_pos3$name)
#
#
#
# annotation_table_pos <-
# rbind(annotation_table_pos1[,-4],
# annotation_table_pos2,
# annotation_table_pos3)
#
# write.csv(annotation_table_pos,
# file = "annotation_table_pos.csv",
# row.names = FALSE)
#
#
#
# #####negative
# result1_neg <- identify_metabolites(ms1.data = "variable_info_neg.csv",
# polarity = "negative", ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "list1_ms1_database")
#
#
# result2_neg <- identify_metabolites(ms1.data = "variable_info_neg.csv",
# polarity = "negative", ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "list2_ms1_database")
#
# result3_neg <- identify_metabolites(ms1.data = "variable_info_neg.csv",
# polarity = "negative",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "nonspecific_biomarkers_ms1_database")
#
# result4_neg <- identify_metabolites(ms1.data = "variable_info_neg.csv",
# polarity = "negative",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "select_exposome_ms1_database")
# # load("select_exposome")
# # result4_neg <- mz_match(ms1.table = variable_info_neg,
# # database = select_exposome,
# # mz.error.tol = 25)
#
# result5_neg <- identify_metabolites(ms1.data = "variable_info_neg.csv",
# polarity = "negative",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "t3db_ms1_database")
#
# result6_neg <- identify_metabolites(ms1.data = "variable_info_neg.csv",
# polarity = "negative",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "specific_biomarker_ms1_database")
#
# result7_neg <- identify_metabolites(ms1.data = "variable_info_neg.csv",
# polarity = "negative",
# ce = 'all',
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "hmdbMS1Database0.0.1")
#
#
# result8_neg <- identify_metabolites(ms1.data = "variable_info_neg_hilic.csv",
# polarity = "negative",
# ce = 'all',
# rt.match.tol = 30,
# column = "hilic",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "msDatabase_hilic0.0.2")
#
# result9_neg <- identify_metabolites(ms1.data = "variable_info_neg_rplc.csv",
# polarity = "negative",
# ce = 'all',
# rt.match.tol = 30,
# column = "rp",
# total.score.tol = 0.5,
# candidate.num = 3,
# threads = 3,
# database = "msDatabase_rplc0.0.2")
#
#
# annotation_table_neg1 <-
# get_identification_table(result1_neg,
# result2_neg,
# result4_neg,
# result5_neg,
# result6_neg,
# result7_neg,
# type = "old",
# candidate.num = 1)
#
#
# annotation_table_neg1$Identification <-
# annotation_table_neg1$Identification %>%
# lapply(function(x){
# if(is.na(x)){
# return(NA)
# }else{
# x <- stringr::str_split(x, "\\{\\}")[[1]]
# x <- grep("\\(M\\-H\\)|\\(2M|H2O", x, value = TRUE)
# if(length(x) == 0){
# return(NA)
# }else{
# paste(x, collapse = "{}")
# }
# }
# }) %>%
# unlist()
#
# annotation_table_neg1 <-
# metID::trans2newStyle(identification.table = annotation_table_neg1)
#
# annotation_table_neg1 <-
# annotation_table_neg1 %>%
# dplyr::filter(!is.na(Compound.name))
#
# annotation_table_neg2 <-
# get_identification_table(result8_neg,
# type = "old",
# candidate.num = 1)
#
# annotation_table_neg2$Identification <-
# annotation_table_neg2$Identification %>%
# lapply(function(x){
# if(is.na(x)){
# return(NA)
# }else{
# x <- stringr::str_split(x, "\\{\\}")[[1]]
# x <- grep("\\(M\\-H\\)|\\(2M|H2O", x, value = TRUE)
# if(length(x) == 0){
# return(NA)
# }else{
# paste(x, collapse = "{}")
# }
# }
# }) %>%
# unlist()
#
# annotation_table_neg2 <-
# metID::trans2newStyle(identification.table = annotation_table_neg2)
#
# annotation_table_neg2 <-
# annotation_table_neg2 %>%
# dplyr::filter(!is.na(Compound.name))
#
#
# annotation_table_neg3 <-
# get_identification_table(result9_neg,
# type = "old",
# candidate.num = 1)
#
#
# annotation_table_neg3$Identification <-
# annotation_table_neg3$Identification %>%
# lapply(function(x){
# if(is.na(x)){
# return(NA)
# }else{
# x <- stringr::str_split(x, "\\{\\}")[[1]]
# x <- grep("\\(M\\-H\\)|\\(2M|H2O", x, value = TRUE)
# if(length(x) == 0){
# return(NA)
# }else{
# paste(x, collapse = "{}")
# }
# }
# }) %>%
# unlist()
#
# annotation_table_neg3 <-
# metID::trans2newStyle(identification.table = annotation_table_neg3)
#
#
# annotation_table_neg3 <-
# annotation_table_neg3 %>%
# dplyr::filter(!is.na(Compound.name))
#
#
# # result4_neg <-
# # result4_neg %>%
# # dplyr::filter(!is.na(Compound.name))
# #
# # annotation_table_neg1 <-
# # annotation_table_neg1 %>%
# # dplyr::filter(!name %in% result4_neg$name)
# #
# #
# # annotation_table_neg1 <-
# # rbind(annotation_table_neg1, result4_neg)
#
# annotation_table_neg1 <-
# annotation_table_neg1 %>%
# dplyr::filter(!name %in% annotation_table_neg2$name) %>%
# dplyr::filter(!name %in% annotation_table_neg3$name)
#
#
#
# annotation_table_neg <-
# rbind(annotation_table_neg1[,-4],
# annotation_table_neg2,
# annotation_table_neg3)
#
# write.csv(annotation_table_neg,
# file = "annotation_table_neg.csv",
# row.names = FALSE)
### Internal exposome table (provided externally by Peng): read sheet 2 of
### the curated Excel workbook for downstream matching.
internal_exposome <- readxl::read_xlsx("Internal exposome (1).xlsx", sheet = 2)
|
9e470dc61bab9ca1d7fb87b3d03632af09907900 | 8f1fb4630ff3a4b45e3f250100a26809a1e9b05e | /man/closeSession.Rd | 0547314657c64bdb5546883ba13d6fba607b8b98 | [] | no_license | kpnDataScienceLab/modelFactoryR | 9ad0e5395766ef6704bc31bdbd6d686b77800f7a | 8890e7f7f0173fe0d054cbe3aa5311fc0396706a | refs/heads/master | 2020-12-31T06:56:42.958537 | 2017-01-01T21:34:59 | 2017-01-01T21:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 579 | rd | closeSession.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect.R
\name{closeSession}
\alias{closeSession}
\title{Close the current session and update the end_time of the run in the model_factory.run_history table for the current session with the current timestamp}
\usage{
closeSession()
}
\value{
The result of taQuery to update the model_factory.run_history table
}
\description{
Close the current session and update the end_time of the run in the model_factory.run_history table for the current session with the current timestamp.
}
\examples{
closeSession()
}
|
25f16b205a7b5d8cdb06ae471c0de3c82158d5e3 | 14c2f47364f72cec737aed9a6294d2e6954ecb3e | /man/minGroupCount.Rd | a32302dc5fc2ee91d458098ece3043bef720b0a9 | [] | no_license | bedapub/ribiosNGS | ae7bac0e30eb0662c511cfe791e6d10b167969b0 | a6e1b12a91068f4774a125c539ea2d5ae04b6d7d | refs/heads/master | 2023-08-31T08:22:17.503110 | 2023-08-29T15:26:02 | 2023-08-29T15:26:02 | 253,536,346 | 2 | 3 | null | 2022-04-11T09:36:23 | 2020-04-06T15:18:41 | R | UTF-8 | R | false | true | 1,041 | rd | minGroupCount.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/minGroupCount.R
\name{minGroupCount}
\alias{minGroupCount}
\alias{minGroupCount.DGEList}
\alias{minGroupCount.EdgeObject}
\title{Return the size of the smallest group}
\usage{
minGroupCount(obj)
\method{minGroupCount}{DGEList}(obj)
\method{minGroupCount}{EdgeObject}(obj)
}
\arguments{
\item{obj}{A \code{DGEList} or \code{EdgeObject} object}
}
\value{
Integer
}
\description{
Return the size of the smallest group
}
\section{Methods (by class)}{
\itemize{
\item \code{minGroupCount(DGEList)}: Return the size of the smallest group defined in
the \code{DGEList} object
\item \code{minGroupCount(EdgeObject)}: Return the size of the smallest group defined in
the \code{EdgeObject} object
}}
\examples{
y <- matrix(rnbinom(12000,mu=10,size=2),ncol=6)
d <- DGEList(counts=y, group=rep(1:3,each=2))
minGroupCount(d) ## 2
d2 <- DGEList(counts=y, group=rep(1:2,each=3))
minGroupCount(d2) ## 3
d3 <- DGEList(counts=y, group=rep(1:3, 1:3))
minGroupCount(d3) ## 1
}
|
0ea4ab8355f14f241ee889d1b4fef225049f2e23 | 207befd999fb4e9b8fe3590aaed8aa9820ebcec4 | /R-code/02-MomentumFunds.R | 06b1df9a03e0d2dedf593010a19135118888b92f | [] | no_license | HectorMurman/Momentum | f01bbd00df9c1f213bca921bdb935a2dc4e54c1d | 05f9f6dc6253d372478baca55ca41bf3058d9241 | refs/heads/master | 2023-03-16T05:03:24.920459 | 2015-01-22T01:15:44 | 2015-01-22T01:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,172 | r | 02-MomentumFunds.R | # momentum funds
library(dplyr)
library(reshape2)
library(ggplot2)

# Simple period-over-period returns from a closing-price series.
# The first element is 0 by convention, matching the original loop-based
# computation. Series shorter than 2 observations return all zeros, which
# also fixes the original off-by-one hazard: `for (i in 2:N)` counted
# DOWN (2, 1) when N == 1.
simple_returns <- function(close) {
  n <- length(close)
  if (n < 2) {
    return(rep(0, n))
  }
  c(0, diff(close) / close[-n])
}

# Compare one momentum fund against the SPDR benchmark: read both CSVs,
# inner-join on Date, compute daily and cumulative returns, and plot the
# two cumulative-return series. This replaces four copy-pasted sections
# (AMOMX, BRSMX, PDP, RYAMX) that differed only in the fund file/name.
# Relies on cumret() being defined elsewhere in the project.
plot_fund_vs_spdr <- function(fund_csv, fund_name,
                              spdr_csv = "~/GitHub/Momentum/Data/SPDR.csv") {
  spdr <- read.csv(spdr_csv)
  fund <- read.csv(fund_csv)
  fund$Date <- as.Date(fund$Date, format = "%m/%d/%Y")
  spdr$Date <- as.Date(spdr$Date, format = "%m/%d/%Y")

  # Join on Date and keep Date plus the two closing-price columns
  # (positions 5 and 10 after the join, exactly as in the original code).
  mom <- fund %>% inner_join(spdr, by = "Date") %>%
    select(c(1, 5, 10)) %>%
    rename(FUNDclose = Close.x,
           SPDRclose = Close.y)
  mom <- mom[order(mom$Date), ]

  # Daily simple returns and cumulative returns for fund and benchmark.
  mom$FUNDr <- simple_returns(mom$FUNDclose)
  mom$SPDRr <- simple_returns(mom$SPDRclose)
  mom$FUNDcr <- cumret(mom$FUNDr)
  mom$SPDRcr <- cumret(mom$SPDRr)

  # Label the fund's cumulative-return column with its ticker so the plot
  # legend matches the original per-fund sections (e.g. "AMOMXcr").
  names(mom)[names(mom) == "FUNDcr"] <- paste0(fund_name, "cr")

  mom %>% select(c(1, 6, 7)) %>%
    melt(id.vars = "Date", value.name = "CumReturn") %>%
    ggplot(aes(Date, CumReturn)) +
    geom_line(aes(colour = variable)) +
    labs(colour = "Portfolio") + theme_bw() + ylab("Cumulative return")
}

# One cumulative-return comparison plot per momentum fund.
plot_fund_vs_spdr("~/GitHub/Momentum/Data/AMOMX.csv", "AMOMX")
plot_fund_vs_spdr("~/GitHub/Momentum/Data/BRSMX.csv", "BRSMX")
plot_fund_vs_spdr("~/GitHub/Momentum/Data/PDP.csv", "PDP")
plot_fund_vs_spdr("~/GitHub/Momentum/Data/RYAMX.csv", "RYAMX")
|
b2f7eb7cfa34b8eeb03bb097cdc11f029d971334 | 486488f50a2be27afd024944e4addc245d4a7075 | /R/WatershedStorage.R | e85c87c7fc3c9750e0878691819243e5aa86b9a6 | [] | no_license | jjagdeo/climateimpacts | 05c878c39aa2183435ec1eddc6ffb5641a39d4ae | badb89da06877077ae80c9d0548ba17018a19b70 | refs/heads/master | 2021-03-05T22:54:35.568603 | 2020-03-19T20:26:15 | 2020-03-19T20:26:15 | 246,160,321 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 970 | r | WatershedStorage.R | #' Volume of Water Stored in Watershed
#'
#' Computes the volume of water stored in a watershed per year using a
#' simplified mass balance: inflow (precipitation) minus outflow
#' (evaporation + runoff), converted from a depth in inches over the
#' watershed area to a volume in cubic feet.
#'
#' @param precip depth of precipitation in inches/year
#' @param evap depth of water evaporated in inches/year
#' @param runoff depth of runoff in inches/year
#' @param watershed_size total area of the watershed in square miles
#' @return volume of water stored in the watershed in cubic feet/year;
#'   negative values indicate a net annual deficit
WatershedStorage <- function(precip, evap, runoff, watershed_size) {
  # Unit-conversion constants, numerically identical to the original
  # formula so results are unchanged.
  feet_per_inch <- 0.0833333      # 1 inch ~= 1/12 foot (rounded)
  sqft_per_sqmile <- 27880000     # NOTE(review): exact value is 27,878,400 -- confirm intended rounding
  # Net annual depth of water retained (feet), then scale by the watershed
  # area (converted to square feet) to get a volume.
  net_depth_ft <- (precip - (evap + runoff)) * feet_per_inch
  net_depth_ft * (watershed_size * sqft_per_sqmile)
}
|
137e5f4cef0bf284051fff9a7a1bd2827c088f15 | d41be2147fa6b695a6ee7b4e4e6870b19e097ee4 | /man/orcid_pull_name.Rd | 36c98d182bc2cc025747c009d4f5f39146983f38 | [] | no_license | bromptonista/collaborator | 4bea5f2c9f1ae7eb1b524a873b13b2a00111db13 | 58300a509c45d4791ad99176b42a122e928bd58c | refs/heads/master | 2020-09-26T22:24:58.827759 | 2019-10-24T15:31:44 | 2019-10-24T15:31:44 | 226,356,170 | 1 | 0 | null | 2019-12-06T15:19:55 | 2019-12-06T15:19:54 | null | UTF-8 | R | false | true | 658 | rd | orcid_pull_name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orcid_pull_name.R
\name{orcid_pull_name}
\alias{orcid_pull_name}
\title{Pull first name(s) and last name for a given list of orcid}
\usage{
orcid_pull_name(list_orcid, initials = TRUE, position = "right")
}
\arguments{
\item{list_orcid}{List of orcid ids (XXXX-XXXX-XXXX-XXXX format)}
\item{initials}{Should the first / middle name(s) be converted to initials (default = TRUE)}
\item{position}{initials to "left" or "right" of last name (default = "right")}
}
\value{
Dataframe with 3 mandatory columns: orcid, first names (fn_orcid) and last name (ln_orcid)
}
\description{
Pull the first name(s) and last name of the record holder for each ORCID iD
in the supplied list.
}
|
ba2608cbe8b1f532e3848abdc7e6731f806bd75d | 1fc75d5c1d2ae986fd44b2b4c1f3981227a388b4 | /R/rrmake.R | 127725f4f9af9ae7eab03c61e5ea97d714089733 | [] | no_license | bcipolli/rprojroot | 1853390dce73b8b4035420542f2f69a587639605 | 71bd742a4e4ba4e246e4f580697e5a1702117ccc | refs/heads/master | 2023-01-06T22:37:05.193625 | 2017-06-13T08:42:34 | 2017-06-13T08:42:34 | 107,057,897 | 0 | 1 | null | 2017-10-15T23:50:39 | 2017-10-15T23:50:39 | null | UTF-8 | R | false | false | 347 | r | rrmake.R | make_find_root_file <- function(criterion) {
  # Capture `criterion` eagerly so the returned closure does not carry an
  # unevaluated promise.
  force(criterion)
  # NOTE(review): bquote() contains no .() substitution terms here, so it
  # returns the function expression unchanged; the returned closure simply
  # captures `criterion` lexically. It keeps the (..., path = ".")
  # interface and forwards to find_root_file() (defined elsewhere in the
  # package) with the fixed criterion.
  eval(bquote(function(..., path = ".") {
    find_root_file(..., criterion = criterion, path = path)
  }))
}
# Factory: resolve the project root once, via find_root() (defined
# elsewhere in the package), and return a closure that builds paths under
# that fixed root with file.path().
make_fix_root_file <- function(criterion, path) {
  root <- find_root(criterion = criterion, path = path)
  # .(root) is spliced into the function body by bquote(), so the returned
  # function embeds the resolved root directory as a literal value.
  eval(bquote(function(...) {
    file.path(.(root), ...)
  }))
}
|
bc6120b0abf8e684f25cabe99e89093262fd59b4 | a0393190707dbee707b070020399d87db692ff5b | /homework2/homework2.R | 5948b2a7f207e13907e92c31017bbc49cbd38bca | [] | no_license | jpreyer/statistics-one | f1b639cdf5172212e870eecbf2d12e7cfb946a36 | 48ccaf2ea8d0352ffd45d5e8f15785d7e96f982c | refs/heads/master | 2021-01-10T20:54:40.878449 | 2012-12-05T21:51:17 | 2012-12-05T21:51:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 954 | r | homework2.R | library(psych)
# Homework 2: descriptive statistics (psych::describe) for the DAA.02
# working-memory data, split by training condition ("des" vs "aer").
setwd("~/projects/statistics-one/homework2")
data <- read.table("DAA.02.txt", header=T)
# Quick structure checks: column names and object class.
names (data)
class(data)
# Descriptives for condition "des": pre/post working-memory scores,
# spatial (s1, s2) and verbal (v1, v2) measures.
print ("DES")
describe (data$pre.wm.s1[data$cond=="des"])
describe (data$pre.wm.s2[data$cond=="des"])
describe (data$post.wm.s1[data$cond=="des"])
describe (data$post.wm.s2[data$cond=="des"])
describe (data$pre.wm.v1[data$cond=="des"])
describe (data$pre.wm.v2[data$cond=="des"])
describe (data$post.wm.v1[data$cond=="des"])
describe (data$post.wm.v2[data$cond=="des"])
# Same descriptives for condition "aer".
print ("AER")
describe (data$pre.wm.s1[data$cond=="aer"])
describe (data$pre.wm.s2[data$cond=="aer"])
describe (data$post.wm.s1[data$cond=="aer"])
describe (data$post.wm.s2[data$cond=="aer"])
describe (data$pre.wm.v1[data$cond=="aer"])
describe (data$pre.wm.v2[data$cond=="aer"])
describe (data$post.wm.v1[data$cond=="aer"])
describe (data$post.wm.v2[data$cond=="aer"])
# NOTE(review): this correlates the SAME variable (pre.wm.s1) taken from
# two DIFFERENT condition groups; it only runs if both groups happen to
# have equal length, and the pairing is arbitrary row order -- confirm the
# intent (a paired pre/post correlation within one condition may have been
# meant).
cor ((data$pre.wm.s1[data$cond=="des"]),(data$pre.wm.s1[data$cond=="aer"]))
|
5b295f0730239c3f84157e4b0a0a493316f6b9c1 | 1f90f3e57539d5957b4a331dee0e7f4f734bbff9 | /session_one/sitrep/app.R | 0f0a77d3903aeff56a39613b84057e49400146d3 | [
"CC0-1.0"
] | permissive | Allisterh/shiny_beginners | 94e1399be1c67f1280e1c11251253404b94f3cec | ece589217d54980a970f96d127d70b05434db058 | refs/heads/main | 2023-09-02T10:12:25.486651 | 2021-11-18T15:03:58 | 2021-11-18T15:03:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,105 | r | app.R | library(shiny)
library(DT)
library(lubridate)
library(tidyverse)

# Loads the `ShinyContactData` data frame used by the report table.
load("ShinyContactData.rda")

# UI: a multi-select for year(s) in the sidebar and the situation-report
# table in the main panel.
ui <- fluidPage(

    # Application title
    titlePanel("Sitrep"),

    sidebarLayout(
        sidebarPanel(
            selectInput(
                "yearInput",
                "Select year(s)",
                choices = c(2020, 2019, 2018),
                multiple = TRUE
            )
        ),

        # Contact counts per month, one column per Group1 level.
        mainPanel(
            DTOutput("sitrepTable")
        )
    )
)

# Server: filter contacts to the selected year(s), count rows per
# Month x Group1 combination, then spread the Group1 levels into columns.
server <- function(input, output) {
    output$sitrepTable <- renderDT({
        # A leftover debugging call -- cat(str(ShinyContactData)) -- was
        # removed here; it printed the data structure to the console on
        # every re-render.
        ShinyContactData %>%
            filter(Year %in% input$yearInput) %>%
            group_by(Month, Group1) %>%
            summarise(count = n()) %>%
            ungroup() %>%
            spread(., Group1, count)  # spread() is superseded by pivot_wider(); kept for identical output
    })
}

# Run the application
shinyApp(ui = ui, server = server)
|
d5e4749f9d04dc3afd667d80dd2e59b0ee664db2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/dispRity/examples/geomorph.ordination.Rd.R | 479382ade0bf2f8c2d74366b820903fb48a28b18 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,266 | r | geomorph.ordination.Rd.R | library(dispRity)
### Name: geomorph.ordination
### Title: Imports data from geomorph
### Aliases: geomorph.ordination
### ** Examples
## Not run:
##D require(geomorph)
##D ## Loading the plethodon dataset
##D data(plethodon)
##D
##D ## Performing a Procrustes transform
##D procrustes <- geomorph::gpagen(plethodon$land, PrinAxes = FALSE)
##D
##D ## Obtaining the ordination matrix
##D geomorph.ordination(procrustes)
##D
##D
##D ## Using a geomorph.data.frame
##D geomorph_df <- geomorph.data.frame(procrustes, species = plethodon$species)
##D
##D geomorph.ordination(geomorph_df)
##D
##D ## Calculating disparity from dispRity or geomorph::morphol.disparity
##D geomorph_disparity <- geomorph::morphol.disparity(coords ~ 1,
##D groups= ~ species, data = geomorph_df)
##D dispRity_disparity <- dispRity(geomorph.ordination(geomorph_df),
##D metric = function(X) return(sum(X^2)/nrow(X)))
##D
##D ## Extracting the raw disparity values
##D geomorph_val <- round(as.numeric(geomorph_disparity$Procrustes.var), 15)
##D dispRity_val <- as.vector(summary(dispRity_disparity, digits = 15)$obs)
##D
##D ## Comparing the values (to the 15th decimal!)
##D geomorph_val == dispRity_val # all TRUE
## End(Not run)
|
6b712f30da267bfbd0a058a1924bd2c399c2dda2 | d306926e2f769b36e35d1d0aaf190ddfa9a038a5 | /man-roxygen/alias-assign.R | 4cc52fe9b4cd976c7629c44776362c735c2555bf | [] | no_license | QianFeng2020/r2dii.match | 06d1521010d3a6348395f16fc86af7261d49fa08 | edf442eb5c0bae9792bd501638544f7c16ee49b2 | refs/heads/master | 2020-12-24T04:56:51.197026 | 2020-01-28T21:34:18 | 2020-01-28T21:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 412 | r | alias-assign.R | #' @section Assigning aliases:
#' The process to assign an alias for a `name_*` column (i.e. the process to
#' create the `alias_*` columns) applies best practices
#' commonly used in name matching algorithms:
#' * Remove special characters.
#' * Replace language specific characters.
#' * Abbreviate certain names to reduce their importance in the matching.
#' * Spell out numbers to increase their importance.
|
ffe97afb823eb14cee6126dc891155c47400e5ff | 7f71d073e439f9a85b53d530cdbe140be82ff237 | /man/wiki_diff.Rd | 24b68b45a1f029a507477e3c7d98e558727eb40e | [
"MIT"
] | permissive | OrenBochman/WikipediR | 9deff0062af6f0f3e3f8014fb492e4b9fac75ad4 | e3dc3aec9a4282d4214ef903a4488b1c5878ff91 | refs/heads/master | 2021-01-21T21:14:53.336604 | 2014-12-06T10:00:40 | 2014-12-06T10:00:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,483 | rd | wiki_diff.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{wiki_diff}
\alias{wiki_diff}
\title{Generates a "diff" between a pair of revisions}
\usage{
wiki_diff(con, revisions, properties = c("ids", "flags", "timestamp", "user",
"userid", "size", "sha1", "contentmodel", "comment", "parsedcomment", "tags",
"flagged"), direction = c("prev", "next", "cur"))
}
\arguments{
\item{con}{A connector object, generated by \code{\link{wiki_con}},
that corresponds to the project you're trying to query.}
\item{revisions}{The revision IDs of each "start" revision.}
\item{properties}{Properties you're trying to retrieve about that revision, should you want to;
options include "ids" (the revision ID of the revision...which is pointless),
"flags" (whether the revision was 'minor' or not), "timestamp" (the timestamp of the revision,
which can be parsed with \code{\link{wiki_timestamp}}),"user" (the username of the person
who made that revision), "userid" (the userID of the person who made the revision),
"size" (the size, in uncompressed bytes, of the revision), "sha1" (the SHA-1 hash of
the revision text), "contentmodel" (the content model of the page, usually "wikitext"),
"comment" (the revision summary associated with the revision), "parsedcomment" (the same,
but parsed, generating HTML from any wikitext in that comment), "tags" (any tags associated
with the revision) and "flagged" (the revision's status under Flagged Revisions).}
\item{direction}{The direction you want the diff to go in from the revisionID you have provided.
Options are "prev" (compare to the previous revision on that page), "next" (compare to the next
revision on that page) and "cur" (compare to the current, extant version of the page).}
}
\description{
wiki_diff generates a diff between two revisions in a MediaWiki page.
This is provided as an XML-parsable blob inside the returned JSON object.
}
\section{Warnings}{
MediaWiki's API is deliberately designed to restrict users' ability to make computationally intensive requests
- such as diff computation. As a result, the API only allows requests for one uncached diff in
each request. If you ask for multiple diffs, some uncached and some cached, you will be provided
with the cached diffs, one of the uncached diffs, and a warning.
If you're going to be asking for a lot of diffs, some of which may not be cached, it may be more
sensible to retrieve the revisions themselves using \code{\link{wiki_revision}} and compute the
diffs yourself.
}
|
fad0d60043038130df3d8e202bbef228a3e970a7 | 039c4f0bd986bd9f9035725062d176e60b376b93 | /man/weights.Rd | f1abbaa41946a0b07da4630b041aa795104ae334 | [] | no_license | northeastloon/gemrtables | 05170f7402ae0fcd3dcc7f390dbb292d199bf8e5 | 2c34427bbe3d325a7e8cb1e4caa0592113d9db18 | refs/heads/master | 2020-03-23T09:54:18.824781 | 2018-10-18T15:30:31 | 2018-10-18T15:30:31 | 141,414,067 | 1 | 1 | null | 2019-12-17T05:11:29 | 2018-07-18T09:39:47 | R | UTF-8 | R | false | true | 561 | rd | weights.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge_files.R
\name{weights}
\alias{weights}
\title{weights}
\usage{
weights()
}
\description{
\code{weights} is a function to import and clean weights data
}
\details{
Defines SDMX queries to the UIS / UN APIs and applies the `weights_clean`
function
}
\seealso{
\code{\link{weights_clean}}
Other import/clean: \code{\link{cedar}},
\code{\link{inds}}, \code{\link{other}},
\code{\link{region_groups2}},
\code{\link{region_groups}}, \code{\link{uis}}
}
\concept{import/clean}
|
4d74bcb04ab617015fed45024a38a36b98628895 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/mnis/man/constituency_results_tidy.Rd | df0f2c1c2164f05ba4a8f65d4e247d600cde46ec | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 386 | rd | constituency_results_tidy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mnis_tidy.R
\name{constituency_results_tidy}
\alias{constituency_results_tidy}
\title{constituency_results_tidy}
\usage{
constituency_results_tidy(results, details)
}
\arguments{
\item{results}{The tibble to tidy}
\item{details}{The list to tidy}
}
\description{
constituency_results_tidy
}
|
05934810ddd3d8abb7ef77205860949de2948df3 | 1c111e728fc5a8092a550adbb2f366788cbd0b53 | /get_season_results.R | 3f6248f74bd24b9fb64a283fd7bdf5c9fab5ce27 | [] | no_license | segoldma/CFB | 6eca654f699cbe065d460174815400c684c290ec | e8aa44457ffe436d45739579943c90342c367c0e | refs/heads/master | 2021-06-26T13:27:17.635046 | 2019-08-18T20:33:41 | 2019-08-18T20:33:41 | 100,128,589 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 589 | r | get_season_results.R | library(rvest)
library(dplyr)
library(lubridate)
#' Scrape college-football season results from sports-reference.com.
#'
#' Downloads the schedule table for `year`, renames the ambiguous
#' auto-generated columns (`Var.8`, `Pts`, `Pts.1`), assigns the result to
#' `season_results_<year>` in the global environment (side effect kept for
#' backward compatibility with existing callers), and also returns it so
#' the function composes without touching globals.
#'
#' @param year Season year; defaults to the current calendar year.
#' @return The season-results data frame, invisibly.
GetSeasonResults <- function(year = lubridate::year(lubridate::now())) {
  year <- as.numeric(year)
  if (is.na(year)) {
    stop("'year' must be coercible to a number", call. = FALSE)
  }
  url <- paste0("https://www.sports-reference.com/cfb/years/", year, "-schedule.html")
  season_results <- read_html(url) %>%
    html_nodes("#schedule") %>%
    html_table(fill = TRUE) %>%
    as.data.frame() %>%
    # sports-reference ships an unnamed home/away marker column and two
    # "Pts" columns; give them self-describing names.
    rename("At" = `Var.8`,
           "W.Pts" = `Pts`,
           "L.Pts" = `Pts.1`)
  # Global assignment retained for backward compatibility.
  assign(x = paste0("season_results_", year), season_results, envir = .GlobalEnv)
  invisible(season_results)
}
# Try it out
GetSeasonResults(2014)
|
95a6b60adfaf2e482159e3ccf2670d085438aca8 | 8dc7c48e822815eb71af789e4a97c229c0ab8ecd | /man/IdfViewer.Rd | 5169cc1420ee28c4cab96c141f29a21f584ec303 | [
"MIT"
] | permissive | hongyuanjia/eplusr | 02dc2fb7eaa8dc9158fe42d060759e16c62c6b47 | 4f127bb2cfdb5eb73ef9abb545782f1841dba53a | refs/heads/master | 2023-08-31T02:49:26.032757 | 2023-08-25T15:21:56 | 2023-08-25T15:21:56 | 89,495,865 | 65 | 13 | NOASSERTION | 2023-08-24T02:05:22 | 2017-04-26T15:16:34 | R | UTF-8 | R | false | true | 24,121 | rd | IdfViewer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viewer.R
\docType{class}
\name{IdfViewer}
\alias{IdfViewer}
\alias{idf_viewer}
\title{Visualize an EnergyPlus Model Geometry and Simulation Results}
\usage{
idf_viewer(geometry)
}
\arguments{
\item{geometry}{An \link{IdfGeometry} object. \code{geometry} can also be a
path to an IDF file or an \link{Idf} object. In this case, an
\code{IdfGeometry} is created based on input \link{Idf}.}
}
\value{
An \code{IdfViewer} object.
}
\description{
\code{IdfViewer} is a class designed to view geometry of an \link{Idf} and map
simulation results to the geometries.
}
\examples{
## ------------------------------------------------
## Method `IdfViewer$new`
## ------------------------------------------------
\dontrun{
# example model shipped with eplusr from EnergyPlus v8.8
path_idf <- system.file("extdata/1ZoneUncontrolled.idf", package = "eplusr") # v8.8
# create from an Idf object
idf <- read_idf(path_idf, use_idd(8.8, "auto"))
viewer <- idf_viewer(idf)
viewer <- IdfViewer$new(idf)
# create from an IDF file
viewer <- idf_viewer(path_idf)
viewer <- IdfViewer$new(path_idf)
}
## ------------------------------------------------
## Method `IdfViewer$parent`
## ------------------------------------------------
\dontrun{
viewer$parent()
}
## ------------------------------------------------
## Method `IdfViewer$geometry`
## ------------------------------------------------
\dontrun{
viewer$geometry()
}
## ------------------------------------------------
## Method `IdfViewer$device`
## ------------------------------------------------
\dontrun{
viewer$device()
}
## ------------------------------------------------
## Method `IdfViewer$background`
## ------------------------------------------------
\dontrun{
viewer$background("blue")
}
## ------------------------------------------------
## Method `IdfViewer$viewpoint`
## ------------------------------------------------
\dontrun{
viewer$viewpoint()
}
## ------------------------------------------------
## Method `IdfViewer$win_size`
## ------------------------------------------------
\dontrun{
viewer$win_size(0, 0, 400, 500)
}
## ------------------------------------------------
## Method `IdfViewer$mouse_mode`
## ------------------------------------------------
\dontrun{
viewer$mouse_mode()
}
## ------------------------------------------------
## Method `IdfViewer$axis`
## ------------------------------------------------
\dontrun{
viewer$axis()
}
## ------------------------------------------------
## Method `IdfViewer$ground`
## ------------------------------------------------
\dontrun{
viewer$ground()
}
## ------------------------------------------------
## Method `IdfViewer$wireframe`
## ------------------------------------------------
\dontrun{
viewer$wireframe()
}
## ------------------------------------------------
## Method `IdfViewer$x_ray`
## ------------------------------------------------
\dontrun{
viewer$x_ray()
}
## ------------------------------------------------
## Method `IdfViewer$render_by`
## ------------------------------------------------
\dontrun{
viewer$render_by()
}
## ------------------------------------------------
## Method `IdfViewer$show`
## ------------------------------------------------
\dontrun{
viewer$show()
}
## ------------------------------------------------
## Method `IdfViewer$focus`
## ------------------------------------------------
\dontrun{
viewer$top()
}
## ------------------------------------------------
## Method `IdfViewer$close`
## ------------------------------------------------
\dontrun{
viewer$close()
}
## ------------------------------------------------
## Method `IdfViewer$snapshot`
## ------------------------------------------------
\dontrun{
viewer$show()
viewer$snapshot(tempfile(fileext = ".png"))
}
## ------------------------------------------------
## Method `IdfViewer$print`
## ------------------------------------------------
\dontrun{
viewer$print()
}
}
\seealso{
\link{IdfGeometry} class
}
\author{
Hongyuan Jia
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-IdfViewer-new}{\code{IdfViewer$new()}}
\item \href{#method-IdfViewer-parent}{\code{IdfViewer$parent()}}
\item \href{#method-IdfViewer-geometry}{\code{IdfViewer$geometry()}}
\item \href{#method-IdfViewer-device}{\code{IdfViewer$device()}}
\item \href{#method-IdfViewer-background}{\code{IdfViewer$background()}}
\item \href{#method-IdfViewer-viewpoint}{\code{IdfViewer$viewpoint()}}
\item \href{#method-IdfViewer-win_size}{\code{IdfViewer$win_size()}}
\item \href{#method-IdfViewer-mouse_mode}{\code{IdfViewer$mouse_mode()}}
\item \href{#method-IdfViewer-axis}{\code{IdfViewer$axis()}}
\item \href{#method-IdfViewer-ground}{\code{IdfViewer$ground()}}
\item \href{#method-IdfViewer-wireframe}{\code{IdfViewer$wireframe()}}
\item \href{#method-IdfViewer-x_ray}{\code{IdfViewer$x_ray()}}
\item \href{#method-IdfViewer-render_by}{\code{IdfViewer$render_by()}}
\item \href{#method-IdfViewer-show}{\code{IdfViewer$show()}}
\item \href{#method-IdfViewer-focus}{\code{IdfViewer$focus()}}
\item \href{#method-IdfViewer-close}{\code{IdfViewer$close()}}
\item \href{#method-IdfViewer-snapshot}{\code{IdfViewer$snapshot()}}
\item \href{#method-IdfViewer-print}{\code{IdfViewer$print()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-new"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-new}{}}}
\subsection{Method \code{new()}}{
Create an \code{IdfViewer} object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$new(geometry)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{geometry}}{An \link{IdfGeometry} object. \code{geometry} can also be a
path to an IDF file or an \link{Idf} object. In this case, an
\code{IdfGeometry} is created based on input \link{Idf}.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
An \code{IdfViewer} object.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
# example model shipped with eplusr from EnergyPlus v8.8
path_idf <- system.file("extdata/1ZoneUncontrolled.idf", package = "eplusr") # v8.8
# create from an Idf object
idf <- read_idf(path_idf, use_idd(8.8, "auto"))
viewer <- idf_viewer(idf)
viewer <- IdfViewer$new(idf)
# create from an IDF file
viewer <- idf_viewer(path_idf)
viewer <- IdfViewer$new(path_idf)
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-parent"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-parent}{}}}
\subsection{Method \code{parent()}}{
Get parent \link{Idf} object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$parent()}\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$parent()} returns the parent \link{Idf} object of current \code{IdfGeometry}
object.
}
\subsection{Returns}{
An \link{Idf} object.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$parent()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-geometry"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-geometry}{}}}
\subsection{Method \code{geometry()}}{
Get parent \link{IdfGeometry} object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$geometry()}\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$geometry()} returns the parent \link{IdfGeometry} object.
}
\subsection{Returns}{
An \link{IdfGeometry} object.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$geometry()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-device"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-device}{}}}
\subsection{Method \code{device()}}{
Get Rgl device ID
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$device()}\if{html}{\out{</div>}}
}
\subsection{Details}{
If Rgl is used, the Rgl device ID is returned. If WebGL is
used, the \code{elementID} is returned. If no viewer has been open, \code{NULL}
is returned.
}
\subsection{Returns}{
A number or \code{NULL}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$device()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-background"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-background}{}}}
\subsection{Method \code{background()}}{
Set the background color of the scene
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$background(color = "white")}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{color}}{A single string giving the background color. Default:
\code{white}.}
}
\if{html}{\out{</div>}}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$background("blue")
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-viewpoint"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-viewpoint}{}}}
\subsection{Method \code{viewpoint()}}{
Set the viewpoint orientation of the scene
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$viewpoint(
look_at = "iso",
theta = NULL,
phi = NULL,
fov = NULL,
zoom = NULL,
scale = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{look_at}}{A single string indicating a standard view. If
specified, \code{theta} and \code{phi} will be ignored. Should be
one of \code{c("top", "bottom", "left", "right", "front", "back", "iso")}. \code{look_at} will be ignored if any of \code{theta} and \code{phi}
is specified. Default: \code{iso} (i.e. isometric).}
\item{\code{theta}}{Theta in polar coordinates. If \code{NULL}, no changes will
be made to current scene. Default: \code{NULL}.}
\item{\code{phi}}{Phi in polar coordinates. If \code{NULL}, no changes will be
made to current scene. Default: \code{NULL}.}
\item{\code{fov}}{Field-of-view angle in degrees. If \code{0}, a parallel or
orthogonal projection is used. If \code{NULL}, no changes will
be made to current scene. Default: \code{NULL}.}
\item{\code{zoom}}{Zoom factor. If \code{NULL}, no changes will be made to
current scene. Default: \code{NULL}.}
\item{\code{scale}}{A numeric vector of length 3 giving the rescaling to
apply to each axis. If \code{NULL}, no changes will be made to
current scene. Default: \code{NULL}.}
}
\if{html}{\out{</div>}}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$viewpoint()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-win_size"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-win_size}{}}}
\subsection{Method \code{win_size()}}{
Set the window size
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$win_size(left = 0, top = 0, right = 600, bottom = 600)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{left, top, right, bottom}}{A single number indicating the pixels of
the displayed window. Defaults: \code{0} (\code{left}), \code{0} (\code{top}),
\code{600} (\code{right}) and \code{600} (\code{bottom}).}
}
\if{html}{\out{</div>}}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$win_size(0, 0, 400, 500)
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-mouse_mode"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-mouse_mode}{}}}
\subsection{Method \code{mouse_mode()}}{
Set the handlers of mouse control
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$mouse_mode(
left = "trackball",
right = "pan",
middle = "fov",
wheel = "pull"
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{left, right, middle}}{Refer to the buttons on a three button
mouse, or simulations of them on other mice. Defaults:
\code{"trackball"} (\code{left}), \code{"pan"} (\code{right}) and \code{"fov"}
(\code{middle}).}
\item{\code{wheel}}{Refer to the mouse wheel. Default: \code{"pull"}.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
Possible values are:\tabular{ll}{
Mode \tab Description \cr
"none" \tab No action \cr
"trackball" \tab The mouse acts as a virtual trackball. Clicking and dragging rotates the scene \cr
"xAxis", "yAxis", "zAxis" \tab Like "trackball", but restricted to rotation about one axis \cr
"polar" \tab The mouse affects rotations by controlling polar coordinates directly \cr
"zoom" \tab The mouse zooms the display \cr
"fov" \tab The mouse affects perspective by changing the field of view \cr
"pull" \tab Rotating the mouse wheel towards the user “ pulls the scene closer” \cr
"push" \tab The same rotation “pushes the scene away” \cr
"pan" \tab Pan the camera view vertically or horizontally \cr
}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$mouse_mode()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-axis"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-axis}{}}}
\subsection{Method \code{axis()}}{
Toggle axis in the scene
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$axis(
add = TRUE,
expand = 2,
width = 1.5,
color = c("red", "green", "blue", "orange"),
alpha = 1
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{add}}{If \code{TRUE}, axis is added to the scene. If \code{FALSE}, axis is
removed in the scene.}
\item{\code{expand}}{A single number giving the factor to expand based on
the largest X, Y and Z coordinate values. Default: \code{2.0}.}
\item{\code{width}}{A number giving the line width of axis. \code{width * 2} is
used for the true north axis. Default: \code{1.5}.}
\item{\code{color}}{A character of length 4 giving the color of X, Y, Z and
true north axis. Default: \code{c("red", "green", "blue", "orange")}.}
\item{\code{alpha}}{A number giving the alpha value of axis. Default: \code{1.0}.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$axis()} adds or removes X, Y and Z axis in the scene.
}
\subsection{Returns}{
A single logical value as \code{add}.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$axis()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-ground"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-ground}{}}}
\subsection{Method \code{ground()}}{
Toggle ground in the scene
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$ground(add = TRUE, expand = 1.02, color = "#EDEDEB", alpha = 1)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{add}}{If \code{TRUE}, ground is added to the scene. If \code{FALSE},
ground is removed in the scene.}
\item{\code{expand}}{A single number giving the factor to expand based on
the largest X, Y and Z coordinate values. Default: \code{1.02}.}
\item{\code{color}}{A string giving the color of ground. Default: \verb{#EDEDEB}.}
\item{\code{alpha}}{A number giving the alpha value of ground. Default: \code{1.0}.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$ground()} adds or removes ground in the scene.
}
\subsection{Returns}{
A single logical value as \code{add}.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$ground()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-wireframe"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-wireframe}{}}}
\subsection{Method \code{wireframe()}}{
Toggle wireframe
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$wireframe(add = TRUE, width = 1.5, color = "black", alpha = 1)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{add}}{If \code{TRUE}, wireframe is turned on. If \code{FALSE}, wireframe
is turned off. Default: \code{TRUE}.}
\item{\code{width}}{A number giving the line width of axis. Default: \code{1.5}.}
\item{\code{color}}{A character of length 3 giving the color of X, Y and Z
axis. Default: \code{c("red", "green", "blue")}.}
\item{\code{alpha}}{A number giving the alpha value of axis. Default: \code{1.0}.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$wireframe()} turns on/off wireframes.
}
\subsection{Returns}{
A single logical value as \code{add}.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$wireframe()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-x_ray"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-x_ray}{}}}
\subsection{Method \code{x_ray()}}{
Toggle X-ray face style
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$x_ray(on = TRUE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{on}}{If \code{TRUE}, X-ray is turned on. If \code{FALSE}, X-ray is turned
off. Default: \code{TRUE}.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$x_ray()} turns on/off X-ray face style.
}
\subsection{Returns}{
A single logical value as \code{on}.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$x_ray()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-render_by"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-render_by}{}}}
\subsection{Method \code{render_by()}}{
Set render style
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$render_by(type = "surface_type")}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{type}}{A single string giving the render style. Should be one
of:
\itemize{
\item \code{"surface_type"}: Default. Render the model by surface type
model. Walls, roofs, windows, doors, floors, and shading
surfaces will have unique colors.
\item \code{"boundary"}: Render the model by outside boundary condition.
Only surfaces that have boundary conditions will be rendered
with a color. All other surfaces will be white.
\item \code{"construction"}: Render the model by surface constructions.
\item \code{"zone"}: Render the model by zones assigned.
\item \code{"space"}: Render the model by spaces assigned.
\item \code{"normal"}: Render the model by surface normal. The outside
face of a heat transfer face will be rendered as white and the
inside face will be rendered as red.
}}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$render_by()} sets the render style of geometries.
}
\subsection{Returns}{
A same value as \code{style}.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$render_by()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-show"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-show}{}}}
\subsection{Method \code{show()}}{
Show \link{Idf} geometry
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$show(
type = "all",
zone = NULL,
space = NULL,
surface = NULL,
width = 1.5,
dayl_color = "red",
dayl_size = 5
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{type}}{A character vector of geometry components to show. If
\code{"all"} (default), all geometry components will be shown. If
\code{NULL}, no geometry faces will be shown. Otherwise, should be
a subset of following:
\itemize{
\item \code{"floor"}
\item \code{"wall"}
\item \code{"roof"}
\item \code{"window"}
\item \code{"door"}
\item \code{"shading"}
\item \code{"daylighting"}
}}
\item{\code{zone}}{A character vector of names or an integer vector of IDs
of zones in current \link{Idf} to show. If \code{NULL}, no subsetting is
performed.}
\item{\code{space}}{A character vector of names or an integer vector of IDs
of spaces in current \link{Idf} to show. If \code{NULL}, no subsetting is
performed.}
\item{\code{surface}}{A character vector of names or an integer vector of IDs
of surfaces in current \link{Idf} to show. If \code{NULL}, no subsetting
is performed.}
\item{\code{width}}{The line width for the geometry components. Default:
\code{1.5}.}
\item{\code{dayl_color, dayl_size}}{The color and size of daylighting
reference points. Defaults: \code{"red"} (\code{dayl_color}) and \code{5}
(\code{dayl_size}).}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
The \code{IdfViewer} itself, invisibly.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$show()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-focus"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-focus}{}}}
\subsection{Method \code{focus()}}{
Bring the scene window to the top
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$focus()}\if{html}{\out{</div>}}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$top()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-close"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-close}{}}}
\subsection{Method \code{close()}}{
Close the scene window
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$close()}\if{html}{\out{</div>}}
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$close()
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-snapshot"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-snapshot}{}}}
\subsection{Method \code{snapshot()}}{
Capture and save current rgl view as an image
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$snapshot(filename, webshot = FALSE, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{filename}}{A single string specifying the file name. Current
supported formats are \code{png}, \code{pdf}, \code{svg}, \code{ps}, \code{eps}, \code{tex}
and \code{pgf}.}
\item{\code{webshot}}{Whether to use the 'webshot2' package to take the
snapshot. For more details, please see \code{\link[rgl:snapshot]{rgl::snapshot3d()}}.
Default: \code{FALSE}.}
\item{\code{...}}{Arguments to pass to \code{webshot2::webshot()}.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
\verb{$snapshot()} captures the current rgl view and saves it as an image
file to disk using \code{\link[rgl:snapshot]{rgl::snapshot3d()}} and \code{\link[rgl:postscript]{rgl::rgl.postscript()}}.
}
\subsection{Returns}{
A single string of the file path.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$show()
viewer$snapshot(tempfile(fileext = ".png"))
}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-IdfViewer-print"></a>}}
\if{latex}{\out{\hypertarget{method-IdfViewer-print}{}}}
\subsection{Method \code{print()}}{
Print an \code{IdfViewer} object
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{IdfViewer$print()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
The \code{IdfViewer} itself, invisibly.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{\dontrun{
viewer$print()
}
}
\if{html}{\out{</div>}}
}
}
}
|
eae86737d43bd0aef6bb061a97053c035b093639 | dbfff8a25b2ee0abfbced4eafa9c2aeae12488f5 | /sv/analysis/IlluminaCallers/individual_caller.R | b2e1e5dda7f17ba4b5f4f1df6946f9b622ef0d1c | [] | no_license | mchaisso/hgsvg | ba1642ea025d244b20b49396aa96d336aea83aa1 | 7d0f01835512a78ae41dbb3c7094575b09c217d0 | refs/heads/master | 2020-03-21T13:37:43.438894 | 2018-06-21T22:39:50 | 2018-06-21T22:39:50 | 138,616,635 | 1 | 0 | null | 2018-06-25T15:49:25 | 2018-06-25T15:49:25 | null | UTF-8 | R | false | false | 5,016 | r | individual_caller.R | library(getopt)
# getopt() specification matrix: long name, short flag, argument mask,
# data type. The argument mask must be numeric-coercible (0 = no argument,
# 1 = required, 2 = optional); the original "op" entry in the "operation"
# row coerces to NA and breaks option parsing, so it is fixed to 2 here.
options <- matrix(c("sv", "v", 2, "character",
                    "count", "c", 2, "character",
                    "operation", "o", 2, "character",
                    "sample", "s", 2, "character"), byrow = TRUE, ncol = 4)
args <- getopt(options)
#
#setwd("/net/eichler/vol24/projects/structural_variation/nobackups/projects/HGSVG/analysis/IlluminaCombined/HG00514")
#args <- data.frame(sv="int_caller_full.INS.bed",count="callers.INS.tab", sample="HG00514", operation="INS")
# Per-caller SV table; duplicated headers are disambiguated by read.table,
# so the second "tEnd"/"orth_filter" columns arrive as "tEnd.1"/"orth_filter.1".
callTab <- read.table(as.character(args$sv),header=T,comment.char="")
callNames <- names(callTab)
# Per-caller indicator columns sit strictly between the second "tEnd"
# column and the second "orth_filter" column.
firstName <- which(callNames == "tEnd.1")[1]+1
lastName <- which(callNames == "orth_filter.1")[1]-1
countTab <- read.table(as.character(args$count),header=T,comment.char="")
nSamples <- lastName - firstName + 1
countNames <- names(countTab)
# Count columns start right after "tEnd" and run to the end of the table.
firstCountName <- which(countNames == "tEnd")[1]+1
lastCountName <- length(countNames)
# Total call count per caller: column sums over the count table.
allCounts <- apply(countTab[,firstCountName: lastCountName],2,sum)
#
# First pca of all values: input is transposed so each caller is one
# observation and each SV call is one variable.
tpca <- prcomp(t(callTab[,firstName:lastName]), center=T, scale=T)
tabNames <- names(callTab[,firstName:lastName])
#
# Counts table: look up each caller's total count, defaulting to 0 for
# callers that are absent from the count table.
tabAllCounts <- sapply(tabNames, function(n) if (is.na(allCounts[n])) { return(0) } else { return(allCounts[n]) } )
library(gdsfmt)
library(ggrepel)
# Rename the principal-component columns to c1..cn so they can be mapped
# directly in aes() below.
n <- length(colnames(tpca$x))
colnames(tpca$x) <- paste("c",seq(1,n),sep="")
pcaDF <- as.data.frame(tpca$x)
pcaSummary <- summary(tpca)
require(gridExtra)
# NOTE(review): sep="" is not an sprintf() argument (looks like a leftover
# from a paste() call); it is passed as an extra ... value -- confirm it
# does not alter the output filename.
pdf(sprintf("MethodPCA.%s.%s.pdf", args$operation, args$sample,sep=""), width=12,height=6)
# PC1 vs PC2 and PC2 vs PC3, each axis labeled with the percentage of
# variance explained (pcaSummary$importance row 2).
p1 <- ggplot(pcaDF, aes(x=c1,y=c2)) + geom_point(color = 'black') + geom_text_repel(aes(label = tabNames)) + xlab(sprintf("PC 1 %2.2f%% variance ",100*pcaSummary$importance[2,1]) ) + ylab(sprintf("PC 2 %2.2f%% variance ", 100*pcaSummary$importance[2,2])) + labs(title=sprintf("%s, Illumina combined %s", args$sample, args$operation)) + theme_bw()
p2 <- ggplot(pcaDF, aes(x=c2,y=c3)) + geom_point(color = 'black') + geom_text_repel(aes(label = tabNames)) + xlab(sprintf("PC 2 %2.2f%% variance ",100*pcaSummary$importance[2,2]) ) + ylab(sprintf("PC 3 %2.2f%% variance ", 100*pcaSummary$importance[2,3])) + labs(title=sprintf("%s, Illumina combined %s", args$sample, args$operation)) + theme_bw()
grid.arrange(p1,p2,ncol=2)
dev.off()
print("done plotting pca")
library(lsa)
library(RColorBrewer)
library(lattice)
library(proxy)
# Keep only calls that pass the orthogonal validation filter.
tabPass <- callTab[which(callTab$orth_filter == "PASS"),]
tabFail <- callTab[which(callTab$orth_filter == "FAIL"),]
cmat <- cosine(as.matrix(tabPass[,firstName:lastName]))
jmat <- dist(t(tabPass[,firstName:lastName]), method="Jaccard", pairwise=T)
reds <- brewer.pal(9, "RdBu")
cr <- colorRamp(reds)
rampCol <- rgb(cr(seq(0,1,by=0.01))/255)
#levelplot(as.matrix(jmat), col.regions=rampCol)
#heatmap(as.matrix(jmat), symm=T, col=rampCol)
jmat <- dist(t(callTab[,firstName:lastName]), method="Jaccard", pairwise=T)
#reds <- brewer.pal(9, "Blues")
cr <- colorRamp(reds)
n <- dim(jmat)[1]
cl <- hclust(dist(as.matrix(jmat)))
pdf(sprintf("Jaccard.%s.%s.pdf",args$operation, args$sample))
levelplot(as.matrix(jmat)[cl$order,cl$order], col.regions=rampCol,scales=list(x=list(rot=90)), xlab="", ylab="", main=sprintf("Jaccard similarity %s %s",args$sample, args$operation) )
dev.off()
#hm <- heatmap(as.matrix(jmat), symm=T, col=rampCol,plot=F)
passSum <- apply(tabPass[,firstName:lastName],2,sum)
callsSum <- apply(callTab[,firstName:lastName],2,sum)
pdf(sprintf("MethodsBar.%s.%s.pdf",args$operation, args$sample),width=8,height=4)
bpx <- barplot(rbind(passSum, callsSum-passSum), names.arg=tabNames,col=c("black","red"), main=sprintf("%s %s", args$sample, args$operation), xaxt="n")
mc <- max(callsSum)
text(cex=1, x=bpx-.25, y=-mc*0.15, tabNames, xpd=TRUE, srt=45,pos=1)
legend("topright", legend=c("Confirmed", "Unconfirmed"), pch=22, pt.bg=c("black","red"), pt.cex=2)
dev.off()
passTab <- rbind(passSum, callsSum, 100*passSum/callsSum)
rownames(passTab) <- c("confirmed", "total", "fraction")
methodSummary <- sprintf("MethodSummary.%s.%s.tsv",args$operation,args$sample)
write.table(passTab, methodSummary, sep="\t", quote=F)
#apply(tab[,4:16],2,length)
#
#
#tabins <- read.table("int_margin.INS.bed",header=T,comment.char="")
#
#tpca <- prcomp(t(tab[,4:16]), center=T, scale=T)
#names(tpca)
#library(gdsfmt)
#dim(tpca$x)
#library(ggrepel)
#plot(tpca$x[,1], tpca$x[,2])
#
#names(tab)
#df <- as.data.frame(tpca$x)
#
#
#colnames(tpca$x) <- paste("c",seq(1,13),sep="")
#
#
#
#names <- colnames(tab)[4:16]
#
#tpca <- prcomp(t(tabPass[,4:16]), center=T, scale=T)
#df <- as.data.frame(tpca$x)
#pdf("MethodPCA.NA1940.pass.pdf")
#
#ggplot(df, aes(x=PC1,y=PC2)) +
# geom_point(color = 'black') +
# geom_text_repel(aes(label = colnames(tab)[4:16])) + xlab(sprintf("PC 1 %2.2f%% variance ",100*s$importance[2][1]) ) + ylab(sprintf("PC 2 %2.2f%% variance ", 100*s$importance[2][1])) + labs(title="NA19240, Filtered")
#de.vooff()
#
|
2efcd4290ac59f058c3bf8fae4cca397ea6a6bb1 | 5e5e3f1aed30feb2de02bd5b09445718a36a4967 | /data_quality.R | 35ac4e275ba61fd84bb11dbf5744f99a47d0152d | [] | no_license | basselus/EDA | ce65c2152e8fa16ce5309b7b6ecc3eae2726b1e6 | 076420883ae38c2fcd601d7da5b968092646587b | refs/heads/master | 2021-07-17T16:42:09.578301 | 2021-06-15T14:50:18 | 2021-06-15T14:50:18 | 108,565,515 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,764 | r | data_quality.R |
# 1-chargement des données
data=read.csv("data_hotels.csv", header = T, sep=";", na.strings = c("","NA"))
str(data)
#********************************************************************************
#Problème de qualité numéro 1 : informations manquantes sur le nombre de chambres
#********************************************************************************
#On crée une variable d'évaluation de la métrique
EVAL_ROOMS=data$EVAL_ROOMS
data$EVAL_ROOMS[which(is.na(data$NBCHAMBRES))]<-"missing"
data$EVAL_ROOMS[which(!is.na(data$NBCHAMBRES))]<-"non_missing"
#On Visualise le pourcentage d'hotels avec des données manquantes sur la variable nombre de chambres
data<-within(data,
EVAL_ROOMS<-factor(EVAL_ROOMS,
levels = names(sort(table(EVAL_ROOMS),
decreasing = TRUE))))
counts=table(data$EVAL_ROOMS)
relfreq=counts/sum(counts)
relfreq
vec.col1=c("blue","yellow")
barplot(relfreq, col=vec.col1, names.arg = levels(data$EVAL_ROOMS), main = "données manquantes sur le nombre de chambres",
ylab="données manquantes en %", las = 1,
cex.names=0.8,
font.axis = 2)
#*******************************************************************************************************
#Problème de qualité numéro 2 : existence de plusieurs liens dans le champ photo
#*******************************************************************************************************
# On crée une sous table links pour traiter les liens photos
library(stringr)
data$PHOTOS_2=as.character(data$PHOTOS)
links=data.frame(str_split_fixed(data$PHOTOS_2, ":", 3))
#On supprime les lignes des hotels sans aucun lien d'images
links$X1=as.character(links$X1)
links$X1[links$X1==""]<-NA
links$X1<-as.factor(links$X1)
which(is.na(links$X1))
links=links[-c(76, 96, 109, 126, 152, 153, 157, 161, 164, 167, 170, 174),]
# Traitement préalable des NA pour filtrer les hotels ayant un seul lien d'image
links$X3=as.character(links$X3)
links$X3[links$X3==""]<-NA
links$X3<-as.factor(links$X3)
#On crée une variable de décompte des hotels avec un seul lien d'image
lien_uniq=links$lien_uniq
#filtres conditionnels avec which
links$X3=as.character(links$X3)
links$lien_uniq[which(is.na(links$X3))]<-"one link" # si la colonne X3 est vide, cela veut dire qu'il ya un seul lien
links$lien_uniq[which(!is.na(links$X3))]<-"several links" # si la colonne X3 n'est pas vide cela veut dire qu'il yen a plusieurs
#On Visualise le pourcentage de d'hôtels avec plusieurs liens vs celui avec un seul lien d'image
links<-within(links,
lien_uniq<-factor(lien_uniq,
levels = names(sort(table(lien_uniq),
decreasing = TRUE))))
counts=table(links$lien_uniq)
relfreq=counts/sum(counts)
vec.col2=c("blue","lightblue")
barplot(relfreq, col=vec.col2, names.arg =levels(links$lien_uniq), main = "pourcentage de lignes avec plusieurs url ou 1 url
",
ylab="résultats en %", las = 1,
cex.names=0.8,
font.axis = 2)
#*******************************************************************************************************
#Problème de qualité numéro 3 : la non correspondance entre le nom de domaine et l'adresse de messagerie
#*******************************************************************************************************
library(data.table)
library(stringr)
#Construction de la sous-table mails pour traiter le problème des mails
x=as.vector(data$WEB)
mailmatch=data.frame(t(do.call("cbind",strsplit(as.character(data$MAIL),"@"))))
mails=data.frame(cbind(x,mailmatch))#
mails$X1<-NULL#
mails=setnames(mails,old=c("x","X2"), new=c("web","messagerie"))
#On crée une variable secure avec un filtre conditionnel:
# 1- si une partie des caractères du site web se trouve dans les caractères de messagerie= TRUE
# 2- s'il n'ya pas correspondance entre les 2 on met =FALSE
mails$secure=
with(mails,
str_detect(as.character(web), as.character(messagerie))
)
# #On Visualise le pourcentage d'hotels concernés par ce problème de sécurité
# des données personnelles :
mails<-within(mails,
secure<-factor(secure,
levels = names(sort(table(secure),
decreasing = TRUE))))
counts=table(mails$secure)
relfreq=counts/sum(counts)
vec.col3=c("blue","green")
barplot(relfreq, col=vec.col3, names.arg =levels(mails$secure), main = "pourcentage d'hotels avec domaine de messagerie propre",
ylab="résultats en %", las = 1,
cex.names=0.8,
font.axis = 2)
|
8f317db016d2e33e3b755dca33225424871b15fd | eb1f09729cdfb035b1b67afc9133a73fb0cf6f67 | /tests/testthat/test-parallel.R | a3a680611c820bfdccff26b3010a3f8400990393 | [
"MIT"
] | permissive | ashbythorpe/nestedmodels | 25993d66a18a19e5aa9f2fe29c823bc74af74ddc | ccda3e5a7c5ccbdb1e993d1fcd2c58dab943f55f | refs/heads/main | 2023-05-23T03:48:33.871797 | 2023-03-19T23:48:24 | 2023-03-19T23:48:24 | 538,706,986 | 5 | 3 | NOASSERTION | 2023-03-22T19:24:24 | 2022-09-19T21:41:58 | R | UTF-8 | R | false | false | 1,861 | r | test-parallel.R | test_that("Fitting works in parallel", {
skip_if_not_installed("withr")
skip_if_not_installed("parallel")
skip_if_not_installed("doParallel")
withr::defer({
doParallel::stopImplicitCluster()
foreach::registerDoSEQ()
})
foreach::registerDoSEQ()
model <- parsnip::linear_reg() %>%
parsnip::set_engine("lm") %>%
nested(allow_par = TRUE)
expect_false(allow_parallelism(model$eng_args$allow_par, model))
nested_data <- tidyr::nest(example_nested_data, data = -id)
fit_1 <- fit(model, z ~ x + y + a + b, nested_data)
preds_1 <- predict(fit_1, example_nested_data)
cl <- parallel::makePSOCKcluster(2)
doParallel::registerDoParallel(cl)
expect_true(allow_parallelism(model$eng_args$allow_par, model))
fit_2 <- fit(model, z ~ x + y + a + b, nested_data)
preds_2 <- predict(fit_2, example_nested_data)
expect_equal(preds_1, preds_2)
parallel::stopCluster(cl)
})
test_that("Fitting workflows works in parallel", {
skip_if_not_installed("withr")
skip_if_not_installed("parallel")
skip_if_not_installed("doParallel")
skip_if_not_installed("workflows")
withr::defer({
doParallel::stopImplicitCluster()
foreach::registerDoSEQ()
})
foreach::registerDoSEQ()
model <- parsnip::linear_reg() %>%
nested(allow_par = TRUE)
recipe <- recipes::recipe(example_nested_data, z ~ .) %>%
step_nest(id, id2)
wf <- workflows::workflow() %>%
workflows::add_model(model) %>%
workflows::add_recipe(recipe)
fit_1 <- fit(wf, example_nested_data)
preds_1 <- predict(fit_1, example_nested_data)
cl <- parallel::makePSOCKcluster(2)
doParallel::registerDoParallel(cl)
fit_2 <- fit(wf, example_nested_data)
preds_2 <- predict(fit_2, example_nested_data)
expect_equal(preds_1, preds_2)
parallel::stopCluster(cl)
})
|
c718c348bf4cf89da6aac458dbbfb6a468b096e2 | 2dde5edca28c49fcc62f56b1903c0e9ac584f7bc | /basic_commands.R | dc261a7a615bc58d2a587f657ce4f4f1a21906ab | [] | no_license | KudasaiCode/R-practice | 8f7db080d8b55daee89c694cd82c3b4d4575e72f | 9dcb8ae930d21fff42cf837654a39129f1a91029 | refs/heads/master | 2020-05-17T19:37:42.295121 | 2019-04-28T14:37:35 | 2019-04-28T14:37:35 | 183,919,909 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,093 | r | basic_commands.R | # c() concatenates
# arguments are a 1D vector
x = c(1,2,3,4)
y = c(11, 12, 13, 14)
length(x)
length(y)
# length = 4
z = x+y
z
# ls() allows us to see
# all saved objects in memory
ls() # so far "x", "y", and "z" are in memory
# rm() removes objects
rm(y)
ls() # only "x" and "z" are objects now
# Removes all object at once
rm(list = ls())
ls() # >character(0) there are no objects in memory
########
# matrix()
m = matrix(data=c(10,11,12,13,14,15), nrow=3, ncol=2)
m
# the 'data= , nrow= , ncol= can be ommited'
m2 = matrix(c(1,2,3,4), 2, 2)
m2
# By Default, it adds items by column
# byrow=TRUE argument to change that
m3 = matrix(c(10,20,30,40,50, 60), 3, 2, byrow=TRUE)
m3_bycol = matrix(c(10,20,30,40,50,60), 3, 2)
m3_bycol
ls()
# sqrt()
root_m3 = sqrt(m3)
"rooted"
root_m3
"not rooted"
m3
# ^ exponential
x = 2^2
m = matrix(c(2,12,15,100,14,15,15,13), 4, 2)
"squared matrix"
m_squared = m^2
m_squared
m_rooted = sqrt(m_squared)
"sqrt(squared matrix) gives us back the original"
m_rooted
m_cubed = m^3
"orig matrix cubed"
m_cubed
m_cube_root = m_cubed^(1/3)
m_cube_root |
31b05dc7c0a5cad5a5c4419f4a717bdcfc5448a3 | 84fa82223d5ec9eb87e33602f20a17b7b12d3cfc | /GetGEO.R | 0f2ebf54fa1a71caf9ec1b8e1f9cd691fd8d9051 | [] | no_license | jsacco1/R-bioinformatics | 2d08c475ec9ca2af11abe620c987bcd6a3ef286f | 731823e86a4c85b52c01e9a28aab3f4d7ef171a4 | refs/heads/master | 2023-01-29T21:12:04.145467 | 2020-12-15T23:10:58 | 2020-12-15T23:10:58 | 120,352,445 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 261 | r | GetGEO.R | ---
#title: 'RNA-Seq with knockdown'
#author: "James Sacco"
#date: "`r Sys.Date()`"
#output:
# clear
rm(list=ls())
# load modules
library(GEOquery)
library(exprso)
studyID <- 'GSE159049'
gse = getGEO(GEO = studyID)
gse[[1]]
# get dependencies
sessionInfo()
|
f68c023dfcfb17a79fb4930e2625dc641f0f5dd1 | bf913debfbdb37ef69bb2e68107d39dea967ee65 | /readLog_multi_files.R | 721f0f12579986f2a8a60f5559745b828b58a60c | [] | no_license | hoangvietanh/read_postgre_log | 6c2074fa8ae16136dfb959586f7dc8e077cb34be | afff71d948d6ad7ea1529a8f5969ec306a73575a | refs/heads/master | 2021-01-01T20:13:20.591739 | 2017-07-30T10:06:15 | 2017-07-30T10:06:15 | 98,789,108 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,662 | r | readLog_multi_files.R | # Url
url_root = "http://118.70.184.30:8003/log/"
# Require packages
library(httr)
library(plyr)
library(stringr)
library(RCurl)
# function
read_url = function(url){
get_url = GET(url)
df = read.table(text=content(get_url, as="text"), sep=",", header=TRUE, skip=2)
df = as.data.frame(df)
names(df) = "content"
return(df)
}
# Read multi files
content_url = getURL(url_root, ftp.use.epsv = FALSE, dirlistonly = TRUE)
get_files_name = ldply(str_match_all(content_url, "postgresql-\\d{4}-\\d{2}-\\d{2}_\\d{6}.log"))
names(get_files_name) = "file_name"
get_files_name$root_url = url_root
get_files_name$ulr = paste(get_files_name$root_url, get_files_name$file_name, sep = "")
list_files = split(get_files_name, get_files_name$ulr)
# 02. Subset df
i = 1
n = seq_along(list_files)
list_read = list()
list_subset = list()
list_spl = list()
value = list()
date = list()
time = list()
result = list()
for(i in n){
list_read[[i]] = read_url(list_files[[i]]$ulr)
list_subset[[i]] = list_read[[i]][grepl('.*parameters*', list_read[[i]]$content), ]
list_spl[[i]] = as.data.frame(str_split_fixed(list_subset[[i]], "=", 2))
value[[i]] = list_spl[[i]][2]
date[[i]] = gsub("\\ .*","",list_spl[[i]]$V1)
time[[i]] = ldply(str_match_all(list_spl[[i]]$V1, "\\d{2}\\:\\d{2}\\:\\d{2}"))
result[[i]] = data.frame(date[[i]], time[[i]], value[[i]])
names(result[[i]]) = c("date", "time", "value")
i = i + 1
}
df = do.call(rbind, result)
setwd("c:/todel")
write.csv(df,"result.csv")
View(df)
|
46f6bb9a23d79230b81bb0db3be500832fdd7080 | f246c1d04aeefad2f1595fb9910774a75b98f635 | /Analysis of Variance/mycode[2].R | 239181bb93a6536e96370a827571d93dd1a4da7c | [] | no_license | chouligi/Statistical-Modeling | 542a2ce26add3c5f6db6a5c2f8a85c7b4d8ee85f | 9dc229b0fb9ab92f4930dd45ce879b288ab21f11 | refs/heads/master | 2021-09-04T11:45:42.715012 | 2018-01-18T11:55:57 | 2018-01-18T11:55:57 | 117,973,646 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,358 | r | mycode[2].R | data = chickwts
#Create the boxplot
boxplot(weight~feed,names = c("Casein","Horsebean","Linseed","Meatmeal","Soybean","Sunflower"),
main = "Boxplots of the distribution of chick weights",xlab = "Feed Supplement",ylab = "Weight (grams)",data = data)
#obtain the number of observations and the levels
n = length(data$weight)
i = length(levels(data$feed))
# obtain the grand mean
gm = mean(data$weight)
#create data frame with 60 observations, 10 for each level
#d_horsebean = subset(data, data$feed == "horsebean")
#d_linseed = subset(data, data$feed == "linseed")
#d_soybean = subset(data, data$feed == "soybean")
#d_sunflower = subset(data, data$feed == "sunflower")
#d_meatmeal = subset(data, data$feed == "meatmeal")
#d_casein = subset(data, data$feed == "casein")
#set.seed(10)
#sample_linseed = d_linseed[sample(1:nrow(d_linseed), 10, replace=FALSE),]
#sample_horsebean = d_horsebean[sample(1:nrow(d_horsebean), 10, replace=FALSE),]
#sample_soybean = d_soybean[sample(1:nrow(d_soybean), 10, replace=FALSE),]
#sample_sunflower = d_sunflower[sample(1:nrow(d_sunflower), 10, replace=FALSE),]
#sample_meatmeal = d_meatmeal[sample(1:nrow(d_meatmeal), 10, replace=FALSE),]
#sample_casein = d_casein[sample(1:nrow(d_casein), 10, replace=FALSE),]
#####combine to a dataframe
#new_data = rbind(sample_linseed,sample_horsebean,sample_soybean,sample_sunflower,sample_meatmeal,sample_casein)
#Obtain Incidence Matrix and Response Variable
response = data$weight
feed = data$feed
X = model.matrix(~ feed - 1) # -1 indicates that we remove the intercept
#check the rank of the matrix to verify that it is of full rank
I = qr(X)$rank #Obtain the rank of matrix
X_T = t(X) #X_Transpose
#betahat
betaH = solve(X_T %*% X) %*% X_T %*% response #inverse obtained by solve function
#residuals
e_hat = response - X %*% betaH
#Residuals Sum of Squares
SSE = t(response - X %*% betaH) %*% (response - X %*% betaH)
cat("Residuals Sum of Squares: ",SSE )
wgDf = n - I
meanSSE = SSE/wgDf
cat("Within Groups DF: ", wgDf,"Mean Value: ", meanSSE)
#obtain unbiased estimator of variance
var = SSE / (n-I)
#Between groups sum of squares
#manually
n1=10
n2=12
n3=14
n4=12
n5=11
n6=12
y1=mean(data$weight[1:10])
y2=mean(data$weight[11:22])
y3=mean(data$weight[23:36])
y4=mean(data$weight[37:48])
y5=mean(data$weight[49:59])
y6=mean(data$weight[60:71])
bgSS = n1*(y1-gm)^2+n2*(y2-gm)^2+n3*(y3-gm)^2+n4*(y4-gm)^2+n5*(y5-gm)^2+n6*(y6-gm)^2
cat("Between Groups Sum of Squares: ", bgSS)
bgDf = I - 1
cat("Between Groups DF: ", bgDf)
meanbg = bgSS/bgDf
#total Sum of Squares
TSS = bgSS + SSE
cat("Total Sum of Squares: ", TSS)
#F statistic
f = meanbg/meanSSE
f
cat("F value: ", f)
#obtain p value to determine influence of factor feed supplement
pv = pf(f, bgDf, wgDf, lower.tail = FALSE, log.p = FALSE)
pv
cat("P value:", pv)
#same analysis using anova function
model = aov(weight~feed,data=data)
anova(model)
#check model assumptions
#normality
fit = X %*% betaH
plot(fit,e_hat,xlab="Fitted Values",ylab="Residuals",main= "Plot of the residuals against the fitted values", pch=20, cex=1, col="blue")
abline(a=0, b=0, lty= 2)
qqnorm(e_hat, cex = 1, pch= 20)
qqline(e_hat,lty=3,col="blue")
shapiro.test(e_hat)
## Bartlett's test of homogeneity (homoscedasticity) of variances
bartlett.test(weight~feed,data=data) # H0: homoscedasticity
|
dc040366a87ae64e3c3823d3266eda7cfc7f16f5 | 6bda8f0e8f95220c2ae5e386f1a690fec7ce265f | /root/model/census/download_acs.R | edd3b3f8c2d60bba479e1a20b523424ea9f61e11 | [] | no_license | albabnoor/tlumip | 142e671782ee93d987de47cf40e49f3b7f1edc40 | fbd7d70b87436ac52b01e6e54a4fec31c39ee25c | refs/heads/master | 2020-07-01T08:30:36.694266 | 2019-08-07T18:47:56 | 2019-08-07T18:47:56 | 201,107,427 | 0 | 0 | null | 2019-08-07T18:36:31 | 2019-08-07T18:36:30 | null | UTF-8 | R | false | false | 750 | r | download_acs.R | setwd("c:/projects")
regions = c("pwa","por","pnv","pid","pca","hwa","hor","hnv","hid","hca")
for(region in regions) {
print(region)
#2009 5 year ACS PUMS
url = paste0("http://www2.census.gov/programs-surveys/acs/data/pums/2009/5-Year/csv_", region, ".zip")
outfile = paste0("ss09",region,".zip")
download.file(url, outfile)
unzip(outfile)
file.remove("ACS2005-2009_PUMS_README.pdf")
file.remove(outfile)
#2017 5 year ACS PUMS
url = paste0("http://www2.census.gov/programs-surveys/acs/data/pums/2017/5-Year/csv_", region, ".zip")
outfile = paste0("ss17",region,".zip")
download.file(url, outfile)
unzip(outfile)
file.remove("ACS2013_2017_PUMS_README.pdf")
file.remove(outfile)
} |
62bef538ae171e26522cf021036b256d23f82092 | d55e00329297b6e5dcdd3fc92409149e5c539ab1 | /FullNetworkCSV.R | 47094cc6dce42b194dc9417a3c12fa1594d80a3c | [] | no_license | Buyannemekh/GenerateGraphs | 5073d36e6fda054247e2a591a40e584f33d1207f | 70bc239f86ae23bdf9451ae826e9c7d7ac886c0c | refs/heads/master | 2020-03-21T01:14:47.610895 | 2018-06-19T18:47:20 | 2018-06-19T18:47:20 | 137,931,850 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,494 | r | FullNetworkCSV.R | install.packages('statnet')
library(statnet)
## Download and install the package
install.packages("igraph")
## Load package
library(igraph)
#MODEL WITH triads
numNodes <- 100
avgDegree <- 3
avgTriads <- 1
#Clustering Coefficient = avgTriads / ((avgDegree * (avgDegree - 1))/2)
triadModel.net <- network.initialize(numNodes, directed=F)
triadModel.edges <- (avgDegree * numNodes) / 2
triadModel.triangle <- (avgTriads * numNodes)
summary(triadModel.net)
triadModel.target.stats <- c(triadModel.edges, triadModel.triangle)
triadModel.fit <- ergm(triadModel.net ~ edges + gwesp(0.25,fixed=T) , target.stats = triadModel.target.stats)
summary(triadModel.fit)
triadModel.sim1 <- simulate(triadModel.fit)
summary(triadModel.sim1 ~ edges + triangles)
adj_mat <- triadModel.sim1[,]
write.table(adj_mat,file="./ERGMnetworks/adj_mat1000_d8t45.csv", sep = ",", row.names = FALSE, col.names = FALSE)
#Create graph from adjacency matrix
k <- graph_from_adjacency_matrix(adj_mat, mode = c("undirected"), weighted = NULL, diag = TRUE,
add.colnames = NULL, add.rownames = NA)
coords = layout.fruchterman.reingold(k)
plot(k, layouts=coords, vertex.size=3, vertex.label=NA)
degree_distribution(k)
#Decompose it to giant component
cl = clusters(k)
cl$no
table(cl$csize)
m <- decompose.graph(k)[[which(cl$csize==max(cl$csize))]]
mat_giant <- as_adjacency_matrix(m)
adj_mat_giant <- as.data.frame(as.matrix(mat_giant))
plot(m, layouts=coords, vertex.size=3, vertex.label=NA)
|
bcfde68e548bb7943c56c750cb41c4b930a51936 | 5e832862b2e36be6ba27e874e98499bc399de699 | /man/calc.genoprob.intensity.Rd | b84f7e1a7fe6f73a1579d0735fc5dab88764856a | [] | no_license | dmgatti/DOQTL | c5c22306053ddbd03295207702827cf2a715bb70 | a1a4d170bf5923ca45689a83822febdb46ede215 | refs/heads/master | 2021-01-17T02:08:27.831277 | 2019-05-24T19:22:35 | 2019-05-24T19:22:35 | 13,506,518 | 15 | 12 | null | 2019-02-27T13:46:31 | 2013-10-11T18:33:24 | R | UTF-8 | R | false | false | 2,979 | rd | calc.genoprob.intensity.Rd | \name{calc.genoprob.intensity}
\alias{calc.genoprob.intensity}
\title{Calculate the founder genotype probabilities at each SNP.}
\description{
This function performs genome reconstruction using allele intensities. We recommend using allele intensities where available because they often produce better genotype reconstructions.
}
\usage{
calc.genoprob.intensity(data, chr, founders, snps, output.dir = ".", trans.prob.fxn,
plot = FALSE)
}
\arguments{
\item{data}{
A list with named elements containing the information needed to reconstruct genomes.
When method = intensity:
x: Numeric matrix, num.samples x num.snps, with X intensities for all samples. Sample IDs and SNP IDs must be in rownames and colnames.
y: Numeric matrix, num.samples x num.snps, with Y intensities for all samples. Sample IDs and SNP IDs must be in rownames and colnames.
sex: Character vector, containing "M" or "F" indicating sex. Sample IDs must be in names.
gen: Character matrix containing the generation of DO outbreeding for each sample. For the DO, this should be "DO" followed by a number with no space between them. For CC mice, this should be CC. Sample IDs must be in names.
}
\item{chr}{
Character vector containing chromosomes to run. Must match the chromosome IDs in the snps table. "all" (default) will run all chromosomes.
}
\item{founders}{
List containing founder information for non-DO or CC crosses. \emph{Not required for DO.}
When method = intensity:
x: Numeric matrix, num.samples x num.snps, with X intensities for all founders and F1s (if available). Sample IDs and SNP IDs must be in rownames and colnames.
y: Numeric matrix, num.samples x num.snps, with Y intensities for all founders and F1s (if available). Sample IDs and SNP IDs must be in rownames and colnames.
sex: Character vector, containing "M" or "F" indicating sex. Sample IDs must be in names.
code: Character vector containing two letter genotype codes for each founder sample. Sample IDs must be in names.
}
\item{snps}{
Data.frame containing the marker locations. SNP ID, chromosome, Mb anc cM locations in columns 1 through 4, respectively. \emph{Not required for DO.}
}
\item{output.dir}{
Character string containing the full path where output should be written. The directory must exist already.
}
\item{trans.prob.fxn}{
FALSEunction to call to estimate the transition probabilities between markers for non-DO samples. \emph{Not required for DO.}
}
\item{plot}{
Boolean that is true if the user would like to plot a sample chromosome as the model progresses. Default = TRUE.
}
}
\value{
No value is returned. The output files are written to output.dir.
}
\author{
Daniel Gatti
}
\examples{
\dontrun{
calc.genoprob.intensity(data, chr, founders, snps, output.dir = ".", trans.prob.fxn,
plot = FALSE)
}
}
\keyword{ MUGA }
\keyword{ genotyping }
\keyword{ HMM }
|
f4ea61526cbfe1aa0f50bdb78bff9dc0f8debab1 | d861c6421c8b5b429c27ef32f6570e8c8b0a9909 | /getVitalRates_CoVariance.R | 2d626dce6bb8b6fd581a3b2d3591564b22be1020 | [] | no_license | MariaPaniw/patterns_temporal_autocorrelation | e4861e8ced006b47fe188a8b3c4b9986eb9900ab | 1e53a1fe05c413778e3d530b90234d6e95dd9ce4 | refs/heads/master | 2021-03-30T17:28:21.575252 | 2017-12-11T10:37:07 | 2017-12-11T10:37:07 | 78,664,954 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,584 | r | getVitalRates_CoVariance.R | # Script for Paniw et al. XXXXXX - Appendix S1
# This script peruses through COMPADRE and COMADRE and outputs vital rates,
# vital rate classes, and a vital-rate correlation matrix for 109 (sub)species
# with at least 3 annual matrices.
#
# Inputs (all expected in the directory assigned to `dir` below):
#   COMPADRE_v.4.0.0.RData / COMADRE_v.2.0.0.RData : matrix databases
#   matsMean         : presumably provides `mats` (average vital rates),
#                      used by the main loop below -- TODO confirm
#   phyloSpecies.csv : species/population metadata, read into `sp.data`
#
# Author: Maria Paniw
# Created: 19 Aug 2011
# Clean memory (interactive convenience; note this wipes the current workspace)
rm(list = ls(all.names = FALSE))  # was `all.names=F`; F can be reassigned, FALSE cannot
library(stringr)
library(plyr)
# Set the working directory, then load the COMPADRE/COMADRE data.
# BUG FIX: the original did `dir <- setwd(path)`, but setwd() returns the
# *previous* working directory (invisibly), so the load() calls below
# resolved against the wrong folder. Assign the path explicitly, then setwd().
dir <- "/Users/mariapaniw/Dropbox/TempAutoProject/SuppMat" # CHANGE THIS TO YOUR DIRECTORY
setwd(dir)
load(file.path(dir, "COMPADRE_v.4.0.0.RData"))
load(file.path(dir, "COMADRE_v.2.0.0.RData"))
# load average vital rates
load("matsMean")
# get IDs of species:
sp.data <- read.csv("phyloSpecies.csv")
# 109 species with at least 3 annual matrices
ID <- c(
  "Primula_elatior", "Eryngium_cuneifolium", "Agrimonia_eupatoria",
  "Coryphantha_robbinsorum", "Petrocoptis_pseudoviscosa_2", "Papio_cynocephalus",
  "Oenothera_deltoides", "Ardisia_elliptica", "Lotus_arinagensis",
  "Mammillaria_napina", "Taxus_floridana", "Eryngium_alpinum",
  "Propithecus_verreauxi", "Cleistes_divaricata_var._bifaria",
  "Cleistes_divaricata_var._divaricata", "Phyllanthus_indofischeri",
  "Mimulus_cardinalis", "Xenosaurus_grandis", "Geum_rivale",
  "Limonium_geronense", "Erodium_paularense", "Mimulus_lewisii",
  "Arabis_fecunda", "Atriplex_acanthocarpa", "Atriplex_canescens",
  "Astragalus_peckii", "Sapium_sebiferum", "Helianthemum_polygonoides",
  "Antirrhinum_lopesianum", "Rumex_rupestris", "Castanea_dentata",
  "Cytisus_scoparius", "Purshia_subintegra", "Calochortus_lyallii",
  "Limonium_malacitanum", "Xenosaurus_platyceps", "Cimicifuga_elata",
  "Silene_spaldingii", "Dicerandra_frutescens", "Asplenium_adulterinum",
  "Asplenium_cuneifolium", "Polemonium_van-bruntiae", "Lathyrus_vernus",
  "Pyrrocoma_radiata", "Cirsium_vulgare_3", "Cimicifuga_rubifolia",
  "Silene_acaulis", "Umbonium_costatum", "Astroblepus_ubidiai",
  "Ramonda_myconi", "Cercopithecus_mitis", "Primula_farinosa",
  "Gorilla_beringei", "Dioon_caputoi", "Anser_caerulescens",
  "Orchis_purpurea", "Liatris_scariosa", "Abies_concolor", "Abies_magnifica",
  "Mammillaria_hernandezii_2", "Lomatium_bradshawii", "Horkelia_congesta",
  "Cecropia_obtusifolia", "Oxytropis_jabalambrensis", "Astragalus_scaphoides_2",
  "Primula_veris_2", "Armeria_merinoi", "Lomatium_cookii", "Pinus_strobus",
  "Succisa_pratensis_3", "Scolytus_ventralis_2", "Euterpe_edulis",
  "Anthropoides_paradiseus", "Cirsium_palustre", "Lupinus_tidestromii",
  "Orcinus_orca_2", "Cypripedium_calceolus", "Shorea_leprosula",
  "Phyllanthus_emblica_3", "Molinia_caerulea", "Calathea_ovandensis",
  "Paramuricea_clavata", "Cryptantha_flava", "Cypripedium_fasciculatum",
  "Callospermophilus_lateralis", "Colias_alexandra", "Brachyteles_hypoxanthus",
  "Mammillaria_huitzilopochtli", "Catopsis_compacta", "Tillandsia_violacea",
  "Cebus_capucinus", "Santolina_melidensis", "Astragalus_tremolsianus",
  "Zea_diploperennis", "Astragalus_tyghensis", "Actaea_spicata",
  "Plantago_media", "Ovis_aries_2", "Calocedrus_decurrens",
  "Neobuxbaumia_macrocephala", "Neobuxbaumia_mezcalaensis",
  "Neobuxbaumia_tetetzo", "Helianthemum_juliae",
  "Vella_pseudocytisus_subsp._paui", "Cirsium_pitcheri_8",
  "Ambloplites_rupestris_2", "Cottus_bairdi", "Etheostoma_flabellare_2",
  "Tillandsia_macdougallii")
# keep only the target species, and only their individual (annual) matrices
sp.data <- droplevels(sp.data[sp.data$SpeciesAuthor %in% ID &
                              sp.data$MatrixComposite == "Individual", ])
## take out problematic populations
## ROBUSTNESS FIX: use a logical mask rather than `-which(...)`; with
## `-which(...)`, an empty match (integer(0)) would silently drop ALL rows.
bad.pops <- c("Transitional Forest", "Morningside Nature Center",
  "Bottomland hardwood forest", "Young mixed pine-hardwood forest",
  "Bull Flat", "Campion Crest", "Pass", "Ridge", "Wawona", "Abbotts Langdon",
  "Haynes Creek", "Sheep Corral Gulch", "La Pedrera", "Plot 1", "Plot 2",
  "Plot 4", "Site 4", "Schinus thicket", "S2")
sp.data <- droplevels(sp.data[!(sp.data$MatrixPopulation %in% bad.pops), ])
### FOR REAL VITAL RATES
# one entry per species; filled by the main loop that follows
matsVarCov <- vector("list", length(unique(sp.data$SpeciesAuthor)))
# Main loop: one iteration per retained species.
# For every population ("site") of the species, pull all individual (annual)
# MPMs from COMPADRE (plants/algae) or COMADRE (animals), decompose each
# annual U matrix into survival / progression / retrogression rates, extract
# fecundities from the F matrix, and compute the across-year Spearman
# correlation and variance of those vital rates per site. Site-level results
# are then averaged across sites and stored in matsVarCov[[i]].
for(i in 1:length(unique(sp.data$SpeciesAuthor))){
# subset species and sites
sp=as.character(unique(sp.data$SpeciesAuthor)[i])
site=as.character(unique(sp.data$MatrixPopulation[sp.data$SpeciesAuthor==sp]))
## periodicity: used below to annualize survival (as a power) and
## fecundity (as a multiplier)
per=sp.data$AnnualPeriodicity[sp.data$SpeciesAuthor==sp][1]
# empty lists to hold variance (varvar) and correlation (varcov), one slot per site
varvar=vector("list", length(site))
varcov=vector("list", length(site))
for(j in 1:length(site)){
# FOR PLANTS/ALGAE: kingdoms Plantae and Chromalveolata are queried in COMPADRE
if(sp.data$Kingdom[sp.data$SpeciesAuthor==sp][1]=="Plantae"|sp.data$Kingdom[sp.data$SpeciesAuthor==sp][1]=="Chromalveolata"){
index=which(compadre$metadata$SpeciesAuthor==sp &
compadre$metadata$MatrixPopulation==site[j] &
compadre$metadata$MatrixComposite == "Individual")
matsVar=compadre$mat[index]
# mean MPM for species (duplicated()==FALSE selects the species' first
# database entry -- presumably its mean matrix; confirm against COMPADRE)
indexMU=which(duplicated(compadre$metadata$SpeciesAuthor)==FALSE&
compadre$metadata$SpeciesAuthor==sp)
matU=compadre$mat[indexMU][[1]]$matU
matF=compadre$mat[indexMU][[1]]$matF
# FOR ANIMALS: all other kingdoms are queried in COMADRE
}else{
index=which(comadre$metadata$SpeciesAuthor==sp &
comadre$metadata$MatrixPopulation==site[j] &
comadre$metadata$MatrixComposite == "Individual")
matsVar=comadre$mat[index]
# mean MPM for species (same first-entry convention as above)
indexMU=which(duplicated(comadre$metadata$SpeciesAuthor)==FALSE&
comadre$metadata$SpeciesAuthor==sp)
matU=comadre$mat[indexMU][[1]]$matU
matF=comadre$mat[indexMU][[1]]$matF
}
# fix matrices for individual species: hand-curated removal of problem
# matrices, plus one zeroed fecundity cell for Colias_alexandra
if(sp=="Orcinus_orca_2") matsVar<-matsVar[-25]
if(sp=="Cirsium_vulgare_3") matsVar<-matsVar[-c(1,4,8)]
if(sp=="Colias_alexandra"){
matsVar<-matsVar[-c(5,7)]
matsVar[[1]]$matF[1,7]=0
}
# Sum reproduction across years (to deal with 0 fecundities for some years).
# Fec.mu is only ever used via which(Fec.mu>0), i.e. as the union of cells
# with nonzero fecundity in ANY year, so it is never divided by length(matsVar).
Fec.mu=matrix(0,dim(matsVar[[1]]$matF)[1],dim(matsVar[[1]]$matF)[1])
for(bb in 1:length(matsVar)){
Fec.mu=Fec.mu+matsVar[[bb]]$matF
}
# get vital rates per MPM per site per species; each row of vr.all = one year
vr.all=NULL
for(b in 1:length(matsVar)){
### survival: column sums of U, raised to `per`
surv=colSums(matsVar[[b]]$matU)^per # account for periodicity
if(surv[length(surv)]>0.99999) surv[length(surv)]<-0.995 # prevent survival of last stage/age to be 1 - it will make simulations unstable
names(surv)=paste("s",1:length(surv),sep="")
# split U into conditional transition probabilities (U.mat.g: each column
# rescaled to sum to 1) and transitions scaled by annualized survival (U.mat)
U.mat=matsVar[[b]]$matU
U.mat.g=U.mat
for(xx in 1:ncol(U.mat.g)){
U.mat.g[,xx]=U.mat.g[,xx]/colSums(U.mat)[xx]
U.mat[,xx]=U.mat.g[,xx]*surv[xx]
}
# columns with zero survival divide by 0 above (NaN); zero them out
U.mat[!is.finite(U.mat)]=0
U.mat.g[!is.finite(U.mat.g)]=0
# progression (growth): below-diagonal entries of U.mat.g, named g<to><from>
gr=U.mat.g[lower.tri(U.mat.g)]
names=NULL
for(x in 1:length(surv[-1])){
x1=str_pad(x, 2, pad = "0")
x2=str_pad((x+1):length(surv), 2, pad = "0")
temp=paste("g",paste(x2,x1,sep=""),sep="")
names=c(names,temp)
}
names(gr)=names
# retrogression (shrinkage): above-diagonal entries, named r<to><from>
ret=U.mat.g[upper.tri(U.mat.g)]
names=NULL
for(x in 2:length(surv)){
x1=str_pad(x, 2, pad = "0")
x2=str_pad(1:(x-1), 2, pad = "0")
temp=paste("r",paste(x2,x1,sep=""),sep="")
names=c(names,temp)
}
names(ret)=names
# reproduction: tracked for the union of cells that are ever nonzero
# (which(Fec.mu>0)); if THIS year's F matrix is all zero, record zeros
if(length(which(matsVar[[b]]$matF>0))==0){
# placeholder maps each F cell to its linear index so expand.grid rows
# (row/col labels) can be matched against the nonzero cells of Fec.mu
placeholder=matrix(1:length(as.numeric(matsVar[[b]]$matF)),dim(matsVar[[b]]$matF)[1],dim(matsVar[[1]]$matF)[1])
colnames(placeholder)=rownames(placeholder)=1:dim(matsVar[[b]]$matF)[1]
fec.names=which(Fec.mu>0)
names=expand.grid(rownames(placeholder),colnames(placeholder))[placeholder%in%fec.names,]
names=interaction(str_pad(as.numeric(names$Var1),2,pad="0"),str_pad(as.numeric(names$Var2),2,pad="0"),sep="")
fec=paste("f",names ,sep="")
fec.value=rep(0,length(fec))
names(fec.value)=fec
}else{
# same bookkeeping as above, but pull this year's fecundity values
placeholder=matrix(1:length(as.numeric(matsVar[[b]]$matF)),dim(matsVar[[b]]$matF)[1],dim(matsVar[[1]]$matF)[1])
colnames(placeholder)=rownames(placeholder)=1:dim(matsVar[[b]]$matF)[1]
fec.names=which(Fec.mu>0)
names=expand.grid(rownames(placeholder),colnames(placeholder))[placeholder%in%fec.names,]
names2=as.numeric(rownames((names)))
names=interaction(str_pad(as.numeric(names$Var1),2,pad="0"),str_pad(as.numeric(names$Var2),2,pad="0"),sep="")
fec=paste("f",names ,sep="")
fec.value=as.numeric(matsVar[[b]]$matF)[names2]*per #account for periodicity
names(fec.value)=fec
}
# one row per annual matrix: survival, progression, retrogression, fecundity
vr=matrix(c(surv,gr,ret,fec.value),ncol=length(c(surv,gr,ret,fec.value)))
colnames(vr)=names(c(surv,gr,ret,fec.value))
# rbind.fill.matrix (plyr) pads with NA when later years add extra columns
if(b==1){
vr.all=rbind(vr.all,vr)
}else{vr.all=rbind.fill.matrix(vr.all,vr) }
}
# for each site: across-year correlation and variance of the vital rates
varcov.sub=cor(vr.all,method="spearman") # correlation
varcov.sub[is.na(varcov.sub)]=0
var.sub=diag(var(vr.all))# variance
var.sub[is.na(var.sub)]=0
varcov[[j]]=varcov.sub
varvar[[j]]=var.sub
}
# take mean correlation across sites
# NOTE(review): unlist()+array() assumes every site yields an identically
# sized and ordered correlation matrix; entries would silently misalign otherwise
varcov.a=array(unlist(varcov), dim = c(nrow(varcov[[1]]), ncol(varcov[[1]]), length(varcov)))
varcov.mu=apply(varcov.a,c(1,2),mean,na.rm=T)
colnames(varcov.mu)=rownames(varcov.mu)=colnames(varcov.sub)
# take mean variance across sites
varvar.a=array(unlist(varvar), dim = c(1, length(varvar[[1]]), length(varvar)))
varvar.mu=as.numeric(apply(varvar.a,c(1,2),mean,na.rm=T))
names(varvar.mu)=names(var.sub)
# remove vital rates with 0 variance from the correlation matrix
if(any(varvar.mu==0)){
sub=varcov.mu[-which(varvar.mu==0),-which(varvar.mu==0)]
sub2=varvar.mu[-which(varvar.mu==0)]
}else{
sub=varcov.mu
sub2=varvar.mu
}
# store the per-species output: variances, correlations, the species' U and
# F matrices, its mean vital rates, and its name
matsVarCov[[i]]$var=sub2
matsVarCov[[i]]$corr=sub
matsVarCov[[i]]$matU=matU
matsVarCov[[i]]$matF=matF
# NOTE(review): grep() does partial matching, so a species name that is a
# prefix of another species' name could select the wrong element of `mats` -- verify
matsVarCov[[i]]$vr.mu=mats[[which(sapply(lapply(mats, function(ch) grep(sp, ch)), function(x) length(x) > 0))]]$vr
matsVarCov[[i]]$species=sp
}
# save results (binary R object written to the working directory)
save(matsVarCov,file="matsVarCov")
|
6f410444d1ebe5f9d99d86037087467507169409 | 2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89 | /inst/testScripts/system/chipTypes/Mapping10K_Xba142/21.doCRMAv2,CBS.R | c5c43006a8389a674d25091145b4c8b358253b50 | [] | no_license | HenrikBengtsson/aroma.affymetrix | a185d1ef3fb2d9ee233845c0ae04736542bb277d | b6bf76f3bb49474428d0bf5b627f5a17101fd2ed | refs/heads/master | 2023-04-09T13:18:19.693935 | 2022-07-18T10:52:06 | 2022-07-18T10:52:06 | 20,847,056 | 9 | 4 | null | 2018-04-06T22:26:33 | 2014-06-15T03:10:59 | R | UTF-8 | R | false | false | 465 | r | 21.doCRMAv2,CBS.R | library("aroma.affymetrix")
# System test script for aroma.affymetrix, chip type Mapping10K_Xba142:
# run the CRMAv2 pipeline on the GSE8605 data set, then fit a CBS
# segmentation model to the result. Assumes library("aroma.affymetrix")
# has been loaded and the raw data are available in the standard aroma
# directory layout -- TODO confirm data availability before running.
verbose <- Arguments$getVerbose(-4, timestamp=TRUE)  # timestamped progress logger
dataSet <- "GSE8605"               # name of the data set to process
chipType <- "Mapping10K_Xba142"    # Affymetrix 10K SNP chip type
# doCRMAv2() runs the CRMAv2 preprocessing pipeline for this data set and
# chip type (presumably returning total copy-number estimates -- see the
# aroma.affymetrix documentation)
dsT <- doCRMAv2(dataSet, chipType=chipType, verbose=verbose)
print(dsT)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# CBS: segmentation model built on the CRMAv2 output
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
segB <- CbsModel(dsT)
print(segB)
# Try to segment (only arrays 1-2 and chromosome 19, keeping the run small)
fit(segB, arrays=1:2, chromosomes=19, verbose=verbose)
|
698b93040834359e1446693d66cb2729bfbf14db | 29891624cdb77ca6a43b683cc8d668612590e877 | /R/setup.asap.w.R | ebd391d68db530c30d658b6ac0ec1a9ca525ffb4 | [] | no_license | kellijohnson-NOAA/saconvert | e8f3d0aa853cf58a050826ccdf4aa35804b1556e | d004f5cee8af1edb27fe8a15ffac41cfc1ac61d6 | refs/heads/master | 2022-07-07T16:04:06.041578 | 2022-01-16T15:41:23 | 2022-01-18T14:39:01 | 230,995,952 | 0 | 2 | null | 2021-07-09T17:24:02 | 2019-12-30T23:56:04 | R | UTF-8 | R | false | false | 26,660 | r | setup.asap.w.R | # Code to take ICES format and convert to ASAP
# for ICES-WGMG projects
# assumptions for call to setup.asap begin ~line 742
# Liz Brooks
# Version 1.0
# also uses : SAM read.ices fn modified by Dan Hennen (starting line 422)
##
#rm(list=ls(all.names=F))
#graphics.off()
#==============================================================
## User specify below
#-------------------
#user.wd <- "" #user: specify path to working directory where ICES files are
#user.od <- "" #user: specify path to output directory
#model.id <- "CCGOMyt_" # user: specify prefix found on ICES files (will create same name for ASAP case)
#-------------------
#user.wd <- "C:/liz/SAM/GBhaddock/" # user: specify path to working directory where ICES files are
#user.od <- "C:/liz/SAM/GBhaddock/" # user: specify path to output directory
#model.id <- "GBhaddock_" # user: specify prefix found on ICES files (will create same name for ASAP case)
#-------------------
#user.wd <- "C:/liz/SAM/GBwinter/" # user: specify path to working directory where ICES files are
#user.od <- "C:/liz/SAM/GBwinter/" # user: specify path to output directory
#model.id <- "GBwinter_" # user: specify prefix found on ICES files (will create same name for ASAP case)
#-------------------
#user.wd <- "C:/liz/SAM/Plaice/" # user: specify path to working directory where ICES files are
#user.od <- "C:/liz/SAM/Plaice/" # user: specify path to output directory
#model.id <- "Plaice_" # user: specify prefix found on ICES files (will create same name for ASAP case)
#-------------------
#user.wd <- "C:/liz/SAM/NScod/" # user: specify path to working directory where ICES files are
#user.od <- "C:/liz/SAM/NScod/" # user: specify path to output directory
#model.id <- "ICEHerr_" # user: specify prefix found on ICES files (will create same name for ASAP case)
## *** Notes: had to append "NScod_" to all ICES filenames
#-------------------
#user.wd <- "C:/liz/SAM/ICEherring/" # user: specify path to working directory where ICES files are
#user.od <- "C:/liz/SAM/ICEherring/" # user: specify path to output directory
#model.id <- "ICEherring_" # user: specify prefix found on ICES files (will create same name for ASAP case)
# *** Notes: only VPA files available now; need to convert to ICES format before running this
#-------------------
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Function to set-up asap3 "west coast style"
# Liz Brooks
# Version 1.0
# Created 30 September 2010
# Last Modified: 18 September 2013
# 16 November 2017 for ices-wgmg
# 21 November 2017: tested & works on CCGOMyt, GBhaddock, GBwinter, Plaice, NScod
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#' @param wd working directory path (where files are read from)
#' @param od output directory path (where files are written)
#' @param model.id model identifier
#' @param nyears total number of years of data
#' @param first.year first year of data
#' @param asap.nages number of age classes (age 1 is first age class by default)
#' @param nfleets number of fishing fleets
#' @param nselblks total number of selectivity blocks (sum for all fleets)
#' @param n.ind.avail number of available indices (whether or you "turn them on" to be used)
#' @param M.mat matrix of natural mortality by age (col) and year (row)
#' @param fec.opt 0(use WAA*mat.age) or 1 (use empirical fecundity at age values)
#' @param t.spawn fraction of year elapsed prior to ssb calcs
#' @param mat.mat maturity matrix by age (col) and year (row)
#' @param n.waa.mats xxx
#' @param waa.array xxx
#' @param waa.pointer.vec xxx
#' @param sel.blks a vertical vector of nselblks*nyears
#' @param sel.types vector of length nselblks (1=by age; 2= logistic; 3= double logistic)
#' @param sel.mats nselblks X matrix(sel.specs, nrow= nages+6, ncol=4)
#' @param fleet.age1 starting age for selectivity by fleet
#' @param fleet.age2 ending age for selectivity by fleet
#' @param F.report.ages vector of 2 ages for summarizing F trend
#' @param F.report.opt option to report F as unweighted(1), Nweighted(2), Bweighted(3)
#' @param like.const flag to use(1) or not(0) likelihood constants
#' @param rel.mort.fleet flag for whether there is release mortality by fleet (nfleets entries)
#' @param caa.mats nfleets X cbind(matrix(caa, nyears,nages), tot.cat.biomass)
#' @param daa.mats nfleets X cbind(matrix(disc.aa, nyears, nages), tot.disc.biomass)
#' @param rel.prop nfleets X matrix(release.prop.aa, nyears, nages)
#' @param units.ind n.ind.avail vector for units (1=biomass, 2=number)
#' @param time.ind n.ind.avail vector for month index sampled
#' @param fish.ind link to fleet (-1 if no link, fleet.number otherwise)
#' @param sel.ind functional form for indices (n.ind.avail)
#' @param ind.age1 first age each index selects (n.ind.avail)
#' @param ind.age2 last age each index selects (n.ind.avail)
#' @param ind.use flag to use(1) or not(0) each index
#' @param ind.sel.mats n.ind.avail X matrix(sel.specs, nrow= nages+6, ncol=4)
#' the 6 additional are: Units, month, sel.link.to.fleet, sel.start.age, sel.end.age, use.ind
#' @param ind.mat n.ind.avail X matrix(index.stuff, nyears, ncol=nages+4)
#' ICES one-offs (calls function get.index.mat)
#' @param ind.cv one-off for ICES (CV assumed for all indices, all years)
#' @param ind.neff one-off for ICES (Effectice Number assumed for all indices, all years)
#' end ICES one-offs
#' @param p.Fmult1 phase for estimating F mult in 1st year
#' @param p.Fmult.dev phase for estimating devs for Fmult
#' @param p.recr.dev phase for estimating recruitment deviations
#' @param p.N1 phase for estimating N in 1st year
#' @param p.q1 phase for estimating q in 1st year
#' @param p.q.dev phase for estimating q deviations
#' @param p.SR phase for estimating SR relationship
#' @param p.h phase for estimating steepness
#' @param recr.CV vertical vector of CV on recruitment per year
#' @param lam.ind lambda for each index
#' @param lam.c.wt lambda for total catch in weight by fleet
#' @param lam.disc lambda for total discards at age by fleet
#' @param catch.CV matrix(CV.fleet, nyears, nfleets)
#' @param disc.CV matrix(CV.fleet, nyears, nfleets)
#' @param Neff.catch input effective sample size for CAA (matrix(Neff, nyears, nfleets)
#' @param Neff.disc input effective sample size for disc.AA (matrix(Neff, nyears, nfleets)
#' @param lam.Fmult.y1 lambda for Fmult in first year by fleet (nfleets)
#' @param CV.Fmult.y1 CV for Fmult in first year by fleet (nfleets)
#' @param lam.Fmult.dev lambda for Fmult devs by fleet (nfleets)
#' @param CV.Fmult.dev CV for Fmult deviations by fleet (nfleets)
#' @param lam.N1.dev lambda for N in 1st year devs
#' @param CV.N1.dev CV for N in 1st year devs
#' @param lam.recr.dev lambda for recruitment devs
#' @param lam.q.y1 lambda for q in 1st yr by index (n.ind.avail)
#' @param CV.q.y1 CV for q in 1st yr by index (n.ind.avail)
#' @param lam.q.dev lambda for q devs (n.ind.avail)
#' @param CV.q.dev CV for q devs (n.ind.avail)
#' @param lam.h lambda for deviation from initial steepness
#' @param CV.h CV for deviation from initial steepness
#' @param lam.SSB0 lambda for deviation from SSB0
#' @param CV.SSB0 CV for deviation from SSB0
#' @param naa.y1 vector(nages) of initial stock size
#' @param Fmult.y1 initial guess for Fmult in yr1 (nfleets)
#' @param q.y1 q in 1st year vector(n.ind.avail)
#' @param SSB0 initial unexploited stock size
#' @param h.guess guess for initial steepness
#' @param F.max upper bound on Fmult
#' @param ignore.guess flag to ignore(1) or not(0) initial guesses
#' @param do.proj flag to do(1) or not(0) projections
#' @param fleet.dir rep(1,nfleets)
#' @param proj.yr (nyears+2)
#' @param proj.specs matrix(proj.dummy, nrow=2, ncol=5)
#' @param do.mcmc 0(no) or 1(yes)
#' @param mcmc.nyr.opt 0(use.NAA.last.yr), 1(use.NAA.T+1)
#' @param mcmc.nboot number of mcmc iterations
#' @param mcmc.thin thinning rate for mcmc
#' @param mcmc.seed random number seed for mcmc routine
#' @param recr.agepro 0(use NAA), 1 (use S-R), 2(use geometric mean of previous years)
#' @param recr.start.yr starting year for calculation of R
#' @param recr.end.yr ending year for calculation of R
#' @param test.val -23456
#' @param fleet.names xxx
#' @param survey.names xxx
#' @param disc.flag T if discards present, F otherwise
#' @param catch.ages xxx
#' @param survey.ages xxx
setup.asap.w <-function(wd, od, model.id, nyears, first.year, asap.nages, nfleets,
nselblks, n.ind.avail, M.mat, fec.opt, t.spawn, mat.mat, n.waa.mats, waa.array, waa.pointer.vec,
sel.blks, sel.types, sel.mats, fleet.age1, fleet.age2, F.report.ages, F.report.opt,
like.const, rel.mort.fleet, caa.mats, daa.mats, rel.prop, units.ind, time.ind,
fish.ind, sel.ind, ind.age1, ind.age2, ind.use, ind.sel.mats, ind.mat, ind.cv, ind.neff,
p.Fmult1, p.Fmult.dev, p.recr.dev, p.N1, p.q1, p.q.dev, p.SR, p.h, recr.CV, lam.ind,
lam.c.wt, lam.disc, catch.CV, disc.CV, Neff.catch, Neff.disc, lam.Fmult.y1,
CV.Fmult.y1, lam.Fmult.dev, CV.Fmult.dev, lam.N1.dev, CV.N1.dev, lam.recr.dev,
lam.q.y1, CV.q.y1, lam.q.dev, CV.q.dev, lam.h, CV.h, lam.SSB0, CV.SSB0,
naa.y1, Fmult.y1, q.y1, SSB0, h.guess, F.max, ignore.guess,
do.proj, fleet.dir, proj.yr, proj.specs,
do.mcmc, mcmc.nyr.opt, mcmc.nboot, mcmc.thin, mcmc.seed,
recr.agepro, recr.start.yr, recr.end.yr, test.val,
fleet.names, survey.names, disc.flag, catch.ages, survey.ages ) {
# c.waa catch weight at age (col) and year (row)
# ssb.waa ssb weight at age (col) and year (row)
# jan1.waa jan-1 weight at age (col) and year (row)
#---------------------------------------------------------------------
#### SET-UP ASAP FILE
#_________________________________________________________________
out.file = paste(od,"ASAP_", model.id, ".dat", sep="")
write('# ASAP VERSION 3.0 setup by convert_ICES_asap.r', file=out.file, append=F)
write(paste('# MODEL ID ', model.id, sep=''),file=out.file,append=T)
write( '# Number of Years' , file=out.file,append=T)
write(nyears, file=out.file,append=T )
write('# First year', file=out.file,append=T) #proportion F before spawning
write(first.year, file=out.file,append=T ) #proportion M before spawning
write('# Number of ages', file=out.file,append=T) #single value for M
write(asap.nages, file=out.file,append=T ) #last year of selectivity
write('# Number of fleets', file=out.file,append=T) #last year of maturity
write(nfleets, file=out.file,append=T ) #last year of catch WAA
write('# Number of selectivity blocks', file=out.file,append=T) #last year of stock biomass
write(nselblks, file=out.file,append=T ) #number of F grid values
write('# Number of available indices', file=out.file,append=T) #
write(n.ind.avail, file=out.file,append=T ) #specifies BH or Ricker
write( '# M matrix' , file=out.file,append=T) #, ncolumns=(nyears))
write(t(M.mat), file=out.file,append=T, ncolumns=asap.nages)
write('# Fecundity option', file=out.file,append=T) #specifies normal or lognormal error
write(fec.opt, file=out.file,append=T) #
write('# Fraction of year elapsed before SSB calculation', file=out.file,append=T) #
write(t.spawn , file=out.file,append=T) #
write( '# MATURITY matrix' , file=out.file,append=T) #, ncolumns=(nyears))
write(t(mat.mat), file=out.file,append=T, ncolumns=asap.nages)
write( '# Number of WAA matrices' , file=out.file,append=T) #, ncolumns=(nyears))
write(n.waa.mats, file=out.file,append=T, ncolumns=asap.nages)
write( '# WAA matrix-1' , file=out.file,append=T) #, ncolumns=(nyears))
write(t(waa.array[,,1]), file=out.file,append=T, ncolumns=asap.nages)
if (n.waa.mats>1) {
for (j in 2:n.waa.mats) {
write(paste('# WAA matrix-',j, sep=""), file=out.file,append=T, ncolumns=asap.nages)
write(t(waa.array[,,j]), file=out.file,append=T, ncolumns=asap.nages)
} # end loop over j (for WAA matrices)
} # end if-test for n.waa.mat
#write('# test', file=out.file,append=T)
write( '# WEIGHT AT AGE POINTERS' , file=out.file,append=T) #, ncolumns=(nyears))
write(waa.pointer.vec, file=out.file,append=T, ncolumns=1)
write( '# Selectivity blocks (blocks within years)' , file=out.file,append=T) #, ncolumns=(nyears))
for(i in 1:nfleets)
{
write(paste0('# Fleet ', i, ' Selectivity Block Assignment') , file=out.file,append=T) #, ncolumns=(nyears))
write(sel.blks[(i-1)*nyears + 1:nyears], file=out.file,append=T, ncolumns=1)
}
write( '# Selectivity options for each block' , file=out.file,append=T) #, ncolumns=(nyears))
write(t(sel.types), file=out.file,append=T, ncolumns=nselblks)
temp = t(sel.mats)
temp = sel.mats
x = asap.nages+6
for(i in 1:nselblks)
{
write(paste0('# Selectivity Block #', i, " Data") , file=out.file,append=T) #, ncolumns=(nyears))
write(t(temp[(i-1)*x + 1:x,]), file=out.file,append=T, ncolumns=4)
}
write( '# Selectivity start age by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(fleet.age1, file=out.file,append=T, ncolumns=nfleets )
write( '# Selectivity end age by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(fleet.age2, file=out.file,append=T, ncolumns=nfleets )
write( '# Age range for average F' , file=out.file, append=T) #, ncolumns=(nyears))
write(F.report.ages, file=out.file,append=T, ncolumns=2)
write( '# Average F report option ' , file=out.file,append=T) #, ncolumns=(nyears))
write(F.report.opt, file=out.file,append=T, ncolumns=2)
write( '# Use likelihood constants?' , file=out.file,append=T) #, ncolumns=(nyears))
write(like.const, file=out.file, append=T )
write( '# Release Mortality by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write( rel.mort.fleet, file=out.file,append=T, ncolumns=nfleets)
#write( '# Catch at age matrices (nyears*nfleets rows)' , file=out.file,append=T) #, ncolumns=(nyears))
write( '# Catch Data', file=out.file,append=T) #, ncolumns=(nyears))
for(i in 1:nfleets)
{
write(paste0("# Fleet-", i, " Catch Data"), file=out.file,append=T)
write(t(caa.mats[(i-1)*nyears + 1:nyears,]), file=out.file,append=T, ncolumns= (asap.nages+1) )
}
write( '# Discards at age by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
for(i in 1:nfleets)
{
write(paste0("# Fleet-", i, " Discards Data"), file=out.file,append=T)
write(t(daa.mats[(i-1)*nyears + 1:nyears,]), file=out.file,append=T, ncolumns= (asap.nages+1) )
}
write( '# Release proportion at age by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
for(i in 1:nfleets)
{
write(paste0("# Fleet-", i, " Release Data"), file=out.file,append=T)
write(t(rel.prop[(i-1)*nyears + 1:nyears,]), file=out.file,append=T, ncolumns= asap.nages )
}
write( '# Survey Index Data' , file=out.file,append=T) #, ncolumns=(nyears))
write( '# Index units' , file=out.file,append=T) #, ncolumns=(nyears))
write(units.ind, file=out.file,append=T, ncolumns=n.ind.avail )
write( '# Index Age comp. units' , file=out.file,append=T) #, ncolumns=(nyears))
write(units.ind, file=out.file,append=T, ncolumns=n.ind.avail )
write( '# Index WAA matrix' , file=out.file,append=T) #, ncolumns=(nyears))
write((rep(1,n.ind.avail)), file=out.file,append=T, ncolumns=n.ind.avail )
write( '# Index month' , file=out.file, append=T) #, ncolumns=(nyears))
write(time.ind, file=out.file,append=T, ncolumns=n.ind.avail )
write( '# Index link to fleet? ' , file=out.file,append=T) #, ncolumns=(nyears))
write(fish.ind, file=out.file,append=T, ncolumns=n.ind.avail)
write( '# Index selectivity option ' , file=out.file,append=T) #, ncolumns=(nyears))
write(sel.ind, file=out.file,append=T, ncolumns=n.ind.avail)
write( '# Index start age' , file=out.file,append=T) #, ncolumns=(nyears))
write(ind.age1, file=out.file, append=T, ncolumns=n.ind.avail )
write( '# Index end age' , file=out.file,append=T) #, ncolumns=(nyears))
write(ind.age2, file=out.file, append=T, ncolumns=n.ind.avail )
write( '# Index Estimate Proportion (YES=1)' , file=out.file,append=T) #, ncolumns=(nyears))
write(t(rep(1,n.ind.avail)), file=out.file, append=T, ncolumns=n.ind.avail )
write( '# Use Index' , file=out.file,append=T) #, ncolumns=(nyears))
write(ind.use, file=out.file, append=T, ncolumns=n.ind.avail )
x = asap.nages+6
for(i in 1:n.ind.avail)
{
write(paste0('# Index-', i, ' Selectivity Data') , file=out.file,append=T) #, ncolumns=(nyears))
write(t(ind.sel.mats[(i-1)*x + 1:x,]), file=out.file,append=T, ncolumns=4)
}
write( '# Index data matrices (n.ind.avail.*nyears)' , file=out.file,append=T) #, ncolumns=(nyears))
# ----------one-off for ICES to ASAP
for ( kk in 1:length(ind.use)) {
if (ind.use[kk]==1) {
write( paste0('# Index ', survey.names[kk]) , file=out.file,append=T) #, ncolumns=(nyears))
tmp.s <- ind.mat[[kk]]
ind.mat2 <- get.index.mat(tmp.s, ind.cv, ind.neff, first.year, nyears, catch.ages, survey.ages[[kk]])
write(t(ind.mat2), file=out.file,append=T, ncolumns=(asap.nages + 4) )
} # end ind.use test
} #end kk loop
# ----------one-off for ICES to ASAP
write( '#########################################' , file=out.file,append=T) #, ncolumns=(nyears))
write( '# Phase data' , file=out.file,append=T) #, ncolumns=(nyears))
write( '# Phase for Fmult in 1st year' , file=out.file,append=T) #, ncolumns=(nyears))
write(p.Fmult1, file=out.file,append=T )
write( '# Phase for Fmult deviations' , file=out.file, append=T) #, ncolumns=(nyears))
write(p.Fmult.dev, file=out.file,append=T )
write( '# Phase for recruitment deviations ' , file=out.file,append=T) #, ncolumns=(nyears))
write(p.recr.dev, file=out.file,append=T )
write( '# Phase for N in 1st year ' , file=out.file,append=T) #, ncolumns=(nyears))
write(p.N1, file=out.file,append=T )
write( '# Phase for catchability in 1st year' , file=out.file,append=T) #, ncolumns=(nyears))
write(p.q1, file=out.file, append=T )
write( '# Phase for catchability deviations' , file=out.file,append=T) #, ncolumns=(nyears))
write(p.q.dev, file=out.file, append=T )
write( '# Phase for stock recruit relationship' , file=out.file,append=T) #, ncolumns=(nyears))
write(p.SR, file=out.file, append=T )
write( '# Phase for steepness' , file=out.file,append=T) #, ncolumns=(nyears))
write(p.h, file=out.file,append=T )
write( '#########################################' , file=out.file,append=T) #, ncolumns=(nyears))
write( '# Lambdas and CVs' , file=out.file,append=T) #, ncolumns=(nyears))
write( '# Recruitment CV by year' , file=out.file,append=T) #, ncolumns=(nyears))
write(recr.CV, file=out.file,append=T , ncolumns=1 )
write( '# Lambda for each index' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.ind, file=out.file,append=T, ncolumns=n.ind.avail )
write( '# Lambda for Total catch in weight by fleet' , file=out.file, append=T) #, ncolumns=(nyears))
write(lam.c.wt, file=out.file,append=T, ncolumns=nfleets )
write( '# Lambda for total discards at age by fleet ' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.disc, file=out.file,append=T, ncolumns=nfleets )
write( '# Catch Total CV by year and fleet ' , file=out.file,append=T) #, ncolumns=(nyears))
write(catch.CV, file=out.file,append=T, ncolumns=nfleets )
write( '# Discard total CV by year and fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(disc.CV, file=out.file, append=T, ncolumns=nfleets )
write( '# Input effective sample size for catch at age by year and fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(Neff.catch, file=out.file, append=T, ncolumns=nfleets )
write( '# Input effective sample size for discards at age by year and fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(Neff.disc, file=out.file, append=T , ncolumns=nfleets )
write( '# Lambda for Fmult in first year by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.Fmult.y1, file=out.file,append=T, ncolumns=nfleets )
write( '# CV for Fmult in first year by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(CV.Fmult.y1, file=out.file,append=T, ncolumns=nfleets )
write( '# Lambda for Fmult deviations' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.Fmult.dev, file=out.file,append=T, ncolumns=nfleets )
write( '# CV for Fmult deviations' , file=out.file,append=T) #, ncolumns=(nyears))
write(CV.Fmult.dev, file=out.file,append=T, ncolumns=nfleets )
write( '# Lambda for N in 1st year deviations ' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.N1.dev, file=out.file,append=T )
write( '# CV for N in 1st year deviations ' , file=out.file,append=T) #, ncolumns=(nyears))
write(CV.N1.dev, file=out.file,append=T )
write( '# Lambda for recruitment deviations' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.recr.dev, file=out.file, append=T )
write( '# Lambda for catchability in first year by index' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.q.y1, file=out.file, append=T, ncolumns=n.ind.avail )
write( '# CV for catchability in first year by index' , file=out.file,append=T) #, ncolumns=(nyears))
write(CV.q.y1, file=out.file, append=T , ncolumns=n.ind.avail )
write( '# Lambda for catchability deviations by index' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.q.dev, file=out.file,append=T, ncolumns=n.ind.avail )
write( '# CV for catchability deviations by index' , file=out.file,append=T) #, ncolumns=(nyears))
write(CV.q.dev, file=out.file,append=T )
write( '# Lambda for deviation from initial steepness' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.h, file=out.file,append=T )
write( '# CV for deviation from initial steepness' , file=out.file,append=T) #, ncolumns=(nyears))
write(CV.h, file=out.file,append=T )
write( '# Lambda for deviation from initial SSB0 ' , file=out.file,append=T) #, ncolumns=(nyears))
write(lam.SSB0, file=out.file,append=T )
write( '# CV for deviation from initial SSB0 ' , file=out.file,append=T) #, ncolumns=(nyears))
write(CV.SSB0, file=out.file,append=T )
write( '# NAA Deviations flag (1= , 0= ) ' , file=out.file,append=T) #, ncolumns=(nyears))
write(1, file=out.file,append=T )
write('###########################################', file=out.file, append=T)
write('### Initial Guesses', file=out.file, append=T)
write( '# NAA for year1' , file=out.file,append=T) #, ncolumns=(nyears))
write(naa.y1, file=out.file, append=T, ncolumns=asap.nages )
write( '# Fmult in 1st year by fleet' , file=out.file,append=T) #, ncolumns=(nyears))
write(Fmult.y1, file=out.file, append=T, ncolumns=nfleets )
write( '# Catchability in 1st year by index' , file=out.file,append=T) #, ncolumns=(nyears))
write(q.y1, file=out.file, append=T )
write( '# S-R Unexploited specification (1= 0=)' , file=out.file,append=T) #, ncolumns=(nyears))
write(1, file=out.file,append=T, ncolumns=n.ind.avail )
write( '# Unexploited initial guess' , file=out.file,append=T) #, ncolumns=(nyears))
write(SSB0, file=out.file,append=T, ncolumns=n.ind.avail )
write( '# Steepness initial guess' , file=out.file,append=T) #, ncolumns=(nyears))
write(h.guess, file=out.file,append=T )
write( '# Maximum F (upper bound on Fmult)' , file=out.file,append=T) #, ncolumns=(nyears))
write(F.max, file=out.file,append=T )
write( '# Ignore guesses' , file=out.file,append=T) #, ncolumns=(nyears))
write(ignore.guess, file=out.file,append=T )
write('###########################################', file=out.file, append=T)
write('### Projection Control data', file=out.file, append=T)
write( '# Do projections' , file=out.file,append=T) #, ncolumns=(nyears))
write(do.proj, file=out.file, append=T )
write( '# Fleet directed flag' , file=out.file,append=T) #, ncolumns=(nyears))
write(fleet.dir, file=out.file, append=T, ncolumns=nfleets )
write( '# Final year of projections' , file=out.file,append=T) #, ncolumns=(nyears))
write(proj.yr, file=out.file, append=T )
write( '# Year, projected recruits, what projected, target, non-directed Fmult ' , file=out.file,append=T) #, ncolumns=(nyears))
write(t(proj.specs), file=out.file,append=T, ncolumns=5 )
write('###########################################', file=out.file, append=T)
write('### MCMC Control data', file=out.file, append=T)
write( '# do mcmc' , file=out.file,append=T) #, ncolumns=(nyears))
write(do.mcmc, file=out.file,append=T )
write( '# MCMC nyear option' , file=out.file,append=T) #, ncolumns=(nyears))
write(mcmc.nyr.opt, file=out.file,append=T )
write( '# MCMC number of saved iterations desired' , file=out.file,append=T) #, ncolumns=(nyears))
write(mcmc.nboot, file=out.file,append=T )
write( '# MCMC thinning rate' , file=out.file,append=T) #, ncolumns=(nyears))
write(mcmc.thin, file=out.file,append=T )
write( '# MCMC random number seed' , file=out.file,append=T) #, ncolumns=(nyears))
write(mcmc.seed, file=out.file,append=T )
write('###########################################', file=out.file, append=T)
write('### A few AGEPRO specs', file=out.file, append=T)
write( '# R in agepro.bsn file' , file=out.file,append=T) #, ncolumns=(nyears))
write(recr.agepro, file=out.file,append=T )
write( '# Starting year for calculation of R' , file=out.file,append=T) #, ncolumns=(nyears))
write(recr.start.yr, file=out.file,append=T )
write( '# Ending year for calculation of R' , file=out.file,append=T) #, ncolumns=(nyears))
write(recr.end.yr, file=out.file,append=T )
write( '# Export to R flag (1= 0=)' , file=out.file,append=T) #, ncolumns=(nyears))
write(1, file=out.file,append=T )
write( '# test value' , file=out.file,append=T) #, ncolumns=(nyears))
write(test.val, file=out.file,append=T )
write('###########################################', file=out.file, append=T)
write('###### FINIS ######', file=out.file, append=T)
write( '# Fleet Names', file=out.file, append=T)
write(fleet.names, file=out.file, append=T, ncolumns=1)
write( '# Survey Names', file=out.file, append=T)
write(survey.names, file=out.file, append=T, ncolumns=1)
} # end asap setup function |
a6f5f03e3886a25771355525ebb07c6d7f73b2c5 | ca7fd6cdbe77312511b2d77115341d5bd6155a76 | /man/diffusionmap.Rd | 4e74344a81792a2a8558f38d0dd421ad11f22b43 | [] | no_license | jyuu/diffuseR | 27d83f8f5eeb18dc2c2a6ad8eb83d43d2f9c73e2 | c36c6d8de621adb317432d1f562e0a3c3669a91a | refs/heads/master | 2021-04-09T15:01:02.347144 | 2018-05-03T12:06:46 | 2018-05-03T12:06:46 | 125,540,977 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 567 | rd | diffusionmap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diffusionmap.R
\name{diffusionmap}
\alias{diffusionmap}
\title{Diffusion maps}
\usage{
diffusionmap(D, numeigen = 8, t = 0, maxdim = 50, epsilon = NULL,
rsvd = TRUE)
}
\arguments{
\item{D}{distance matrix}
\item{numeigen}{number of diffusion coordinates}
\item{t}{length of markov chain run}
\item{maxdim}{default number of coordinates if numeigen NULL}
\item{epsilon}{value to use in kernel}
\item{rsvd}{logical; whether to use randomized SVD (rsvd) for the decomposition}
}
\description{
Diffusion maps
}
|
646561fa34e10595e205bfdc6d029d91534da55b | fe4e04f63ed88fcf6253c5de35cf1bf86c041a53 | /inst/app/global.R | 22a3be98abbdd047f1e462af07ab4c9c342a2ee1 | [] | no_license | sbalci/BitStat | 3539e15b28d4504077323e1bd0644c1b2b585b7b | 9e094c18b21f06390d6125e47c611527cf4a6ac5 | refs/heads/main | 2023-08-16T11:51:44.184751 | 2021-10-08T23:42:17 | 2021-10-08T23:42:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,930 | r | global.R | ################################################################################
## 01. Prepare Resources
################################################################################
##==============================================================================
## 01.01. Load Packages
##==============================================================================
##------------------------------------------------------------------------------
## 01.01.01. Set the library paths
##------------------------------------------------------------------------------
# .libPaths(c("/hli_appl/home/has01/R/x86_64-pc-linux-gnu-library/3.3",
# "/hli_appl/appl/bda/R/x86_64-pc-linux-gnu-library/3.3",
# "/opt/microsoft/ropen/3.4.1/lib64/R/library",
# "/hli_appl/appl/bda/R/oracle"))
##------------------------------------------------------------------------------
## 01.01.02. Load packages that are related shiny & html
##------------------------------------------------------------------------------
library(shiny)
library(shinyjs)
library(shinyWidgets)
library(shinydashboard)
library(shinydashboardPlus)
library(shinybusy)
library(colourpicker)
library(htmltools)
##------------------------------------------------------------------------------
## 01.01.03. Load packages that are tidyverse families
##------------------------------------------------------------------------------
library(dplyr)
library(readr)
library(vroom)
library(reactable)
library(glue)
library(dlookr)
library(xlsx)
library(flextable)
##==============================================================================
## 01.02. Loading Sources
##==============================================================================
#source("html_css.R")
################################################################################
## 02. Prepare Data and Meta
################################################################################
##==============================================================================
## 02.01. Global Options
##==============================================================================
## for upload file
## (raise shiny's upload size cap to 30 MB for user-supplied data files)
options(shiny.maxRequestSize = 30 * 1024 ^ 2)

## for trace, if want.
## (set shiny.trace = TRUE while debugging to log shiny's websocket traffic)
options(shiny.trace = FALSE)

## for progress
## (spinner appearance options consumed by the progress/spinner widgets)
options(spinner.color="#0275D8", spinner.color.background="#ffffff",
        spinner.size=2)

##==============================================================================
## 02.02. Meta data
##==============================================================================
## App-wide state is kept in the .BitStatEnv environment (created elsewhere in
## the package). Slots start out NULL and are populated while the app runs;
## list_datasets is pre-loaded from the bundled www/meta/list_datasets.rds file.
assign("import_rds", NULL, envir = .BitStatEnv)
assign("list_datasets", readRDS(paste("www", "meta", "list_datasets.rds",
                                      sep = "/")), envir = .BitStatEnv)
assign("choosed_dataset", NULL, envir = .BitStatEnv)
assign("trans", NULL, envir = .BitStatEnv)

##==============================================================================
## 02.03. Translation meta
##==============================================================================
## set language
## (kept for reference: i18n-based translation setup, currently disabled)
# i18n <- Translator$new(translation_csvs_path = "www/meta/translation")
# i18n$set_translation_language(get("language", envir = .BitStatEnv))
##==============================================================================
## 02.04. Widget meta
##==============================================================================
## Choices for the file-import separator widget (translated label -> value).
element_sep <- setNames(
  c(",", ";", "\t"),
  c(translate("컴마"), translate("세미콜론"), translate("탭"))
)

## Choices for the quoting-character widget.
element_quote <- setNames(
  c("", '"', "'"),
  c(translate("없음"), translate("큰 따옴표"), translate("작은 따옴표"))
)

## Choices for the data-diagnosis widget (missing / negative / zero values).
element_diag <- setNames(
  list("1", "2", "3"),
  c(translate("결측치"), translate("음수값"), translate("0값"))
)

## Choices for the variable-manipulation widget.
element_manipulate_variables <- setNames(
  list("Rename", "Change type", "Remove", "Reorder levels",
       "Reorganize levels", "Transform", "Bin"),
  c(translate("이름 변경"), translate("형 변환"), translate("변수 삭제"),
    translate("범주 레벨 순서변경"), translate("범주 레벨 변경/병합"),
    translate("변수변환"), translate("비닝"))
)

## Choices for the type-conversion widget.
element_change_type <- setNames(
  list("as_factor", "as_numeric", "as_integer", "as_character", "as_date"),
  c(translate("범주형으로"), translate("연속형으로"), translate("정수형으로"),
    translate("문자형으로"), translate("날짜(Y-M-D)로"))
)

## Source every helper script under tools/ into the current environment.
tool_files <- list.files(c("tools"), pattern = "\\.(r|R)$", full.names = TRUE)
for (tool_file in tool_files) {
  source(tool_file, local = TRUE)
}
################################################################################
## 06. Shiny Rendering for CentOS
################################################################################
##==============================================================================
## 06.01. Shiny visualization functions
##==============================================================================
##------------------------------------------------------------------------------
## 06.01.01. Plot vis to PNG file for shiny server
##------------------------------------------------------------------------------
#' Render a plot to a PNG file (local override used by renderPlot() below).
#'
#' Opens a PNG device, draws a blank page with zero margins, invokes
#' \code{func()} on that device, and guarantees the device is closed.
#'
#' @param func Function that draws the plot (called for its side effect).
#' @param filename Path of the PNG file to create; a tempfile by default.
#' @param width,height Device size in pixels.
#' @param res Nominal resolution (ppi) passed to the device.
#' @param ... Further arguments forwarded to \code{grDevices::png()}.
#' @return The path of the PNG file that was written (invisibly usable).
plotPNG <- function (func, filename = tempfile(fileext = ".png"), width = 400,
                     height = 400, res = 72, ...) {
  # The original code branched on capabilities("aqua") with a Cairo fallback,
  # but the Cairo branch was guarded by `FALSE &&` (unreachable dead code) and
  # every live branch resolved to grDevices::png -- so the device choice is
  # unconditional.
  pngfun <- grDevices::png
  pngfun(filename = filename, width = width, height = height, res = res, ...)

  # Start a blank page with zero margins; restore the previous `par` settings
  # even if plot.new() fails.
  op <- graphics::par(mar = rep(0, 4))
  tryCatch(graphics::plot.new(), finally = graphics::par(op))

  # Close the device no matter how func() exits (error or normal return).
  dv <- grDevices::dev.cur()
  on.exit(grDevices::dev.off(dv), add = TRUE)

  func()
  filename
}
##------------------------------------------------------------------------------
## 06.01.02. Rendering for shiny server
##------------------------------------------------------------------------------
# Shiny-server override of shiny::renderPlot().
# Re-defined here so rendering goes through the local plotPNG() above
# (rendering workaround for this deployment platform; see section 06 header).
# NOTE(review): depends on shiny internals (installExprFunction,
# markRenderFunction, ..stacktraceon.., shiny:::getPrevPlotCoordmap,
# getGgplotCoordmap) and may break across shiny versions -- confirm against
# the installed shiny release.
#
# expr         : plot-producing expression, evaluated in `env`.
# width, height: pixel size, "auto" (taken from the client), or a function.
# res          : nominal resolution forwarded to the PNG device.
# ...          : forwarded to plotPNG() / the png device.
# env, quoted  : standard shiny expression-capture arguments.
# func         : accepted for signature compatibility; immediately overwritten
#                by installExprFunction() below.
renderPlot <- function (expr, width = "auto", height = "auto", res = 72, ...,
                        env = parent.frame(), quoted = FALSE, func = NULL) {
  # Capture `expr` as a function named `func` in this frame (shiny helper).
  installExprFunction(expr, "func", env, quoted, ..stacktraceon = TRUE)
  args <- list(...)
  # Functional width/height are wrapped reactively so the output re-renders
  # whenever they change.
  if (is.function(width))
    widthWrapper <- reactive({
      width()
    })
  else widthWrapper <- NULL
  if (is.function(height))
    heightWrapper <- reactive({
      height()
    })
  else heightWrapper <- NULL
  outputFunc <- plotOutput
  # With an explicit height, clear plotOutput's default height so the UI side
  # does not impose its own.
  if (!identical(height, "auto"))
    formals(outputFunc)["height"] <- list(NULL)
  return(markRenderFunction(outputFunc, function(shinysession,
                                                 name, ...) {
    if (!is.null(widthWrapper)) width <- widthWrapper()
    if (!is.null(heightWrapper)) height <- heightWrapper()
    prefix <- "output_"
    # "auto" sizes are reported by the browser through the session clientData.
    if (width == "auto")
      width <- shinysession$clientData[[paste(prefix, name,
                                              "_width", sep = "")]]
    if (height == "auto")
      height <- shinysession$clientData[[paste(prefix, name,
                                               "_height", sep = "")]]
    # Nothing to draw until the client has reported a usable size.
    if (is.null(width) || is.null(height) || width <= 0 || height <= 0)
      return(NULL)
    # Scale the raster by the client's device-pixel ratio (HiDPI screens).
    pixelratio <- shinysession$clientData$pixelratio
    if (is.null(pixelratio))
      pixelratio <- 1
    coordmap <- NULL
    plotFunc <- function() {
      result <- withVisible(func())
      coordmap <<- NULL
      if (result$visible) {
        # ggplot objects yield a coordmap (used for click/brush interaction);
        # any other visible result is simply printed onto the device.
        if (inherits(result$value, "ggplot")) {
          utils::capture.output(coordmap <<- getGgplotCoordmap(result$value,
                                                               pixelratio))
        } else {
          utils::capture.output(..stacktraceon..(print(result$value)))
        }
      }
      if (is.null(coordmap)) {
        coordmap <<- shiny:::getPrevPlotCoordmap(width, height)
      }
    }
    # Render through the local plotPNG() defined above.
    outfile <- ..stacktraceoff..(
      do.call(
        plotPNG,
        c(plotFunc, width = width * pixelratio, height = height * pixelratio,
          res = res * pixelratio, args)
      )
    )
    # The temporary PNG is removed when this render function returns.
    on.exit(unlink(outfile))
    res <- list(src = shinysession$fileUrl(name, outfile,
                                           contentType = "image/png"),
                width = width, height = height,
                coordmap = coordmap)
    # Propagate any coordmap computation error to the client.
    error <- attr(coordmap, "error", exact = TRUE)
    if (!is.null(error)) {
      res$error <- error
    }
    res
  }))
}
|
ecc31288694267867be42a06df63b76d424eb6d1 | f1c7c47a99dde3347a17e320f88968ed1acdae87 | /inst/odin/SIS_deterministic_odin2.R | d2663d6d3775e7f6547ee5d15f1634bd86bde5fe | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | bobverity/bobFunctionsEpi | 4d4455151cf44b0f1b698ea4a2713f71db16da6f | e0a9bb51b02cb4b6dd134dbb24503a6957e146b4 | refs/heads/master | 2021-07-15T17:33:25.550300 | 2017-10-21T04:21:28 | 2017-10-21T04:21:28 | 107,684,612 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 212 | r | SIS_deterministic_odin2.R |
# Deterministic SIS (Susceptible-Infected-Susceptible) model in the odin DSL.
# Infection moves S -> I at rate beta*S*I/N (frequency-dependent transmission);
# recovery moves I -> S at rate r*I, i.e. no lasting immunity.

# derivatives
deriv(S) <- -beta*S*I/N + r*I
deriv(I) <- beta*S*I/N - r*I

# initial conditions: I_init infected seeded into a population of size N
initial(S) <- N - I_init
initial(I) <- I_init

# parameters, supplied by the caller via user()
beta <- user()    # transmission rate
r <- user()       # recovery rate
I_init <- user()  # initial number of infected individuals
N <- user()       # total population size
|
3580daa81a2cc24c108a02e001d2080b18d1b608 | 1dcfea8d5cdc1c7c5d0a96d89e639102da0dbbd4 | /man/stop_and_log.Rd | 9b246c6424ed8d1a9f26f17f5e1d74d432eb2233 | [] | no_license | aukkola/FluxnetLSM | 707295d0dd4ccf1f5b43b09896b947e5f10b5e84 | 2716bc87bcc2ba148de7896bfad7fe6631639431 | refs/heads/master | 2023-06-24T20:21:45.371934 | 2023-06-20T05:27:53 | 2023-06-20T05:27:53 | 73,448,414 | 29 | 15 | null | 2022-10-04T23:53:08 | 2016-11-11T05:24:51 | R | UTF-8 | R | false | true | 301 | rd | stop_and_log.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utility_functions.R
\name{stop_and_log}
\alias{stop_and_log}
\title{Writes site log and then aborts, reporting error}
\usage{
stop_and_log(error, site_log)
}
\description{
Writes site log and then aborts, reporting error
}
|
0fa621bd63dea1a99cf3c938d6e5e37eed87c33d | 8f153e0489ad6f6fd06636d0596bf44bba512cc3 | /workspace2/RLab_Chap02(변수와벡터)/lab02.R | 685b26232fe285356da9c0b89d6818358d397bda | [] | no_license | mjh1583/BigDataEducation | e2d9318af48981bc241d5843c29941abc678082c | 01d317ead14459c7ecb11242227018c75a35e835 | refs/heads/main | 2023-03-06T00:24:23.183550 | 2021-02-19T06:01:36 | 2021-02-19T06:01:36 | 304,179,459 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,769 | r | lab02.R | # 데이터 종류
# 1. numeric   : numbers
# 2. character : text enclosed in double or single quotes
# 3. logical   : TRUE, T, FALSE, F
x <- c(1, 2, 3, 4, 5) # whole-number data, assigned to a variable
rm(X) # delete a variable (NOTE(review): capital X, but the variable above is lower-case x -- this call errors unless some X exists; TODO confirm intent)
x
class(x) # class() reports the type/structure of its argument; prints "numeric"
x <- c(0.1, 0.2, 0.3, 0.4, 0.5) # real-valued (double) data, assigned to a variable
x
class(x) # prints "numeric"
x <- c(1L, 2L, 3L, 4L, 5L) # an explicit L suffix makes the values integers
x
class(x) # prints "integer"
x <- c('a', 'b', 'c', '가나다라', '나', '다')
x
class(x) # prints "character"
x <- c('1', '2', '3')
x
class(x) # digits in quotes are still character data
# Dates
x <- '2020-10-15'
x
class(x) # prints "character" -- a bare string is not yet a date
# as.Date(): converts a character value to the Date class
x <- as.Date('2020-10-15') # convert to Date
x
class(x)
y <- as.Date('2020-12-21') # convert to Date
y
class(y)
x-y
y-x # date arithmetic: difference in days
x <- T # logical value TRUE  (style note: prefer TRUE over the reassignable T)
y <- F # logical value FALSE (style note: prefer FALSE over F)
class(x); class(y)
x & x # TRUE AND TRUE is TRUE
x & y # TRUE AND FALSE is FALSE
(1<2) & (3>4) # are both conditions satisfied?
x | x # TRUE OR TRUE is TRUE
x | y # TRUE OR FALSE is TRUE
(1<2) | (3>4) # is at least one condition satisfied?
!(1<2) # the negation of TRUE is FALSE
# Other special values
# NA  (Not Available): a value that was not measured => unusable; a missing value
# NaN (Not a Number) : the result of an invalid/undefined computation
# Inf, -Inf : infinite values (too large or too small to compute with)
# NULL : an undefined value (nothing)
|
ec2ae6ca4d48e162fc339166848236ab17d00683 | cbe680b5f5758ea50ab5e7291bde9462f8794a31 | /man/gsg.Rd | 1102553beb2f0d5aaaa2846931480756c1d02ee8 | [] | no_license | AWF-GAUG/gsg | 2fbe6d3ca3b15cf011f8b47c50afb42b94cb1ec4 | ae3ba409c9f84d9556f114dc530f31c842bba363 | refs/heads/master | 2020-04-20T07:22:14.019752 | 2019-06-06T06:40:58 | 2019-06-06T06:40:58 | 168,708,384 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 270 | rd | gsg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsg_package.R
\docType{package}
\name{gsg}
\alias{gsg}
\alias{gsg-package}
\title{gsg}
\description{
Global Sampling Grid shiny app
}
\examples{
# Start app using the launcher
launch_app()
}
|
c0a7577e06b762b1f1e248a22d0dd2b4440ae2ee | bdde69a5e7b644e4958c1b7abe060751407a8579 | /R/getTimingLSAM.R | c255612c10735fbbfde7b42ef59cb1e5474ca252 | [] | no_license | cran/discharge | f6d33605d8e54df21a4b45182c4cd62f8507e668 | c58d359b21e2b53c46fd0bcd1fa445897449075a | refs/heads/master | 2020-12-21T22:31:48.003164 | 2019-03-08T14:42:48 | 2019-03-08T14:42:48 | 236,584,815 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,418 | r | getTimingLSAM.R | # ............
# Timing HSAM
# ............
#' Time of occurence of Low Spectral Anomaly Magnitude (LSAM)
#'
#' Compute the number of days separating LSAM and reference point for each year.
#'
#' @param index.lsam A scalar/vector of index of LSAM values in given year/years
#' @param index.ref A scalar/vector of index of reference point in given year/years
#' @param years (optional) A vector of years corresponding to LSAM and ref values.
#' This argument can be NULL if the LSAM and ref values are scalars.
#' @param for.year (optional) Calculate timing (LSAM) only for the given year in this argument.
#' If argument is omitted, timing (LSAM) values for all years are calculated.
#' @return Scalar timing LSAM value if the inputs are scalars, or a Data frame containing two Columns:
#' \tabular{ll}{
#' \code{year} \tab First column, represents year \cr
#' \code{timing.lsam} \tab Second column, represents lsam timing values
#' }
#'
#' @examples
#' # load sample data
#' data("sycamore")
#' x = sycamore
#'
#' # get streamflow object for the sample data
#' x.streamflow = asStreamflow(x)
#'
#' # prepare baseline signal
#' x.bl = prepareBaseline(x.streamflow)
#'
#' # get signal parts
#' x.sp = getSignalParts(x.bl$pred2, candmin = c(40:125), candmax = c(190:330),
#' years = x.streamflow$data$year,
#' months = x.streamflow$data$month,
#' jdays = x.streamflow$data$jday)
#'
#' # get LSAM values
#' lsam = getLSAM(x.bl$resid.sig, x.streamflow$data$year)
#'
#' # timing LSAM
#' tlsam = getTimingLSAM(lsam$Index.all, x.sp$peak.index, x.sp$year)
#'
#' @export
getTimingLSAM <- function(index.lsam, index.ref, years = NULL, for.year = NULL) {
  # Input validation (project-level assertion helpers).
  assert.numeric.vector(index.lsam)
  assert.numeric.vector(index.ref)
  assert.numeric.vector(years)
  assert.equal.length(index.lsam, index.ref, years)
  assert.for.year(for.year)

  if (is.null(for.year)) {
    # All years: element-wise distance between LSAM index and reference index.
    timing.data <- data.frame(years, abs(index.lsam - index.ref))
  } else {
    # Restrict to the requested year before computing the distances.
    sel <- which(years == for.year)
    timing.data <- data.frame(years[sel], abs(index.lsam[sel] - index.ref[sel]))
  }
  colnames(timing.data) <- c("year", "timing.lsam")
  timing.data
}
|
deed5d7722eef27c4bb884109821d24972c73ce6 | d78baf7d5541f723c08e714b8371ee605b3123de | /man/mmbr_get_one_variable_lfsr.Rd | aa528dbd9518e3a6dbe257fc71ed1d4ad553d9f7 | [
"MIT"
] | permissive | zouyuxin/mmbr | f8b9ee57097ff39f2733a34bdd84e853e21502f3 | 7a7ab16386ddb6bb3fdca06b86035d66cde19245 | refs/heads/master | 2020-12-05T22:13:31.290877 | 2020-01-09T06:35:11 | 2020-01-09T06:35:11 | 174,035,862 | 0 | 0 | MIT | 2019-03-05T23:23:53 | 2019-03-05T23:23:51 | null | UTF-8 | R | false | true | 320 | rd | mmbr_get_one_variable_lfsr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{mmbr_get_one_variable_lfsr}
\alias{mmbr_get_one_variable_lfsr}
\title{Get lfsr per condition per variable}
\usage{
mmbr_get_one_variable_lfsr(lfsr, alpha)
}
\description{
Get lfsr per condition per variable
}
\keyword{internal}
|
63551a6c2bef05e70584e83c3bbcb5e609520998 | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.elasticache/man/revoke_cache_security_group_ingress.Rd | 44ed9e7f22f97768712a2136aa9682257d984c90 | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,191 | rd | revoke_cache_security_group_ingress.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.elasticache_operations.R
\name{revoke_cache_security_group_ingress}
\alias{revoke_cache_security_group_ingress}
\title{Revokes ingress from a cache security group}
\usage{
revoke_cache_security_group_ingress(CacheSecurityGroupName,
EC2SecurityGroupName, EC2SecurityGroupOwnerId)
}
\arguments{
\item{CacheSecurityGroupName}{[required] The name of the cache security group to revoke ingress from.}
\item{EC2SecurityGroupName}{[required] The name of the Amazon EC2 security group to revoke access from.}
\item{EC2SecurityGroupOwnerId}{[required] The AWS account number of the Amazon EC2 security group owner. Note that this is not the same thing as an AWS access key ID - you must provide a valid AWS account number for this parameter.}
}
\description{
Revokes ingress from a cache security group. Use this operation to disallow access from an Amazon EC2 security group that had been previously authorized.
}
\section{Accepted Parameters}{
\preformatted{revoke_cache_security_group_ingress(
CacheSecurityGroupName = "string",
EC2SecurityGroupName = "string",
EC2SecurityGroupOwnerId = "string"
)
}
}
|
0253853fd8941e8e6c7365554327eeafbd78f67b | a249beeec2598922dc69817a68d5bc7e6b1586ab | /man/match_maker.Rd | 505d1766ac755d47ce0f45e84591f08d75a8f136 | [] | no_license | aedobbyn/dobtools | 9c9b56241c65d37d318923bd546a03ce5963b43f | f63664430648e48f6ded8dade3afe55699c025bf | refs/heads/master | 2021-01-19T21:24:33.469420 | 2019-05-03T21:13:28 | 2019-05-03T21:13:28 | 101,250,864 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 849 | rd | match_maker.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_maker.R
\name{match_maker}
\alias{match_maker}
\title{Fuzzy Text Matching: element}
\usage{
match_maker(e, matches, max_dist = 5)
}
\arguments{
\item{e}{An character}
\item{matches}{A vector in which to look for matches with e.}
\item{max_dist}{Set maxDist to be used in stringdist::amatch}
}
\description{
Find the best match (or no match at all) to string inputs.
}
\examples{
iris <- iris \%>\% tibble::as_tibble()
iris \%>\% dplyr::mutate(
foo = purrr::map_chr(as.character(Species), match_maker,
matches = c("Virginia", "California", "Sarasota"))
)
iris \%>\% dplyr::mutate(
foo = purrr::map_chr(as.character(Species), match_maker,
matches = c("Virginia", "California", "Sarasota"), max_dist = 20)
)
}
\keyword{match}
|
09e9bc4b392da7fd75b719200adf5b121398b60b | 235cb8096c5ce77fbe2ce2d259c26978f043a91d | /_07_TextAnalytics/_10-TOPICMDL-USCongress.R | e2ab86cb264ef42ff45f4ff4e830aacac7487a52 | [
"MIT"
] | permissive | suvofalcon/R-SpringboardDS | 85e455063a62d1cd26500e87721738296ac79462 | 50652f363245b1c788233ed70b1c90b02f7f0281 | refs/heads/master | 2021-06-16T20:11:26.672816 | 2021-05-12T16:32:14 | 2021-05-12T16:32:14 | 202,501,684 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,014 | r | _10-TOPICMDL-USCongress.R | # *************************************************************************************************************
# Text Mining - Machine Learning Algorithms for Topic Modelling
#
# Dataset Used - USCongress.csv
#
# Building an LDA based Topic Model based on the "text" column in the dataset
# 1. ID - A unique identifier for the bill.
# 2. cong - The session of congress that the bill first appeared in.
# 3. billnum - The number of the bill as it appears in the congressional docket.
# 4. h_or_sen - A field specifying whether the bill was introduced in the House (HR) or the Senate (S).
# 5. major - A manually labeled topic code corresponding to the subject of the bill.
#
# Although a manually labeled topic code is given, but we will use the text column to build a topic model of our own
# **************************************************************************************************************
rm(list = ls()) # We clear all runtime variables in the Environment
# NOTE(review): rm(list = ls()) wipes the interactive workspace; acceptable in
# a standalone analysis script, but avoid when sourcing from other code.

# Use of external libraries
library(tm)
library(RTextTools)
library(topicmodels)
library(ggplot2)

# Load the USCongress data
# we will load the dataset (this is from SUVOS-TIME-CAPS)
# The load command will be slightly different for different Operating Systems
switch(Sys.info() [['sysname']],
       Windows = {USCongress <- read.csv("//SUVOS-TIME-CAPS/Data/CodeMagic/Data Files/TextMining/Assignments/Topic5-Topic modelling/dataset/USCongress.csv",
                                         header = TRUE, stringsAsFactors = FALSE)},
       Linux = {USCongress <- read.csv("//SUVOS-TIME-CAPS/Data/CodeMagic/Data Files/TextMining/Assignments/Topic5-Topic modelling/dataset/USCongress.csv",
                                       header = TRUE, stringsAsFactors = FALSE)},
       Darwin = {USCongress <- read.csv("//Volumes/Data/CodeMagic/Data Files/TextMining/Assignments/Topic5-Topic modelling/dataset/USCongress.csv",
                                        header = TRUE, stringsAsFactors = FALSE)})

# Check the data load
dim(USCongress) # 4449 rows and 6 columns
head(USCongress)
str(USCongress)

# We will create a Document Term Matrix, by taking only the text column from the data, cleaning it alongside
docMatrix <- create_matrix(as.vector(USCongress$text), language = "english", removeNumbers = TRUE,
                           removePunctuation = TRUE, removeSparseTerms = 0, removeStopwords = TRUE, stripWhitespace = TRUE, toLower = TRUE)

# Lets inspect the first 10 rows and first 10 columns
inspect(docMatrix[1:10, 1:10])

# ********** Find Optimum Topic Numbers **************************************************** #
# Lets find the best number of topics for this set of documents
# For this we need to build multiple LDA models on these documents and then take the log likelihood
# Lets say we decide the topics would be between 2 - 30 (this range is arbitrary and can be anything, but CPU intensive)
# k is - we decide how many different topics we need to identify within this set of documents
# So intermediate_model will contain models with number of topics from 2 to 30
intermediate_model <- lapply(seq(2, 30, by = 1), function (k){LDA(docMatrix, k)})

# Next, for every LDA model with k topics (LDA model with topic-2, topic-3, etc.) we find the log likelihood
# We will transform the same intermediate_model
log_model <- as.data.frame(as.matrix(lapply(intermediate_model, logLik)))

# Log likelihood determines how good the model is with the associated k value (number of topics) -- the higher
# the log likelihood, the better the model performs with that number of topics (k)
# As the topic number increases, the log likelihood typically also increases until we reach the optimum number
# of topics, beyond which further increases in topic number decrease the log likelihood
final_model <- data.frame(topics = c(seq(2,30, by = 1)), log_likelihood = as.numeric(as.matrix(log_model)))
final_model

# to visualize this
ggplot(final_model, aes(x = topics, y = log_likelihood)) + geom_line(col = "blue") + geom_point()

# We see that log_likelihood increases as the number of topics increases
# to find the optimum number of topics (max log_likelihood)
kOptimum <- final_model[which.max(final_model$log_likelihood), 1]
cat("Best Topic Number is : ",kOptimum) # This is what we can verify from the graph as well

# ********** Classify individual Text to Topic Numbers **************************************************** #
# We will now use this topic number to classify each text in the dataset into one of the topic numbers
# Divide data into Training and test matrix (we will use 70% for training and 30% for test data)
train_docMatrix <- docMatrix[1:3114, ]   # 3114/4449 ~ 70%
test_docMatrix <- docMatrix[3115:4449, ]

# Building the model on train data - First parameter is the training data and the second is the number of topics we want
train_lda <- LDA(train_docMatrix, kOptimum)

# Once we have run the LDA, for every document we look at three topics among the up-to-29 present in the superset.
# We want to see just 3 topics from every document.
# For every document, it shows three topic numbers which are associated with that document in some proportions
get_topics(train_lda, 3)

# If we want to see the highest probability occurrence of a topic for every document
# This is calculated internally from the probability values of the topic distribution itself
topics(train_lda)
train.topics <- topics(train_lda)

# To see five terms in each of the topics
get_terms(train_lda, 5)
# If we want to see the term which occurred the most in each topic
terms(train_lda)

# Now we will apply this model to the test subset
test.topics <- posterior(train_lda, test_docMatrix)
# Now lets see the contents - 10 rows and 10 columns
test.topics$topics[1:10, 1:10] # Row labels start at 3115 because the test data covers rows 3115 to 4449
# This shows, for every document, the probability (distribution) of the topics
# Now we want to assign the topic which has the highest probability for every document in test.topics
test.topics <- apply(test.topics$topics, 1, which.max)
test.topics # Every document in the test subset has now been assigned a topic code (the one with the highest probability)

# We will now join the predicted topic number to the original test data
USCongressTest <- USCongress[3115:4449, ]
finalUSCongressTestDataSet <- data.frame(Title = USCongressTest$text, Pred_topic = test.topics)
head(finalUSCongressTestDataSet)
View(finalUSCongressTestDataSet)

# to visualize the distribution of topics
topic_dist <- as.data.frame(table(finalUSCongressTestDataSet$Pred_topic))
ggplot(topic_dist, aes(x = Var1, y = Freq)) + geom_bar(stat = "identity") + geom_text(aes(label = Freq), vjust = 1.5, colour = "white") +
  xlab("Topic Numbers") + ylab("Number of Documents") + labs(title = "Topic Distribution by Documents")
|
fa9fbd0eb2dd60b4bcf33ed548deb551aee5c301 | e8a94fd1bcf437ebf2233a7dbe4d5a2fc2de6101 | /man/sracipeSimulate.Rd | 661f2371ab6177fd3ca4d1baac2d83bc40aa958f | [
"MIT"
] | permissive | lusystemsbio/sRACIPE | 573c291a09772b89d556f921bd4d1b57901fd19e | 5e6a0633b274e4390f1266cef2e54d074afe3e0f | refs/heads/master | 2022-04-02T08:30:58.547780 | 2020-02-11T20:48:00 | 2020-02-11T20:48:00 | 117,882,987 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 8,608 | rd | sracipeSimulate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulateGRC.R
\name{sracipeSimulate}
\alias{sracipeSimulate}
\title{Simulate a gene regulatory circuit}
\usage{
sracipeSimulate(
circuit = "inputs/test.tpo",
config = config,
anneal = FALSE,
knockOut = NA_character_,
numModels = 2000,
paramRange = 100,
prodRateMin = 1,
prodRateMax = 100,
degRateMin = 0.1,
degRateMax = 1,
foldChangeMin = 1,
foldChangeMax = 100,
hillCoeffMin = 1L,
hillCoeffMax = 6L,
integrateStepSize = 0.02,
simulationTime = 50,
nIC = 1L,
nNoise = 0L,
simDet = TRUE,
initialNoise = 50,
noiseScalingFactor = 0.5,
shotNoise = 0,
scaledNoise = FALSE,
outputPrecision = 12L,
printStart = 50,
printInterval = 10,
stepper = "RK4",
thresholdModels = 5000,
plots = FALSE,
plotToFile = FALSE,
genIC = TRUE,
genParams = TRUE,
integrate = TRUE,
rkTolerance = 0.01,
timeSeries = FALSE,
...
)
}
\arguments{
\item{circuit}{data.frame or character. The file containing the circuit or}
\item{config}{(optional) List. It contains simulation parameters
like integration method
(stepper) and other lists or vectors like simParams,
stochParams, hyperParams, options, thresholds etc.
The list simParams contains values for parameters like the
number of models (numModels),
simulation time (simulationTime), step size for simulations
(integrateStepSize), when to start recording the gene expressions
(printStart), time interval between recordings (printInterval), number of
initial conditions (nIC), output precision (outputPrecision), tolerance for
adaptive runge kutta method (rkTolerance), parametric variation (paramRange).
The list stochParams contains the parameters for stochastic simulations like
the number of noise levels to be simulated (nNoise), the ratio of subsequent
noise levels (noiseScalingFactor), maximum noise (initialNoise), whether to
use same noise for all genes or to scale it as per the median expression of
the genes (scaledNoise), ratio of shot noise to additive noise (shotNoise).
The list hyperParams contains the parameters like the minimum and maximum
production and degradation rates of the genes, fold change, hill coefficient etc.
The list options includes logical values like annealing (anneal), scaling of
noise (scaledNoise), generation of new initial conditions (genIC), parameters
(genParams) and whether to integrate or not (integrate). The user
modifiable simulation options can be specified as other arguments. This
list should be used if one wants to modify many settings for multiple
simulations.}
\item{anneal}{(optional) Logical. Default FALSE. Whether to use annealing
for stochastic simulations. If TRUE, the gene expressions at higher noise
are used as initial conditions for simulations at lower noise.}
\item{knockOut}{(optional) List of character or vector of characters.
Simulation after knocking out one or more genes. To knock out all the genes
in the circuit, use \code{knockOut = "all"}. If it is a vector, then all
the genes in the vector will be knocked out simultaneously.}
\item{numModels}{(optional) Integer. Default 2000. Number of random models
to be simulated.}
\item{paramRange}{(optional) numeric (0-100). Default 100. The relative
range of parameters (production rate, degradation rate, fold change).}
\item{prodRateMin}{(optional) numeric. Default 1. Minimum production rate.}
\item{prodRateMax}{(optional) numeric. Default 100. Maximum production rate.}
\item{degRateMin}{(optional) numeric. Default 0.1. Minimum degradation rate.}
\item{degRateMax}{(optional) numeric. Default 1. Maximum degradation rate.}
\item{foldChangeMin}{(optional) numeric. Default 1. Minimum fold change for
interactions.}
\item{foldChangeMax}{(optional) numeric. Default 100. Maximum fold change for
interactions.}
\item{hillCoeffMin}{(optional) integer. Default 1. Minimum hill coefficient.}
\item{hillCoeffMax}{(optional) integer. Default 6. Maximum hill coefficient.}
\item{integrateStepSize}{(optional) numeric. Default 0.02. step size for
integration using "EM" and "RK4" steppers.}
\item{simulationTime}{(optional) numeric. Total simulation time.}
\item{nIC}{(optional) integer. Default 1. Number of initial conditions to be
simulated for each model.}
\item{nNoise}{(optional) integer. Default 0.
Number of noise levels at which simulations
are to be done. Use nNoise = 1 if simulations are to be carried out at a
specific noise. If nNoise > 0, simulations will be carried out at nNoise
levels as well as for zero noise. "EM" stepper will be used for simulations
and any argument for stepper will be ignored.}
\item{simDet}{(optional) logical. Default TRUE.
Whether to also simulate at zero noise when using nNoise > 0.}
\item{initialNoise}{(optional) numeric.
Default 50/sqrt(number of genes in the circuit). The initial value of noise
for simulations. The noise value will decrease by a factor
\code{noiseScalingFactor} at subsequent noise levels.}
\item{noiseScalingFactor}{(optional) numeric (0-1) Default 0.5.
The factor by which noise
will be decreased when nNoise > 1.}
\item{shotNoise}{(optional) numeric. Default 0.
The ratio of shot noise to additive
noise.}
\item{scaledNoise}{(optional) logical. Default FALSE. Whether to scale the
noise in each gene by its expected median expression across all models. If
TRUE the noise in each gene will be proportional to its expression levels.}
\item{outputPrecision}{(optional) integer. Default 12.
The decimal point precision of
the output.}
\item{printStart}{(optional) numeric (0-\code{simulationTime}).
Default \code{simulationTime}. To be used only when \code{timeSeries} is
\code{TRUE}.
The time from which the output should be recorded. Useful for time series
analysis and studying the dynamics of a model for a particular initial
condition.}
\item{printInterval}{(optional) numeric (\code{integrateStepSize}-
\code{simulationTime - printStart}). Default 10. The separation between
two recorded time points for a given trajectory.
To be used only when \code{timeSeries} is
\code{TRUE}.}
\item{stepper}{(optional) Character. Stepper to be used for integrating the
differential equations. The options include \code{"EM"} for Euler-Maruyama
O(1), \code{"RK4"}
for fourth order Runge-Kutta O(4) and \code{"DP"} for adaptive stepper based
Dormand-Prince algorithm. The default method is \code{"RK4"}
for deterministic
simulations and the method defaults to \code{"EM"}
for stochastic simulations.}
\item{thresholdModels}{(optional) integer. Default 5000. The number of
models to be used for calculating the thresholds for genes.}
\item{plots}{(optional) logical Default \code{FALSE}.
Whether to plot the simulated data.}
\item{plotToFile}{(optional) Default \code{FALSE}. Whether to save the plots
to a file.}
\item{genIC}{(optional) logical. Default \code{TRUE}. Whether to generate
the initial conditions. If \code{FALSE}, the initial conditions must be
supplied as a dataframe to \code{circuit$ic}.}
\item{genParams}{(optional) logical. Default \code{TRUE}. Whether to generate
the parameters. If \code{FALSE}, the parameters must be
supplied as a dataframe to \code{circuit$params}.}
\item{integrate}{(optional) logical. Default \code{TRUE}. Whether to
integrate the differential equations or not. If \code{FALSE}, the function
will only generate the parameters and initial conditions. This can be used
iteratively as one can first generate the parameters and initial conditions
and then modify these before using these modified values for integration.
For example, this can be used to knockOut genes by changing the production
rate and initial condition to zero.}
\item{rkTolerance}{(optional) numeric. Default \code{0.01}. Error tolerance
for adaptive integration method.}
\item{timeSeries}{(optional) logical. Default \code{FALSE}.
Whether to generate time series for a single model instead of performing
RACIPE simulations.}
\item{...}{Other arguments}
}
\value{
\code{RacipeSE} object. RacipeSE class inherits
\code{SummarizedExperiment} and contains the circuit, parameters,
initial conditions,
simulated gene expressions, and simulation configuration. These can be
accessed using corresponding getters.
}
\description{
Simulate a gene regulatory circuit using its topology as the
only input. It will generate an ensemble of random models.
}
\section{Related Functions}{
\code{\link{sracipeSimulate}}, \code{\link{sracipeKnockDown}},
\code{\link{sracipeOverExp}}, \code{\link{sracipePlotData}}
}
\examples{
data("demoCircuit")
rSet <- sRACIPE::sracipeSimulate(circuit = demoCircuit)
}
|
5152a90bba5321fa95bf4c96b65c650a6c07ed0c | a607b44335be39a267f5b78908189d5605c10145 | /man/CASALpars.Rd | 1776dc7f43f922cb4251c0e3e42d880a5d4d1f10 | [] | no_license | tcarruth/MSEtool | 75d4c05b44b84bb97e8f9f85d4dfa7f4246453d5 | c95c7bcfe9bf7d674eded50e210c3efdc7c2725f | refs/heads/master | 2021-03-27T20:44:23.407068 | 2020-10-13T15:20:26 | 2020-10-13T15:20:26 | 116,047,693 | 2 | 4 | null | 2020-02-27T23:59:15 | 2018-01-02T19:06:42 | R | UTF-8 | R | false | true | 547 | rd | CASALpars.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CASAL2OM.R
\name{CASALpars}
\alias{CASALpars}
\title{Rips MLE estimates from CASAL file structure}
\usage{
CASALpars(CASALdir)
}
\arguments{
\item{CASALdir}{A folder containing the CASAL assessment input and output files}
}
\value{
A list.
}
\description{
A function that uses the file location of a fitted CASAL assessment model including input files
to extract data required to populate an OMx class operating model.
}
\seealso{
\link{CASAL2OM}
}
\author{
T. Carruthers
}
|
1f779b8ea30b914d8feb0e21e0a4d79bdbb842a2 | 77ac9a5c4b82685afb028d89c1cad77b15011238 | /code/HapSim.R | 6cff2f0dc58fbaa51381daa05f1e4b8e1f35567b | [] | no_license | powellow/Interactions_In_Breeding | 27736ef1cb7dc84e213c8c5f2bd58a0e4b161a72 | 305c0ee4deaed4237901d62b3951df1db716dd2b | refs/heads/master | 2023-02-25T18:53:12.824874 | 2021-02-03T04:03:14 | 2021-02-03T04:03:14 | 335,483,994 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 466 | r | HapSim.R | RPG = newPop(founderPop)
dat <- pullQtlHaplo(RPG)
info <- hapsim::haplodata(dat)
info$freqs <- rep(start_allele_freq,n_chr) #frequencies for 0 allele
hap=info
haplos <- hapsim::haplosim(n_founders*2,hap)
haplos$freqs
for (each in 1:n_chr){
assign(paste0("chr",each),matrix(as.integer(haplos$data[,each])))
}
haplotypes <- list(chr1,chr2,chr3,chr4,chr5,chr6,chr7,chr8,chr9,chr10)
genMapRep = rep(list(seq(0,1,length.out=n_qtl)),n_chr)
|
a035761a8349ee7a3515b48d17c402f11316c18e | b320edf9c9d79cdd4aef01d6aca11b1f9f587efe | /integration_seurat.R | ec105bd9b5466562ca2448c6c6ba2f918e5a409a | [] | no_license | cjwong20/TFR_2021 | f92b712cfb2f021ff9be57d78a39c8a54958d981 | 94a3214b001c3e62d1f48eb121964c554fed14af | refs/heads/main | 2023-05-31T14:25:31.672487 | 2021-06-14T11:38:57 | 2021-06-14T11:38:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,390 | r | integration_seurat.R | #!/usr/bin/R5
######################
# Seurat integration #
######################
# Integrate several single-cell datasets with Seurat v3 anchor-based
# alignment, then cluster and visualise the combined object.
### Installing ### ---
# devtools::install_github(repo = 'satijalab/seurat', ref = 'release/3.0')
.libPaths('~/R/newer_packs_library/3.5/')
source('/mnt/BioHome/ciro/scripts/functions/handy_functions.R')
deps <- c('Seurat', 'ggplot2', 'cowplot')
load_packs(deps, v = T)
root <- '~/large/simon/results/integration'
setwdc(root)
# Per-dataset annotation tables and expression matrices (parallel lists)
annot_list_tags <- theObjectSavedIn(paste0(root, '/data/annot_list_tags_10TPM.RData'))
edata_list <- theObjectSavedIn(paste0(root, '/data/edata_list.RData'))
names(edata_list) # we will take Lambrechts out
edata_list <- edata_list[-5] # drop the 5th dataset (Lambrechts) by position
annot_list_tags <- annot_list_tags[-5]
# Create Seurat object per data set
pancreas.list <- lapply(names(annot_list_tags), function(x){
  CreateSeuratObject(counts = edata_list[[x]], meta.data = annot_list_tags[[x]])
}); names(pancreas.list) <- names(annot_list_tags)
sets <- names(pancreas.list)#[c(1:4)]
setwdc(paste0(root, '/seurat_', length(sets), "sets"))
dir.create('qcs')
# QC per dataset: compute mitochondrial percentage and save scatter plots of
# library size vs %MT and library size vs detected features.
for (i in 1:length(x = pancreas.list)) {
  cat(names(pancreas.list)[i], '\n')
  pancreas.list[[i]][["percent.mt"]] <- PercentageFeatureSet(pancreas.list[[i]], pattern = "^MT-")
  plot1 <- FeatureScatter(pancreas.list[[i]], feature1 = "nCount_RNA", feature2 = "percent.mt")
  plot2 <- FeatureScatter(pancreas.list[[i]], feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
  pdf(paste0("qcs/", names(pancreas.list)[i], ".pdf"), 12, 7)
  print(CombinePlots(plots = list(plot1, plot2)))
  dev.off()
  # thesecells <- rownames(pancreas.list[[i]]@meta.data[pancreas.list[[i]]@meta.data[, "nFeature_RNA"] < tvar, ])
  # plot1 <- FeatureScatter(pancreas.list[[i]], feature1 = "nCount_RNA", feature2 = "percent.mt", cells = thesecells)
  # plot2 <- FeatureScatter(pancreas.list[[i]], feature1 = "nCount_RNA", feature2 = "nFeature_RNA", cells = thesecells)
  # pdf(paste0("qcs/", names(pancreas.list)[i], "_filtered.pdf"), 12, 7)
  # print(CombinePlots(plots = list(plot1, plot2)))
  # dev.off()
  # pancreas.list[[i]] <- subset(pancreas.list[[i]], subset = nFeature_RNA > 200 & nFeature_RNA < 2500)
}
# Per-dataset QC summary: %MT range/mean, feature range, and 99.8th percentile
summ_filters <- data.frame(rbindlist(lapply(pancreas.list, function(x){
  mytab <- t(data.frame("X" = c(range(x@meta.data[, "percent.mt"]),
    mean(x@meta.data[, "percent.mt"]),
    range(x@meta.data[, "nFeature_RNA"]),
    quantile(x@meta.data[, "nFeature_RNA"], prob = 0.998))))
  colnames(mytab) <- c('MinMTpct', 'MaxMTpct','MeanMTpct' , 'MinFeat', 'MaxFeat', 'Q99.8%')
  data.frame(mytab)
})))
rownames(summ_filters) <- names(pancreas.list)
summ_filters
# columns for visualisation
orignames <- c("orig.set", "orig.majorCluster", 'tag_FOXP3', 'tag_ct')
npcs <- 30
if(file.exists('integrated.RData')) cat('Go to file creating line\n')
# Per-dataset log-normalisation and selection of the 2000 most variable genes
for (i in 1:length(x = pancreas.list)) {
  cat(names(pancreas.list)[i], '\n')
  pancreas.list[[i]] <- NormalizeData(object = pancreas.list[[i]], verbose = FALSE)
  pancreas.list[[i]] <- FindVariableFeatures(object = pancreas.list[[i]],
    selection.method = "vst", nfeatures = 2000, verbose = FALSE)
  cat(commas(VariableFeatures(object = pancreas.list[[i]]), 10), '\n')
}
# Check most variable genes overlaps
myvargenes <- unique(unlist(lapply(pancreas.list, VariableFeatures)))
ogenes <- sapply(pancreas.list, function(x) myvargenes %in% VariableFeatures(x) )
rownames(ogenes) <- myvargenes
head(ogenes, 20)
myvargenes <- myvargenes[apply(ogenes, 1, all)] # genes variable in every dataset
length(myvargenes)
# Anchor-based integration across datasets, then scaling, PCA and an embedding
pancreas.anchors <- FindIntegrationAnchors(object.list = pancreas.list[sets], dims = 1:npcs)
pancreas.integrated <- IntegrateData(anchorset = pancreas.anchors, dims = 1:npcs)
# switch to integrated assay. The variable features of this assay are
# automatically set during IntegrateData
DefaultAssay(object = pancreas.integrated) <- "integrated"
# Run the standard workflow for visualization and clustering
pancreas.integrated <- ScaleData(object = pancreas.integrated, verbose = FALSE)
pancreas.integrated <- RunPCA(object = pancreas.integrated, npcs = npcs, verbose = FALSE)
pdf(paste0('sdevPCs_', npcs,'PCs.pdf'), width = 10, height = 8)
ElbowPlot(object = pancreas.integrated, ndims = npcs)
graphics.off()
pc_sdev <- pancreas.integrated@reductions$pca@stdev
get_elbow(1:length(pc_sdev), pc_sdev, seq(95, 70, by = -5)/100)
# Number of PCs and clustering resolution chosen from the elbow inspection above
chnpcs <- 15
nres <- 0.2
redu <- "umap"
setwdc(paste0('~/large/simon/results/integration/seurat_', length(sets), "sets/PC", chnpcs, 'R', nres))
if(redu == "umap"){
  cat("Runnning UMAP\n")
  pancreas.integrated <- RunUMAP(object = pancreas.integrated, reduction = "pca", dims = 1:chnpcs, min.dist = 0.05, spread = 2)
}else{
  cat("Runnning t-SNE\n")
  redu <- "tsne"
  pancreas.integrated <- RunTSNE(object = pancreas.integrated, reduction = "pca", dims = 1:chnpcs, check_duplicates = FALSE,
    tsne.method = "FIt-SNE", fast_tsne_path = '/mnt/BioHome/ciro/bin/FIt-SNE2/bin/fast_tsne')
}
# One embedding plot per annotation column of interest; widen the page when
# the legend has many categories
for(orig in orignames){
  tvar <- length(unique(pancreas.integrated@meta.data[, orig]))
  pdf(paste0('integrated_', sub('orig.', '', orig), '.pdf'), height = 10, width = ifelse(tvar > 15, 14, 10))
  print(DimPlot(object = pancreas.integrated, reduction = redu, group.by = orig))
  graphics.off()
}
# Graph-based clustering on the integrated PCs, then marker visualisation
pancreas.integrated <- FindNeighbors(object = pancreas.integrated, dims = 1:chnpcs)
pancreas.integrated <- FindClusters(object = pancreas.integrated, resolution = nres)
tailmat(pancreas.integrated[[]], 10)
gby <- paste0("integrated_snn_res.", nres) # metadata column holding the clusters
pancreas.integrated@meta.data[, gby] <- as.character(pancreas.integrated@meta.data[, gby])
pdf(paste0('clusters_', gby, '.pdf'), height = 8, width = 8)
DimPlot(object = pancreas.integrated, reduction = redu, group.by = gby)
graphics.off()
freq_tablep(metadata = pancreas.integrated@meta.data, cnames = c(gby, 'orig.set'),
  pnames = c('Clusters in sets', 'Sets in clusters'), dowrite = TRUE)
markers <- read.csv('/mnt/BioHome/ciro/simon/info/markers.csv', stringsAsFactors = F)
markers
# NOTE(review): the trailing [1] keeps ONLY the first marker ('FOXP3'); this
# looks like a debug leftover -- confirm whether the full list was intended
mymarkers <- unique(c('FOXP3', markers[, 1], 'IL2RA', 'TNFRSF9', 'TNFRSF18', 'DUSP4',
  'CCR8', 'IL1R2', 'IKZF2', 'ENTPD1', 'LAG3', 'TIGIT', 'CTLA4', 'PDCD1','TOX'))[1]
mymarkers <- getfound(mymarkers, rownames(pancreas.integrated@assays$RNA), v = T)
fname <- paste0('markers_', redu, '_vln.pdf')
pdf(fname, height = 7.5, width = 15)
for(i in 1:length(mymarkers)){
  print(plot_grid(FeaturePlot(pancreas.integrated, features = mymarkers[i], min.cutoff = 0),
    VlnPlot(pancreas.integrated, mymarkers[i], group.by = gby, assay = "RNA") + NoLegend()))
}
# vlnplot(pancreas.integrated, gg = mymarkers[i], orderby = gby, plotdots = T, noncero = T, v = T)
graphics.off()
pdf(paste0('markers_', redu, '.pdf'), height = 8, width = 8)
for(i in 1:length(mymarkers)){
  print(FeaturePlot(pancreas.integrated, features = mymarkers[i], min.cutoff = 0))
}
graphics.off()
pdf(paste0('markers_', redu, '_split.pdf'), height = 5, width = 25)
for(i in 1:length(mymarkers)){
  print(FeaturePlot(pancreas.integrated, features = mymarkers[i], min.cutoff = 0, split.by = 'orig.set'))
}
graphics.off()
DefaultAssay(object = pancreas.integrated)
dim(pancreas.integrated@assays$integrated@counts)
dim(pancreas.integrated@assays$integrated@data)
save(pancreas.integrated, file = '../integrated.RData')
# Resume from the saved integrated object ----------------------------------
load('integrated.RData') #### --------------------------------------------------
# pdf('combined_genes.pdf', 16, 5)
# FeaturePlot(pancreas.integrated, features = c('BCL6', 'CXCR5'), blend = T)
# graphics.off()
## BCL6+, CXCR5+, BCL6+CXCR5+ cells within the 4 FoxP3+ clusters
metadata <- pancreas.integrated[[]]#[, c('orig.fcmarkers', gby)]
metadata <- remove.factors(metadata)
colnames(metadata) <- sub(gby, 'Cluster', colnames(metadata))
# void <- theObjectSavedIn('../../data/metadata.RData')
# metadata <- cbind_repcol(void[getfound(rownames(metadata), rownames(void), v = T), ], metadata)
# metadata <- metadata[getsubset(c('Cluster', '1', '4', '5', '7'), metadata, v = T), ]
# Keep only complete, low-cardinality (categorical-like) columns
metadata <- metadata[, sapply(metadata, function(x) all(!is.na(x)) && length(table(x)) < 100 ) ]
metadata <- metadata[, getpats(colnames(metadata), c('tag', 'orig.fc', 'Cluster'), 'major')]
head(metadata)
sapply(metadata, table)
# Cross-tabulate each tag/marker column against the cluster assignment
tvar <- sapply(head(colnames(metadata), -1), function(x) table(metadata[, 'Cluster'], metadata[, x]) )
tvar <- t(do.call(cbind, tvar))
tvar
write.csv(tvar, file = 'clusters_markers.csv')
mymat <- pancreas.integrated@assays$RNA #integrated
mymat <- as.matrix(mymat[getfound(mymarkers, rownames(mymat), v = T), ])
tvar <- make_list(remove.factors(pancreas.integrated@meta.data), gby, grouping = T)
tvar <- mixedsort(tvar)
# Per-cluster expression statistics for the marker genes
void <- get_stat_report(mymat[, names(tvar)], groups = tvar, moments = c('bm', 'mn', 'p'), v = T)
# Leading apostrophe, presumably to stop spreadsheet auto-conversion of gene names
rownames(void) <- paste0("'", rownames(void))
head(void)
write.csv(void, file = 'genes_stats_merged.csv')
freq_tablep(metadata = metadata, cnames = c('tag_FOXP3', 'Cluster'))
#### Differential Expression #### ------
myidents <- c(1, 4, 6, 8)[-4] # clusters of interest (drops the 4th entry, 8)
# NOTE(review): `prefix` and `sset` are assigned several times; only the last
# assignments take effect -- the earlier ones look like kept alternatives for
# interactive use
prefix <- 'dea_global'
sset <- c('orig.set', 'guo', 'zheng', 'zhang')
prefix <- 'dea_foxp3'
sset <- list(c('orig.set', 'guo', 'zheng', 'zhang'), c(gby, myidents))
prefix <- 'dea_foxp3_gs'
dir.create(prefix)
pancreas.subset <- SubsetData(pancreas.integrated, cells = getsubset(sset, pancreas.integrated[[]], v = T))
table(pancreas.subset@meta.data[, c('orig.set', gby)])
idents <- matrix(myidents)#unique(pancreas.integrated[, gby])
# All pairwise combinations of the selected clusters (combinations() is
# presumably gtools'; loaded via the sourced helper scripts -- verify)
idents <- combinations(nrow(idents), r = 2, v = idents[, 1], set = TRUE, repeats.allowed = FALSE)
for(i in 1:nrow(idents)){
  identy <- idents[i, 1]
  if(ncol(idents) > 1) identy2 <- idents[i, 2] else identy2 <- NULL
  cat('Group(s)', commas(idents[i, ]), '\n')
  fname <- paste0(c(paste0(prefix, '/fdiffExp'), identy, identy2, sset[[1]][-1], '.csv'), collapse = "_")
  if(!file.exists(fname)){
    # Markers conserved across datasets (grouping.var) for this cluster pair
    cmarkers <- FindConservedMarkers(object = pancreas.subset, ident.1 = identy, ident.2 = identy2, grouping.var = "orig.set", logfc.threshold = 0.1)
    # min_avg_logFC: smallest per-dataset logFC when all datasets agree in sign, else 0
    cmarkers$min_avg_logFC <- apply(cmarkers[, getpats(colnames(cmarkers), 'avg_logFC')], 1, function(x){
      ifelse(all(min(x) * x > 0), min(x), 0)
    })
    write.csv(cmarkers, file = fname)
  }else cmarkers <- read.csv(fname, stringsAsFactors = F, check.names = F, row.names = 1)
  head(cmarkers); dim(cmarkers)
  # degs <- cmarkers$max_pval < 0.05
  # [rowSums(cmarkers[, getpats(colnames(cmarkers), '_pct.')] > 0.1) == 4, ]
  degs <- getDEGenes(cmarkers, pv = 0.05, upreg = T, pvtype = 'minimump_p_val', lfc.type = 'min_avg_logFC')
  degs <- rownames(cmarkers)[rownames(cmarkers) %in% degs]
  head(cmarkers[degs, ], 30)
  summary(abs(cmarkers[degs, ]$zheng_avg_logFC))
  # fname <- sub(paste0(prefix, "/f"), paste0(prefix, "/"), fname)
  # if(!file.exists(fname)){
  # void <- DoHeatmap(pancreas.subset, features = degs, group.bar = T) + theme(axis.text.y = element_text(size = 4))
  # thesegenes <- getfound(mymarkers, rownames(cmarkers[degs, ]), v = T)
  # if(length(thesegenes) > 0) plots <- VlnPlot(object = pancreas.subset, features = thesegenes, split.by = 'orig.set', pt.size = 0, combine = FALSE)
  # pdf(sub('\\.csv', '.pdf', fname), width = 16, height = 8)
  # print(void)
  # if(length(thesegenes) > 0) print(CombinePlots(plots = plots, ncol = fitgrid(thesegenes)[2]))
  # graphics.off()
  # }
  cat('Done!\n')
}
# Collect the per-pair conserved-marker tables written above and summarise
mymarkersf <- list.files(prefix, pattern = 'fdiffExp.*csv', full.names = T)
cmarkers <- lapply(mymarkersf, read.csv, stringsAsFactors = F, check.names = F, row.names = 1)
# Derive comparison names from file names, e.g. "14" -> "1vs4"
tvar <- gsub(paste0(c(prefix, "/fdiffExp", "_", ".csv", sset[[1]][-1]), collapse = "|"), "", mymarkersf)
tvar <- sapply(strsplit(tvar, ""), paste, collapse = "vs")
names(cmarkers) <- tvar
head(cmarkers[[1]])
# Union of DEGs detected in > 10% of cells of every dataset
degs <- unique(unlist(lapply(cmarkers, function(x){
  cnames <- getpats(colnames(x), '_pct.')
  getDEGenes(x[rowSums(x[, cnames] > 0.1) == length(cnames), ], pv = 0.05, upreg = T,
    pvtype = 'minimump_p_val', lfc.type = 'min_avg_logFC')
})))
length(degs)
fname <- paste0(c(prefix, 'degs', sset[[1]][-1], '.csv'), collapse = "_")
write.csv(degs, file = sub("degs", "degs_list", fname), row.names = F)
void <- DoHeatmap(pancreas.subset, features = degs, group.bar = T) + theme(axis.text.y = element_text(size = 1))
pdf(sub('\\.csv', '.pdf', fname), width = 16, height = 8)
print(void)
graphics.off()
cnames <- c('min_pct.1', 'min_pct.2', 'max_pval', 'minimump_p_val', 'min_avg_logFC')
cmarkerscombine <- rbindlist(lapply(names(cmarkers), function(y){
  x <- cmarkers[[y]]
  pct_names <- getpats(colnames(x), '_pct.')
  # NOTE(review): stray empty argument (", ,") in this getDEGenes call --
  # likely a typo; may error depending on getDEGenes's signature
  x <- x[getDEGenes(x, pv = 0.05, upreg = T, pvtype = 'minimump_p_val', , lfc.type = 'min_avg_logFC'), ]
  x$min_pct.1 <- apply(x[, getpats(colnames(x), 'pct.1')], 1, min)
  x$min_pct.2 <- apply(x[, getpats(colnames(x), 'pct.2')], 1, min)
  x <- x[abs(x$min_pct.1 - x$min_pct.2) > 0.01, ]
  cbind(gene_name = paste0("'", rownames(x)), cluster = y, x[, cnames])
}))
cmarkerscombine
write.csv(cmarkerscombine, file = fname, row.names = F)
# Top genes per comparison, ranked by the minimum-p meta p-value
ntopg <- 12
topgenes <- as.data.frame(rbindlist(lapply(levels(cmarkerscombine$cluster), function(x){
  dat <- cmarkerscombine[as.character(cmarkerscombine$cluster) == x, ]
  setorder(dat, minimump_p_val)
  head(dat, ntopg)
})))
mymarkers <- thesegenes <- unique(sub("'", "", as.character(topgenes$gene_name)))
void <- DotPlot(pancreas.subset, thesegenes, group.by = gby) +
  theme(axis.text.x = element_text(angle = 45, face = "bold", hjust = 1))
pdf(sub('\\.csv', paste0('top', ntopg, '.pdf'), fname), width = 16, height = 8)
print(void)
graphics.off()
# Group-specificity analysis of the conserved markers (sourced helper g_sp)
fname <- sub('\\.csv', paste0('top', ntopg, '_vln.pdf'), fname) # back to tsne and vlnplots
mymat <- as.matrix(GetAssayData(object = pancreas.subset, slot = "data"))
# mymat <- as.matrix(GetAssayData(object = pancreas.subset, assay = "RNA"))
tmp <- getfound(degs, rownames(mymat), v = T)
metadata <- pancreas.subset[[]]
metadata <- remove.factors(metadata)
colnames(metadata) <- sub(gby, 'Cluster', colnames(metadata))
headmat(metadata); tailmat(metadata)
source('/mnt/BioHome/ciro/scripts/functions/group_specificity.R')
group_spec <- g_sp(
  cmarkers, # comparisons stats
  this_degs = NULL, # list of DEGs vectors per comparison
  fpvtype = 'minimump_p_val', # significance
  ffctype = 'min_avg_logFC', # fold-change
  padjthr = 0.05, # significance threshold
  fcthr = 0, # fc threshold
  methd = 'suas', # method
  gglobal = FALSE, # if data structure is for global
  gref = NULL, # reference group for activation
  sharedmax = 2, # maximum number of groups sharing a gene
  groups_cols = NULL, # groups colours data.frame
  expr_mat = mymat, # matrix for visualisation
  expr_mattype = 'SeuratIntegrated', # chosen matrix for visualisation
  datatype = 'sc', # to choose the visualisation
  vs = 'vs', # string splitting comparison names
  gtf = NULL, # extra info for genes, data.frame
  gtfadd = NULL, # columns to add
  path_plot = 'dea_foxp3_gsa', # path to plot
  annotation = metadata, # annotation for samples
  cname = 'Cluster', # name id for files
  hmg_order = NULL, # order of groups data.frame
  ngenes = 20, # number of genes to plot
  sufix = 'mean', # suffix to add to file names
  order_by = 'column_name', # order samples in heatmap
  hm_order = 'minFC_p', # gene order per group
  sepchar = 'n',
  log_norm = FALSE,
  coulrange = c('blue', 'black', 'yellow'), # colours to use
  groupsamp = FALSE, # sample samples in group to plot
  verbose = TRUE, # Print progress
  myseed = 27 # seed for determinism
)
|
c02ccf54985e5f73ee2719ccbc389a9020ef1fbb | 46691c6d60bc7b9df7735f46c15701638b2a8fb5 | /heritability/scripts/moduleSummaryTable2.R | 9ef0dc3d0d5cef9ab6251272596d8ef26893c971 | [] | no_license | HaoKeLab/starnet | 44320c8569094fd968e4f3d6cb5e06a89c766308 | b8a70a5765f00dd1159dec3ed480bd139a3b3895 | refs/heads/main | 2023-08-20T07:17:17.037236 | 2021-10-28T14:51:01 | 2021-10-28T14:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,542 | r | moduleSummaryTable2.R | # Some module statistics
# NOTE(review): rm(list=ls()) plus a hard-coded setwd() make this script
# non-portable; it is meant to be run interactively on the author's machine.
rm(list=ls())
library(data.table)
setwd("~/GoogleDrive/projects/STARNET/cross-tissue")
# Load STARNET cis-eQTL data, estimated by Vamsi
# -----------------------------------------------------
# Load STARNET cis-eQTL hits and return unique tissue-gene identifiers.
#
# Reads every per-tissue eQTL table ("*.tbl") from `cis_eqtl_dir`, keeps
# associations with padj_fdr below `fdr_cutoff`, harmonizes tissue codes to
# the module-annotation conventions, drops the macrophage (MAC) set, and
# returns the unique "TISSUE_ENSEMBLID" strings.
#
# Args:
#   cis_eqtl_dir: directory containing the per-tissue "*.tbl" eQTL files.
#   fdr_cutoff: FDR threshold for keeping an association (default 1e-4,
#     matching the original hard-coded value).
# Returns: character vector of unique tissue_ensembl_id strings.
getEqtlNew <- function(
	cis_eqtl_dir = "~/DataProjects/STARNET/vamsi_eQTL/adjusted.final",
	fdr_cutoff = 0.0001)
{
	cis_eqtl_files <- list.files(cis_eqtl_dir, pattern = "*.tbl")

	# Tissue code is the prefix before the first underscore in the file name
	tissues <- sapply(strsplit(cis_eqtl_files, "_"), function(x) x[1])

	# Rename tissue codes
	tissues[tissues == "SKM"] <- "SKLM"
	tissues[tissues == "SUF"] <- "SF"
	tissues[tissues == "BLO"] <- "BLOOD"

	cis_eqtl <- lapply(
		cis_eqtl_files,
		function(file_name) {
			d <- fread(file.path(cis_eqtl_dir, file_name))
			d <- d[d$padj_fdr < fdr_cutoff, ]  # keep significant associations only
			d <- d[order(d[["p-value"]]), ]
			return(d)
		})
	names(cis_eqtl) <- tissues

	# Add tissue information to each table
	for (i in seq_along(cis_eqtl)) {
		cis_eqtl[[i]]$tissue <- tissues[i]
	}

	# Exclude macrophage eQTL. (Logical mask instead of the original
	# `[-which(...)]`, which silently dropped ALL tissues when "MAC" was
	# absent because x[-integer(0)] selects nothing.)
	cis_eqtl <- cis_eqtl[names(cis_eqtl) != "MAC"]

	# Combine tables
	cis_eqtl <- rbindlist(cis_eqtl)

	# Tissue-Ensembl IDs for matching with module assignments
	cis_eqtl$tissue_ensembl_id <- paste(cis_eqtl$tissue, cis_eqtl$gene, sep = "_")

	return(unique(cis_eqtl$tissue_ensembl_id))
}
cis_eqtl_all = getEqtlNew() # Vamsi's eQTL
# Load TF definition from Lambert et al
# -------------------------------------------------------
tf_symbols = as.character(read.table("transcription-factors/lambert/TF_names_v_1.01.txt")$V1)
# Load key driver analysis results
# --------------------------------
# kda = fread("co-expression/annotate/grn_vamsi_eqtl/kda/modules.results.txt")
kda = fread("co-expression/annotate/grn_vamsi_eqtl/kda/modules.directed.results.txt")
kda = kda[kda$FDR < 0.05, ]
# kda = kda[kda$FDR < 0.0001, ]
# Load module table
mod_tab = fread("co-expression/tables/module_tab.csv")
# Load meta gene table
modules = fread("co-expression/tables/modules.csv")
# Tissue + Ensembl ID (version suffix stripped) for matching against the eQTL IDs
modules$tissue_ensembl_id = paste0(modules$tissue, "_",
	sapply(strsplit(modules$ensembl, "[.]"), function(x) x[1])
)
# Regulator status in GENIE3 analysis. From geneRegulatoryNetworkInference.R script
# A gene is a "regulator" if it is a TF (Lambert) OR has a significant cis-eQTL
modules$regulator = FALSE
modules$regulator[modules$gene_symbol %in% tf_symbols] = TRUE
sum(modules$regulator)
modules$regulator[modules$tissue_ensembl_id %in% cis_eqtl_all] = TRUE
sum(modules$regulator)
mean(modules$regulator)
# Per-module counts of regulators and key drivers
tab = mod_tab[, 1:2]
colnames(tab)[1] = "mod_id"
tab$n_regulators_TF_eSNP = table(modules$clust, modules$regulator)[, 2]
kda[kda$MODULE == 1, ]  # spot check of module 1
kda_numbers = melt(table(kda$MODULE))
colnames(kda_numbers) = c("mod_id", "n_key_drivers")
tab = merge(tab, kda_numbers, all.x=TRUE)
# NOTE(review): merge() may reorder `tab` by mod_id, while the indices below
# come from the original mod_tab row order -- safe only if mod_tab is already
# sorted by its first column; verify
tab$type[mod_tab$purity < 0.95] = "cross-tissue"
tab$type[mod_tab$purity >= 0.95] = "tissue-specific"
# write.csv(tab, "heritability/eQTL/module_eqtl_TF_KD.csv",
# 	row.names=FALSE)
write.csv(tab, "heritability/eQTL/module_eqtl_TF_KD_directed.csv",
	row.names=FALSE)
# Fraction of regulators that are key drivers, overall and by module type
# tab$n_key_drivers / tab$n_regulators_TF_eSNP * 100
sum(tab$n_key_drivers, na.rm=TRUE) / sum(tab$n_regulators_TF_eSNP)
sum(tab$n_key_drivers[tab$type == "cross-tissue"], na.rm=TRUE) / sum(tab$n_regulators_TF_eSNP[tab$type == "cross-tissue"])
sum(tab$n_key_drivers[tab$type == "tissue-specific"], na.rm=TRUE) / sum(tab$n_regulators_TF_eSNP[tab$type == "tissue-specific"])
tab$n_key_drivers / tab$n_regulators_TF_eSNP * 100
hist(tab$n_key_drivers / tab$mod_size * 100, breaks=20)
|
783396f446fdf6db45c544a4c8eb7af31082eb16 | 71d8e0b733b11df6c7f83df521ccb704052f970e | /man/pos_cfg_cfa.Rd | 4adb3a69bf7c7a3bf7088594786959655d14d751 | [] | no_license | cran/confreq | 3d00f8d274d037c64ecc8e3c77052f53483000b8 | a06c53047b445ca4d65f1910e4c2b0b19086b30a | refs/heads/master | 2022-11-20T18:44:06.602379 | 2022-11-13T04:40:15 | 2022-11-13T04:40:15 | 17,695,227 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,238 | rd | pos_cfg_cfa.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pos_cfg_cfa.R
\name{pos_cfg_cfa}
\alias{pos_cfg_cfa}
\title{Possible configurations}
\usage{
pos_cfg_cfa(kat, fact = FALSE)
}
\arguments{
\item{kat}{a numerical vector of cardinal numbers giving the number of categories for each variable.
So the length of this numerical vector represents the number of variables.}
\item{fact}{logical, default is \code{(fact=FALSE)}. If this argument is set to \code{(fact=TRUE)} the result is coerced to a data.frame with factor variables.}
}
\value{
An object of class "matrix" or "data.frame" (depending on the argument \code{fact}) containing all possible configurations for \code{length(kat)} variables with the respective number of categories given as cardinal numbers in the vector \code{kat}.
}
\description{
Calculates all possible configurations for a set of variables with different numbers of categories.
}
\details{
No details
}
\examples{
#######################################
# possible configurations for ...
# three variables with two categories each (Lienert LSD example).
pos_cfg_cfa(kat=c(2,2,2))
#######################################
}
\references{
No references at the moment.
}
\keyword{misc}
|
28c2c6e38cc53465e78b84b23c7baf45b4ef7e01 | 52b57e049f480e08dc86ee93934c7266cc9a7008 | /R/leap comparison.r | 37bf1e11e93316ca71c4f4808a9607e54237826f | [] | no_license | amcox/step-1415 | 403cb3ec9309adac0f6a742a7115c200ed48ef36 | f08dce384e645becf59c97988de198aee0caf9c8 | refs/heads/master | 2020-12-24T19:18:51.252754 | 2016-02-28T20:54:35 | 2016-02-28T20:54:35 | 24,353,994 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,635 | r | leap comparison.r | library(tidyr)
library(dplyr)
library(ggplot2)
library(scales)
library(gdata)
# Source every helper script in the local "functions" directory.
#
# The working directory is restored via on.exit() even if one of the
# sourced files throws an error; the original version left the session
# inside "functions/" on failure.
update_functions <- function() {
	old.wd <- getwd()
	on.exit(setwd(old.wd), add = TRUE)
	setwd("functions")
	invisible(lapply(list.files(), source))
}
update_functions()
# STEP reading levels: keep wave 3 records with a known level; recode the
# above-scale 'FP' level as 13 so that levels are fully numeric
df.step <- load_data_with_gaps_long()
df.step <- subset(df.step, wave == 3 & !is.na(level))
df.step$level[df.step$level == 'FP'] <- 13
# LEAP 2014 results, restricted to the valid achievement levels
d.l <- load_leap_data()
d.l <- subset(d.l, test == 'L14' & achievement_level %in% c('A', 'M', 'B', 'AB', 'U'))
d.l <- d.l[, c('achievement_level', 'student_number', 'subject')]
# Join STEP and LEAP on student id (default merge = inner join: students in both)
d <- merge(df.step, d.l, by.x='id', by.y='student_number')
d$level <- as.numeric(d$level)
steps <- unique(d$level)
# TODO: Make work for separate subjects (math, ela), then facet for schools
# For each observed STEP level, compute the share of students at or above
# that level whose LEAP achievement_level is Basic or better (A, M or B).
# Returns a data.frame with columns `step` and `perc.prof`.
find_step_leap_prof_percs <- function(d) {
  step_cuts <- unique(d$level)
  prof_share <- vapply(
    step_cuts,
    function(cut_level) {
      at_or_above <- d[d$level >= cut_level, ]
      mean(at_or_above$achievement_level %in% c("A", "M", "B"))
    },
    numeric(1)
  )
  data.frame(step = step_cuts, perc.prof = prof_share)
}
# Graphs of percents basic and above at each STEP level
# First plot: cumulative proficiency by STEP, faceted grade x subject.
d.perc <- d %>% group_by(subject, grade) %>% do(find_step_leap_prof_percs(.))
p <- ggplot(d.perc, aes(x=step, y=perc.prof))+
  scale_x_continuous(breaks=seq(1, 13, 1))+
  scale_y_continuous(labels=percent)+
  geom_point()+
  labs(title="2014 LEAP Scores by 2014 Wave 3 STEP Level",
     x="STEP",
     y="Percent of Students at or Above that STEP Level Scoring Basic or Above"
  )+
  theme_bw()+
  facet_grid(grade ~ subject)
save_plot_as_pdf(p, '2013-14 LEAP and STEP Scores, By Grade and Subject')
# Second plot: ELA only, faceted grade x school.
d.perc <- d %>% group_by(subject, grade, school) %>% do(find_step_leap_prof_percs(.))
p <- ggplot(subset(d.perc, subject == 'ela'), aes(x=step, y=perc.prof))+
  scale_x_continuous(breaks=seq(1, 13, 1))+
  scale_y_continuous(labels=percent)+
  geom_point()+
  labs(title="2014 LEAP Scores by 2014 Wave 3 STEP Level, ELA",
     x="STEP",
     y="Percent of Students at or Above that STEP Level Scoring Basic or Above"
  )+
  theme_bw()+
  facet_grid(grade ~ school)
save_plot_as_pdf(p, '2013-14 LEAP and STEP Scores, ELA By Grade and School')
# Plots of ALs at each STEP
# Label each student 'CR' (Basic or above on LEAP) or 'NCR' (below Basic).
# Bug-prone original relied on implicit logical->character coercion: it
# assigned 'CR' into a logical column and then matched the leftover string
# "FALSE". ifelse() expresses the same mapping directly and produces the
# identical 'CR'/'NCR' character column.
d <- d %>% mutate(al.cat = ifelse(achievement_level %in% c('A', 'M', 'B'),
                                  'CR', 'NCR'))
# Histograms (as lines) of student counts per STEP level, split by LEAP
# proficiency category; get_counts() is a project helper binning `level`
# into the breaks seq(-1, 13, 1).
dh <- d %>% group_by(subject, grade, al.cat) %>% do(get_counts(., 'level', seq(-1, 13,1)))
# +0.5 shifts bin midpoints so points sit on whole STEP values.
p <- ggplot(subset(dh, subject %in% c('ela', 'math')), aes(x=h.mids+0.5, y=h.counts, color=al.cat))+
  geom_line()+
  scale_x_continuous(breaks=seq(-1, 13, 1))+
  scale_color_manual(values=c('CR'='#198D33', 'NCR'='#D16262'),
    labels=c('CR'='Basic or Above', 'NCR'='Below Basic')
  )+
  labs(x='Wave 3 STEP',
     y='Number of Students',
     title='Number of Students Proficient on LEAP by STEP\n2013-14 By Subject - Grade'
  )+
  theme_bw()+
  theme(
    legend.title=element_blank()
  )+
  facet_grid(grade ~ subject)
save_plot_as_pdf(p, '2013-14 LEAP and STEP Counts, By Grade and Subject')
# Same counts, ELA only, faceted grade x school.
dh <- d %>% group_by(subject, grade, al.cat, school) %>% do(get_counts(., 'level', seq(-1, 13,1)))
p <- ggplot(subset(dh, subject %in% c('ela')), aes(x=h.mids+0.5, y=h.counts, color=al.cat))+
  geom_line()+
  scale_x_continuous(breaks=seq(-1, 13, 1))+
  scale_color_manual(values=c('CR'='#198D33', 'NCR'='#D16262'),
    labels=c('CR'='Basic or Above', 'NCR'='Below Basic')
  )+
  labs(x='Wave 3 STEP',
     y='Number of Students',
     title='Number of Students Proficient on ELA LEAP by STEP\n2013-14 By School - Grade'
  )+
  theme_bw()+
  theme(
    legend.title=element_blank()
  )+
  facet_grid(grade ~ school)
save_plot_as_pdf(p, '2013-14 LEAP and STEP Counts, ELA By Grade and School')
|
54706985d1d459cd435c84b6e73fd4be2e0440b5 | 1f9f3319681aa377b07aa7767efdf39ef3548dc4 | /keras_diy-model.R | 9867f5303624edfeb933a3881aec5ff29280e04f | [
"MIT"
] | permissive | MarauderPixie/learning_keras_for_R | 5e6a3bbba13cdd19ffb9c9919eab6a075b567712 | 020128d64563ef3931a77f59fff05bb410c266b4 | refs/heads/main | 2023-02-11T03:50:28.900449 | 2021-01-06T15:19:29 | 2021-01-06T15:19:29 | 320,532,966 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,261 | r | keras_diy-model.R | m <- keras_model_sequential() %>%
  # Four conv/max-pool stages; the first layer fixes the expected input
  # shape at 263x263 RGB images (c(263, 263, 3)).
  layer_conv_2d(filters = 64, kernel_size = c(5, 5),
                activation = "relu",
                input_shape = c(263, 263, 3)) %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(filters = 128, kernel_size = c(5, 5),
                activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(filters = 128, kernel_size = c(5, 5),
                activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(filters = 128, kernel_size = c(5, 5),
                activation = "relu") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  # layer_conv_2d(filters = 128, kernel_size = c(12, 12),
  #               activation = "relu") %>%
  # Flatten the final feature maps into a vector for the dense head.
  layer_flatten() %>%
  # layer_dense(256, activation = "relu", input_shape = 263*263*4) %>%
  layer_dense(512, activation = "relu") %>%
  # Two sigmoid outputs; pairs with the binary_crossentropy loss below.
  layer_dense(2, activation = "sigmoid")
# Compile with Adam at its default settings (the commented lr/decay values
# were an earlier experiment).
m %>%
  compile(
    loss = "binary_crossentropy",
    optimizer = optimizer_adam(), # lr = .05, decay = .005),
    metrics = "accuracy"
  )
# Train for 30 epochs, holding out 20% of the samples for validation.
m %>%
  fit(
    train_data,
    train_labels,
    epochs = 30,
    # batch_size = 10,
    validation_split = .2,
    verbose = 2,
    shuffle = TRUE
  )
|
0bf2b4f7b7609005572840998181b0d67b7d92fd | 6ffcd151cc5b7bb0579fcb9d770b922339b82a25 | /App.R | 73c91a011cae68698af4dff4df455bc20f6a5d7b | [] | no_license | RTAgung/Sentiment-Analysis-using-KNN | 8d56e1ed7cae4476512c3b269aca113e553e9cfb | c4f07107e36660f116eddc544547af081b96c494 | refs/heads/master | 2023-02-22T17:16:58.999126 | 2021-01-27T10:42:22 | 2021-01-27T10:42:22 | 333,383,223 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,238 | r | App.R | library(tidytext)
library(dplyr)
library(stringr)
library(ggplot2)
# load data
original_data_gephi <- read.csv(file = "data-raw/data_twitter_gephi.csv")
original_data_gephi <- original_data_gephi %>%
filter(twitter_type == "Tweet") %>%
arrange(desc(Id)) %>%
select(Id, Label) %>%
sample_n(100)
data_training <- read.csv(file = "data-raw/data_training.csv")
# get spesific column
all_data <- data.frame(text = original_data_gephi$Label,
sentiment = NA) %>%
rbind(data_training) %>%
mutate(id = row_number(), .before = "text")
# split data training & data testing
data_predict_full <- all_data[1:100,]
data_training <- all_data[101:250,]
# cleaning data
temp_data_cleaning <- all_data
## remove retweet entities
temp_data_cleaning$text <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", temp_data_cleaning$text)
## remove at people
temp_data_cleaning$text <- gsub("@\\w+", " ", temp_data_cleaning$text)
## remove hastag
temp_data_cleaning$text <- gsub("#\\w+", " ", temp_data_cleaning$text)
## remove html links
temp_data_cleaning$text <- gsub("https://t.co/\\w+", " ", temp_data_cleaning$text)
## remove emoticon
temp_data_cleaning$text <- gsub('[^\x01-\x7F]', "", temp_data_cleaning$text)
## remove dot
temp_data_cleaning$text <- gsub('[\\.\\,]', " ", temp_data_cleaning$text)
## remove puntuation
temp_data_cleaning$text <- gsub('[[:punct:]]', "", temp_data_cleaning$text)
## remove control character
temp_data_cleaning$text <- gsub('[[:cntrl:]]', " ", temp_data_cleaning$text)
## remove digit
temp_data_cleaning$text <- gsub('\\d+', "", temp_data_cleaning$text)
## remove unnecessary spaces
temp_data_cleaning$text <- gsub("[ \t]{2,}", " ", temp_data_cleaning$text)
temp_data_cleaning$text <- gsub("^\\s+|\\s+$", "", temp_data_cleaning$text)
## change to lower case
temp_data_cleaning$text <- tolower(temp_data_cleaning$text)
temp_data_cleaning[temp_data_cleaning == ""] <- NA
## remove stop words
temp_data_cleaning <- temp_data_cleaning %>%
select(id, text) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>%
group_by(id) %>%
summarize(text = str_c(word, collapse = " ")) %>%
ungroup()
# split clean data training & data testing
clean_data_training <- data_training %>%
left_join(temp_data_cleaning, by = "id") %>%
select(id, text.y)
colnames(clean_data_training)[2] <- "text"
clean_data_predict <- data_predict_full %>%
left_join(temp_data_cleaning, by = "id") %>%
select(id, text.y)
colnames(clean_data_predict)[2] <- "text"
# predict all data
result_predict <- data_predict_full
for (j in seq_len(nrow(clean_data_predict))) {
cat(sprintf("\nProses: (%d / %d)", j, nrow(clean_data_predict)))
# Executing Process
data_predict <- clean_data_predict[j,]
tidy_data <- clean_data_training %>%
rbind(data_predict)
tf_idf <- tidy_data %>%
unnest_tokens(word, text) %>%
count(id, word, sort = TRUE) %>%
bind_tf_idf(word, id, n)
# wdi*wdj
bobot_predict <- tf_idf %>%
filter(id == data_predict$id)
bobot_training <- data.frame(id = integer(),
sum = numeric())
for (i in seq_len(nrow(clean_data_training))) {
temp_data <- tf_idf %>%
filter(id == clean_data_training$id[i])
join <- bobot_predict %>%
inner_join(temp_data, by = "word") %>%
mutate(kali = tf_idf.x * tf_idf.y)
bobot_training <- bobot_training %>%
rbind(data.frame(id = clean_data_training$id[i], sum = sum(join$kali)))
}
# panjang vektor
kuadrat_bobot <- tf_idf
kuadrat_bobot$tf_idf <- kuadrat_bobot$tf_idf^2
vektor <- data.frame(id = integer(),
sum = numeric(),
sqrt = numeric())
for (i in seq_len(nrow(tidy_data))) {
temp_data <- kuadrat_bobot %>%
filter(id == tidy_data$id[i])
temp_sum <- sum(temp_data$tf_idf)
temp_sqrt <- sqrt(temp_sum)
vektor <- vektor %>%
rbind(data.frame(id = tidy_data$id[i],
sum = temp_sum,
sqrt = temp_sqrt))
}
# cosine similarity
vektor_predict <- vektor %>% filter(id == data_predict$id)
cosine <- data.frame(id = integer(),
cosine = numeric())
for (i in seq_len(nrow(clean_data_training))) {
temp_id <- clean_data_training$id[i]
temp_bobot <- bobot_training %>% filter(id == temp_id)
temp_vektor <- vektor %>% filter(id == temp_id)
temp_cosine <- temp_bobot$sum / (vektor_predict$sqrt * temp_vektor$sqrt)
cosine <- cosine %>%
rbind(data.frame(id = temp_id,
cosine = temp_cosine))
}
# knn
k <- 5
cek <- cosine %>%
left_join(data_training, by = "id") %>%
select(id, cosine, sentiment) %>%
arrange(desc(cosine)) %>%
head(k)
sentiment_predict <- cek %>%
count(sentiment)
sentiment_predict <- sentiment_predict$sentiment[which.max(sentiment_predict$n)]
result_predict$sentiment[j] <- sentiment_predict
}
write.csv(data_predict_full, "data-raw/data_predict.csv", row.names = FALSE)
write.csv(clean_data_predict, "data-raw/data_predict_clean.csv", row.names = FALSE)
write.csv(result_predict, "data-raw/data_predict_result.csv", row.names = FALSE)
cat(sprintf("\nSelesai"))
#save.image("App.RData")
#load('App.RData') |
7abe3e600cec80927f012c28c6ea7d5ae62c6384 | 7cf9e172e7df788d760d0c1c564ea5d8ddf84bfa | /PIISA.R | fc5f08b67ad4c1e888545622f33ce7d170467d9f | [
"MIT"
] | permissive | DanielSGrant/PIISA | 9a5120d33a2328bf17b6662db994f1ddee528f98 | 602e19112a4065b5dceea62c90cb1238eb571f97 | refs/heads/main | 2023-04-18T22:11:13.581010 | 2021-05-10T15:19:33 | 2021-05-10T15:19:33 | 352,377,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,120 | r | PIISA.R | cat("\014")
# Interactive banner: explains the prompt conventions used throughout
# ("q" quits, () marks defaults, [] lists options, enter accepts default).
writeLines(c("\nWelcome to PIISA, an interactive Pipeline for Iterative and Interactive Sequence analysis!",
             "Reminder that throughout the program you will be prompted for inputs! Type these inputs from the keyboard and press enter to continue.",
             "At any input stage in the program enter q to quit the progam, (Please note that an error message will be displayed upon quitting).",
             "At prompts suggested values are enclosed in parentheses (), and lists of all options are enclosed in square brackets [].",
             "To select a default value simply hit enter without typing anything."))
start <- readline("Press enter to continue: ")
if(start == "q"){stop()}
while(start != "")
{
  start <- readline("Try again, press enter without typing anything: ")
  if(start == "q"){stop()}
}
writeLines(c(" ","Checking for required packges and attempting to download.",
             "If an error occurs at this stage it may be necessary to manually install the required packages."
             ,"Please refer to FAQ in manual for more information if this occurs."))
#Check for all neccessary packages
#install Bioconductor (needed to install the Bioconductor packages below)
if(!requireNamespace('BiocManager',quietly = TRUE)){
  install.packages('BiocManager')
}
library(BiocManager)
#Install dada2 (core amplicon denoising pipeline)
if(!requireNamespace('dada2',quietly = TRUE)){
  BiocManager::install("dada2", version = "3.12")
}
library(dada2); packageVersion("dada2")
#install DECIPHER
if(!requireNamespace('DECIPHER',quietly = TRUE)){
  BiocManager::install("DECIPHER", quietly=TRUE)
}
library(DECIPHER); packageVersion("DECIPHER")
#Load ggplot
if(!requireNamespace('ggplot2',quietly = TRUE)){
  install.packages('ggplot2')
}
library(ggplot2); packageVersion("ggplot2")
#Load phyloseq
if(!requireNamespace('phyloseq',quietly = TRUE)){
  BiocManager::install("phyloseq", quietly=TRUE)
}
library(phyloseq); packageVersion("phyloseq")
# NOTE(review): the trailing comma passes an empty (missing) second argument;
# it appears harmless since `con` falls back to its default, but should be
# removed.
writeLines("Finished loading packages\n",)
#Setting working directory to file location
# NOTE(review): sys.frame(1)$ofile only exists when this script is source()d,
# not when run via Rscript — confirm intended invocation.
script.dir <- dirname(sys.frame(1)$ofile)
setwd(script.dir)
#Name of folder for input files
ifolder = "Input"
#Name of folder for output files
ofolder = paste(getwd(),"Output",sep='/')
#Get naming pattern for forward and reverse reads
writeLines(c("Please enter the pattern for forward and reverse files. For example, forward files:",
             "'Tree550mcrA_R1.fastq.sanger.gz' and 'Well2mcrA_R1.fastq.sanger.gz'", "Have pattern '_R1.fastq.'"))
Forward <- readline("Please enter the pattern for your forward Files: ")
if(Forward == "q"){stop()}
# Re-prompt until the pattern matches at least one file in Input/.
fnFs <- sort(list.files(path=(paste(getwd(),ifolder, sep="/")),pattern=Forward, full.names = TRUE))
while(identical(fnFs,character(0)))
{
  Forward <- readline("Error opening files, please check that files are in 'Input' folder and spelling is corect and try again: ")
  if(Forward == "q"){stop()}
  fnFs <- sort(list.files(path=(paste(getwd(),ifolder, sep="/")),pattern=Forward, full.names = TRUE))
}
Reverse <- readline("Please enter the pattern for your reverse Files: ")
if(Reverse == "q"){stop()}
fnRs <- sort(list.files(path=(paste(getwd(),ifolder, sep="/")), pattern=Reverse, full.names = TRUE))
while(identical(fnRs,character(0)))
{
  Reverse <- readline("Error opening files, please check that files are in 'Input' folder and spelling is corect and try again: ")
  if(Reverse == "q"){stop()}
  fnRs <- sort(list.files(path=(paste(getwd(),ifolder, sep="/")), pattern=Reverse, full.names = TRUE))
}
# Extract sample names, assuming filenames have format: SAMPLENAME_XXX.fastq
sample.names <- sapply(strsplit(basename(fnFs), "_"), `[`, 1)
#Create output folder if it doesn't exist already
dir.create(file.path(ofolder), showWarnings = FALSE)
writeLines("\nTaking inputs for quality scores plot.")
#Get parameters for plots
#Select plot width ("q" quits, empty input takes the default of 8 inches)
w <- readline("Please enter the desired width for the quality scores plot in inches (8): ")
if(w == "q"){stop()} else if(w == ""){w = 8}
while(as.numeric(w) < 0 || as.numeric(w) > 50)
{
  w <- readline("Invalid entry, please enter a number for plot width in inches (8): ")
  if(w == "q"){stop()} else if(w == ""){w = 8}
}
#Select plot height ("q" quits, empty input takes the default of 8 inches)
h <- readline("Please enter the desired height for the quality scores plot in inches (8): ")
if(h == "q"){stop()}else if(h == ""){h <- 8}
while(as.numeric(h) < 0 || as.numeric(h) > 50)
{
  h <- readline("Invalid entry, please enter a number for plot height in inches (8): ")
  # Bug fix: the retry branch previously assigned the default to `w` instead
  # of `h`, leaving `h` == "" and making the as.numeric() comparison fail.
  if(h == "q"){stop()} else if(h == ""){h <- 8}
}
#select font family (must be a font known to the pdf device)
f <- readline("Please enter the desired font for quality score plots (Helvetica): ")
if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
while(!(f %in% names(pdfFonts())))
{
  # Offer to list the fonts registered with the pdf device before retrying.
  disp <- readline("Error, invalid font. Would you like to see a list of valid fonts? [y/n]: ")
  if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
  while(disp != 'y' && disp != 'n')
  {
    disp <- readline("Error, invalid selection. Enter y to see available fonts of n to enter font: ")
    if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
  }
  f <- readline("Please enter the desired font for quality score plots (Helvetica): ")
  if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
}
#Select paper size: "default" = letter paper, "special" = size from w/h.
p <- readline("Would you like generated pdf's size to be 8.5\"x11\"? (y) [y/n]: ")
if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "default"} else if(p == 'n'){p = "special"}
while(p != "special" && p != "default")
{
  p <- readline("Unexpected entry, please enter y for letter paper, or n for custom sizing: ")
  if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "default"} else if(p == 'n'){p = "special"}
}
#Put plots of quality scores in pdf (one page of forward, one of reverse)
writeLines("\nPlotting quality scores and writing to pdf in Output folder")
pdf(file = file.path(paste(ofolder,"Quality_Scores.pdf",sep='/')) ,width = as.numeric(w),height = as.numeric(h),
    family = f, paper = p)
#Check quality scores of forward reads
print(plotQualityProfile(fnFs))
#Check quality scores of reverse reads
print(plotQualityProfile(fnRs))
dev.off()
writeLines("Done")
# Place filtered files in filtered/ subdirectory
filtFs <- file.path("./Filtered", paste0(sample.names, "_F_filt.fastq.gz"))
filtRs <- file.path("./Filtered", paste0(sample.names, "_R_filt.fastq.gz"))
names(filtFs) <- sample.names
names(filtRs) <- sample.names
writeLines(" ")
#Enter first loop for trimming, dada analysis, merging, and chimera removal
# lcv = loop control ('n' exits), `first` gates one-time prompts,
# `count` numbers the per-run output folders.
lcv = 'y'
first = TRUE;
count <- 1
# Iterative filter/trim + error-learning loop: the user can repeat this whole
# stage with different parameters; each pass writes to FilterTrimDada_RunN.
while(lcv != 'n')
{
  ftd.folder <- paste("FilterTrimDada_Run",count,sep = "")
  #Create folder if it doesn't exist already
  dir.create(file.path(paste(ofolder,ftd.folder, sep = "/")), showWarnings = FALSE)
  #Set windows = TRUE if you are using a windows machine, else windows=FALSE
  # (asked once; multithreading is disabled on Windows further below)
  if(first)
  {
    win <- readline("Are you using a windows computer? [y/n]: ")
    if(win == "q"){stop()}
    while(win != 'y' && win != 'n')
    {
      win <- readline("Unexpected selection, please enter y if you are on a windows machine, or n if not: ")
      if(win == "q"){stop()}
    }
    if(win == 'y')
    {
      windows = TRUE
    }
    else
    {
      windows = FALSE;
    }
  }
  #Select value for trimming low quality scores based on quality score plots
  writeLines(c("","Truncation values for forward and reverse reads dictate where the reads are trimmed on the right.",
               "These values should be based on quality scores. Ensure forward and reverse reads maintain overlap"))
  # trim1/trim2: right-truncation lengths for forward/reverse reads.
  trim1 <- readline("Please enter a truncation value for forward reads (240): ")
  if(trim1 == "q"){stop()}
  else if(trim1 == ""){trim1 = 240}
  while(as.numeric(trim1) < 0 || as.numeric(trim1) > 300)
  {
    trim1 <- readline("Invalid entry, please enter a number based on quality scores plot: ")
    if(trim1 == "q"){stop()}
    else if(trim1 == ""){trim1 = 240}
  }
  trim2 <- readline("Please enter a truncation value for reverse reads (240): ")
  if(trim2 == "q"){stop()}
  else if(trim2 == ""){trim2 = 240}
  while(as.numeric(trim2) < 0 || as.numeric(trim2) > 300)
  {
    trim2 <- readline("Invalid entry, please enter a number based on quality scores plot: ")
    if(trim2 == "q"){stop()}
    else if(trim2 == ""){trim2 = 240}
  }
  writeLines(c("","Left trim values for forward and reverse reads dictate where the reads are trimmed.",
               "If your primers have not been removed yet enter trim values equal to primer length."))
  # trim3/trim4: left-trim lengths (primer removal) for forward/reverse.
  trim3 <- readline("Please enter a left trim value for forward reads (0): ")
  if(trim3 == "q"){stop()}
  else if(trim3 == ""){trim3 = 0}
  while(as.numeric(trim3) < 0 || as.numeric(trim3) > 80)
  {
    trim3 <- readline("Invalid entry, please enter 0 if your primers are removed, or the length of the primer in nucleotides if not: ")
    if(trim3 == "q"){stop()}
    else if(trim3 == ""){trim3 = 0}
  }
  trim4 <- readline("Please enter a left trim value for reverse reads (0): ")
  if(trim4 == "q"){stop()}
  else if(trim4 == ""){trim4 = 0}
  while(as.numeric(trim4) < 0 || as.numeric(trim4) > 80)
  {
    trim4 <- readline("Invalid entry, please enter 0 if your primers are removed, or the length of the primer in nucleotides if not: ")
    if(trim4 == "q"){stop()}
    else if(trim4 == ""){trim4 = 0}
  }
  writeLines(c("","Max expected error values dictate how many error we expect for each read.",
               "MaxEE values can be increased for lower quality reads or decreased for higher quality reads."))
  # maxEEF/maxEER: dada2 maxEE filtering thresholds per direction.
  maxEEF <- readline("Please enter the max expected error value for forward reads (2): ")
  if(maxEEF == "q"){stop()}
  else if(maxEEF == ""){maxEEF = 2}
  while(as.numeric(maxEEF) < 0 || as.numeric(maxEEF) > 30)
  {
    maxEEF <- readline("Invalid entry, please enter a positive integer value: ")
    if(maxEEF == "q"){stop()}
    else if(maxEEF == ""){maxEEF = 2}
  }
  maxEER <- readline("Please enter the max expected error value for reverse reads (2): ")
  if(maxEER == "q"){stop()}
  else if(maxEER == ""){maxEER = 2}
  while(as.numeric(maxEER) < 0 || as.numeric(maxEER) > 30)
  {
    maxEER <- readline("Invalid entry, please enter a positive integer value: ")
    if(maxEER == "q"){stop()}
    else if(maxEER == ""){maxEER = 2}
  }
  #Filter and trim data (multithread disabled on Windows per dada2 docs)
  writeLines("\nPerforming filtering and trimming (This may take some time)")
  out <- filterAndTrim(fnFs, filtFs, fnRs, filtRs,
                       maxN=0, maxEE=c(as.numeric(maxEEF),as.numeric(maxEER)), truncQ=2, rm.phix=TRUE,
                       compress=TRUE, multithread=!windows, truncLen=c(as.numeric(trim1),as.numeric(trim2)),
                       trimLeft = c(as.numeric(trim3), as.numeric(trim4)))
  writeLines("Done, filtered files written to 'Filtered' directory.")
  #Learn errors for F reads
  writeLines("\nLearning error rates and plotting to Output pdf (This may take some time)")
  # Fixed seed so error learning is reproducible across runs.
  set.seed(100)
  writeLines("Forward reads")
  errF <- learnErrors(filtFs, multithread=TRUE, verbose=FALSE)
  #Learn errors for R reads - also takes a while
  writeLines("Reverse reads")
  errR <- learnErrors(filtRs, multithread=TRUE, verbose = FALSE)
  writeLines("\nTaking inputs for error plots.")
  #Get parameters for error plots
  #select plot width ("q" quits, empty input takes the default of 8.5)
  w <- readline("Please enter the desired width for the error plots in inches (8.5): ")
  if(w == "q"){stop()} else if(w == ""){w = 8.5}
  while(as.numeric(w) < 0 || as.numeric(w) > 50)
  {
    w <- readline("Invalid entry, please enter a number for plot width in inches (8.5): ")
    if(w == "q"){stop()} else if(w == ""){w = 8.5}
  }
#Select plot height
h <- readline("Please enter the desired height for the error plots in inches (11): ")
if(h == "q"){stop()}else if(h == ""){h = 11}
while(as.numeric(h) < 0 || as.numeric(h) > 50)
{
h <- readline("Invalid entry, please enter a number for plot height in inches (11): ")
if(h == "q"){stop()} else if(h == ""){w = 11}
}
  #Select font (must be registered with the pdf device)
  f <- readline("Please enter the desired font for error plots (Helvetica): ")
  if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
  while(!(f %in% names(pdfFonts())))
  {
    disp <- readline("Error, invalid font. Would you like to see a list of valid fonts? [y/n]: ")
    if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
    while(disp != 'y' && disp != 'n')
    {
      disp <- readline("Error, invalid selection. Enter y to see available fonts of n to enter font: ")
      if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
    }
    f <- readline("Please enter the desired font for error plots (Helvetica): ")
    if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
  }
  #Select paper size (letter or custom)
  p <- readline("Would you like generated pdf's size to be 8.5\"x11\"? (y) [y/n]: ")
  if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "default"} else if(p == 'n'){p = "special"}
  while(p != "special" && p != "default")
  {
    p <- readline("Unexpected entry, please enter y for letter paper, or n for custom sizing: ")
    if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "default"} else if(p == 'n'){p = "special"}
  }
  #Print error plots to pdf, numbered by run (Error_Plots_N.pdf)
  pdf(file = file.path(paste(paste(ofolder,ftd.folder, sep = "/"), paste(paste("Error_Plots",count, sep="_"),"pdf",sep="."), sep = '/')),
      width = as.numeric(w),height = as.numeric(h), family = f, paper = p)
  print(plotErrors(errF, nominalQ=TRUE))
  print(plotErrors(errR, nominalQ=TRUE))
  dev.off()
  writeLines("Done")
  #Run dereplication before running dada
  writeLines("\nDereplicating data")
  derepFs <- derepFastq(filtFs, verbose=FALSE)
  derepRs <- derepFastq(filtRs, verbose=FALSE)
  # Name the derep-class objects by the sample names
  names(derepFs) <- sample.names
  names(derepRs) <- sample.names
  writeLines("Done")
  #Get type of data pooling from user (note: `p` is reused here, no longer
  # the paper-size value). y = pooled, n = per-sample, p = pseudo-pooling.
  p <- readline("Would you like to pool data for analysis (n) [y/n/p]: ")
  if(p == "q"){stop()}
  else if(p == ""){p = "n"}
  while(p != 'y' && p != 'n' && p != 'p')
  {
    p <- readline("Unexpected selection, please enter n (non-pooled), p (pooled), or p (pseudo-pooled): ")
    if(p == "q"){stop()}
    else if(p == ""){p = "n"}
  }
if(p == 'y'){
pooled <= TRUE
} else if(p == 'n'){
pooled <- FALSE
} else if(p == 'p'){
pooled <- "pseudo"
}
  writeLines("Running dada2 algorithm on forward and reverse reads")
  #Apply the core sample interference algorithm on forward and reverse reads
  dadaFs <- dada(filtFs, err=errF, pool = pooled, multithread=TRUE)
  writeLines(" ")
  dadaRs <- dada(filtRs, err=errR, pool = pooled, multithread=TRUE)
  writeLines("\nDone")
  # Show the first sample's denoising summary for each direction.
  writeLines("\nForward reads dada summary:")
  print(dadaFs[[1]])
  writeLines("\nReverse reads dada summary:")
  print(dadaRs[[1]])
  writeLines("")
  #Get parameters for merging from user (minOverlap and maxMismatch)
  overlap <- readline("Enter the minimum overlap for merging forward and reverse reads (12): ")
  if(overlap == "q"){stop()}
  else if(overlap == ""){overlap = 12}
  while(as.numeric(overlap) < 0 || as.numeric(overlap) > 80)
  {
    overlap <- readline("Invalid entry, please enter a positive integer value: ")
    if(overlap == "q"){stop()}
    else if(overlap == ""){overlap = 12}
  }
  mismatch <- readline("Enter the maximum allowed mismatch for merging forward and reverse reads (0): ")
  if(mismatch == "q"){stop()}
  else if(mismatch == ""){mismatch = 0}
  while(as.numeric(mismatch) < 0 || as.numeric(mismatch) > 50)
  {
    mismatch <- readline("Invalid entry, please enter a positive integer value: ")
    if(mismatch == "q"){stop()}
    else if(mismatch == ""){mismatch = 0}
  }
  #Merge paired ends
  writeLines("Merging forward and reverse reads")
  mergers<-mergePairs(dadaFs, derepFs, dadaRs, derepRs, minOverlap = as.numeric(overlap), maxMismatch = as.numeric(mismatch),
                      returnRejects = FALSE, propagateCol = character(0), justConcatenate = FALSE, verbose = FALSE)
  writeLines("Done\n")
  #Make a sequence table (samples x ASVs) and write to csv
  seqtab <- makeSequenceTable(mergers)
  write.csv(seqtab, file = paste(ofolder,"/",ftd.folder,"/","sequences",".csv",sep=''))
  writeLines("Removing chimeras")
  #Remove chimeras and write sequence table to csv
  seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE, verbose=TRUE)
  write.csv(seqtab.nochim, file = paste(paste(ofolder,ftd.folder, sep = "/"),"/sequences_nochim",".csv",sep=""))
  writeLines("Done")
  #Check the outcome of removing chimeras (fraction of reads retained)
  fraction = sum(seqtab.nochim)/sum(seqtab)
  writeLines(paste("The fraction of sequences remaining after removing chimeras is", fraction, sep=" "))
  #As a final check, look at the number of reads that made it through the pipeline at each step
  writeLines("Writing summary of analysis to csv")
  getN <- function(x) sum(getUniques(x))
  track <- cbind(out, sapply(dadaFs, getN), sapply(dadaRs, getN), sapply(mergers, getN), rowSums(seqtab.nochim))
  colnames(track) <- c("input", "filtered", "denoisedF", "denoisedR", "merged", "nonchim")
  rownames(track) <- sample.names
  write.csv(track,file=paste(paste(ofolder,ftd.folder, sep = "/"),"/","Summary",".csv",sep=''))
  writeLines("Done\n")
  writeLines("Writing summary of input parameters to txt file")
  #Output summary of results: sink() redirects cat/print to Parameters.txt
  ofile <- paste(paste(paste(ofolder,ftd.folder, sep = "/"),"Parameters.txt", sep = "/"))
  sink(ofile)
  cat(paste("Forward reads truncation value:", trim1, sep=" "))
  cat("\n")
  cat(paste("Reverse reads truncation value:", trim2, sep=" "))
  cat("\n")
  cat(paste("Forward reads left trim value:", trim3, sep=" "))
  cat("\n")
  cat(paste("Reverse reads left trim value:", trim4, sep=" "))
  cat("\n")
  cat(paste("Forward reads maxEE:", maxEEF, sep=" "))
  cat("\n")
  cat(paste("Reverse reads maxEE:", maxEER, sep=" "))
  cat("\n")
  if(p == 'y'){
    cat("Pooled = TRUE\n")
  } else if(p == 'n'){
    cat("Pooled = FALSE\n")
  } else if(p == 'p'){
    cat("Pooled = PSEUDO\n")
  }
  cat("\nForward reads dada summary:\n")
  print(dadaFs[[1]])
  cat("\nReverse reads dada summary:\n")
  print(dadaRs[[1]])
  cat("\n")
  cat(paste("Minimum overlap value for merging:", overlap,sep=" "))
  cat("\n")
  cat(paste("Maximum mismatch value for merging:", mismatch,sep=" "))
  cat("\n")
  cat(paste("Results of removing chimeras:", sum(seqtab.nochim), "non chimeric seqs/", sum(seqtab), "original seqs =",fraction, sep=" "))
  cat("\n")
  sink()
  writeLines("Done")
  # One-time prompts (windows) are skipped on subsequent passes.
  first = FALSE
  lcv = readline("Would you like to re-run error analysis and dada algorithm step? [y/n]: ")
  if(lcv == "q"){stop()}
  count = count + 1
  while(lcv != 'y' && lcv != 'n')
  {
    lcv <- readline("Unexpected selection, please enter y to run again, or n to move on: ")
    if(lcv == "q"){stop()}
  }
}
# Iterative taxonomy-assignment loop: each pass writes to AssnTax_RunN so the
# user can compare results from different reference databases/parameters.
lcv = 'y'
writeLines(" ")
count = 1
while(lcv == 'y')
{
  tax.folder <- paste(ofolder,"/","AssnTax_Run",count,sep = "")
  #Create folder if it doesn't exist already
  dir.create(file.path(tax.folder), showWarnings = FALSE)
  #Set seed so it is more reproducible
  set.seed(100)
  #testing new DB making algorithm
  # Prompt for the reference database (path to a dada2-formatted training
  # fasta); re-prompt until the file exists.
  db <- readline("Please enter the name of the database you are using for comparison: ")
  if(db == "q"){stop()}
  while(!file.exists(db))
  {
    db <- readline("Error, unable to open file, please check spelling and try again: ")
    if(db == "q"){stop()}
  }
#Create name for csv file
csv <- strsplit(db, ".",fixed=-T)[[1]][1]
csv <- paste(csv,count, sep="")
#Prompt for assignTaxonomy parameters
rc <- readline("Would you like to allow reverse compliment classification? (n) [y/n]: ")
if(rc == "q"){stop()}
else if(rc == ""){rc = "n"}
while(rc != 'y' && rc != 'n')
{
p <- readline("Unexpected selection, please enter y for RC classification, otherwise n: ")
if(rc == "q"){stop()}
else if(rc == ""){rc = "n"}
}
  # mb: minimum bootstrap confidence for assignTaxonomy (0-100).
  mb <- readline("Please enter the minimum bootstrap value (50): ")
  if(mb == "q"){stop()}
  else if(mb == ""){mb = 50}
  while(as.numeric(mb) < 0 || as.numeric(mb) > 100)
  {
    mb <- readline("Error, invalid entry. Please enter a value from 0-100: ")
    if(mb == "q"){stop()}
    else if(mb == ""){mb = 50}
  }
  writeLines("\nAssigning taxonomy.")
  # Classify the chimera-free ASVs against the chosen reference database,
  # optionally also trying reverse complements.
  if(rc == 'y')
  {
    taxa <- assignTaxonomy(seqtab.nochim, db, tryRC=TRUE, minBoot = as.numeric(mb))
  }
  else
  {
    taxa <- assignTaxonomy(seqtab.nochim, db, minBoot = as.numeric(mb))
  }
  writeLines("Done")
  taxa.print <- taxa # Removing sequence rownames for display only
  rownames(taxa.print) <- NULL
  #writing results of assign taxonomy to csv file (named after the database)
  writeLines("\nWriting output of taxonomy analysis to csv")
  write.csv(taxa.print, file = paste(tax.folder,paste(csv,"csv", sep='.'), sep='/'))
  writeLines("Done")
  count = count + 1
  lcv = readline("Would you like to re-run assign taxonomy? [y/n]: ")
  if(lcv == "q"){stop()}
  while(lcv != 'y' && lcv != 'n')
  {
    lcv <- readline("Unexpected selection, please enter y to run again, or n to move on: ")
    if(lcv == "q"){stop()}
  }
}
# Use the black-and-white ggplot theme for all abundance plots.
theme_set(theme_bw())
#Get parameters for plots
writeLines("\nTaking inputs for abundance plots.")
#select plot width ("q" quits, empty input takes the default of 11 inches)
w <- readline("Please enter the desired width for the abundance plots in inches (11): ")
if(w == "q"){stop()} else if(w == ""){w = 11}
while(as.numeric(w) < 0 || as.numeric(w) > 50)
{
  w <- readline("Invalid entry, please enter a number for plot width in inches (11): ")
  if(w == "q"){stop()} else if(w == ""){w = 11}
}
#Select plot height ("q" quits, empty input takes the default of 8.5 inches)
h <- readline("Please enter the desired height for the abundance plots in inches (8.5): ")
if(h == "q"){stop()}else if(h == ""){h <- 8.5}
while(as.numeric(h) < 0 || as.numeric(h) > 50)
{
  h <- readline("Invalid entry, please enter a number for plot height in inches (8.5): ")
  # Bug fix: the retry branch previously assigned the default to `w` instead
  # of `h`, leaving `h` == "" and making the as.numeric() comparison fail.
  if(h == "q"){stop()} else if(h == ""){h <- 8.5}
}
#Select font
f <- readline("Please enter the desired font for abundance plots (Helvetica): ")
if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
while(!(f %in% names(pdfFonts())))
{
disp <- readline("Error, invalid font. Would you like to see a list of valid fonts? [y/n]: ")
if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
while(disp != 'y' && disp != 'n')
{
disp <- readline("Error, invalid selection. Enter y to see available fonts of n to enter font: ")
if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
}
f <- readline("Please enter the desired font for abundance plots (Helvetica): ")
if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
}
#Select paper size (letter or custom)
p <- readline("Would you like generated pdf's size to be landscape 8.5\"x11\"? (y) [y/n]: ")
if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "a4r"} else if(p == 'n'){p = "special"}
while(p != "special" && p != "a4r")
{
p <- readline("Unexpected entry, please enter y for landscape letter paper, or n for custom sizing: ")
if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "a4r"} else if(p == 'n'){p = "special"}
}
#Print abundance plots to pdf: one relative-abundance stacked bar chart per
#taxonomic rank, all using the user-selected size/font/paper settings.
ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE), tax_table(taxa))
#Convert raw counts to per-sample relative abundances
ps.bar <- transform_sample_counts(ps, function(OTU) OTU/sum(OTU))
pdf(file = file.path(paste(ofolder, "Abundance_Plots.pdf",sep="/")),
width = as.numeric(w),height = as.numeric(h), family = f, paper = p)
#The seven per-rank plots were identical apart from the fill/colour variable,
#so build them in a loop instead of seven copy-pasted blocks. aes_string()
#maps the rank name (a string) the same way the literal aes(color = Kingdom)
#calls did; the constant colour='black' outside aes() still outlines the bars.
for (rank in c("Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species")) {
  print(plot_bar(ps.bar, fill = rank) +
          geom_bar(aes_string(color = rank, fill = rank), colour='black',
                   stat = "identity", position = "stack") +
          labs(x = "", y = "Relative Abundance\n") +
          scale_fill_brewer(palette = "Paired") +
          theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                panel.background = element_blank(), axis.line = element_line(colour = "black")))
}
dev.off()
#Gather plotting parameters for the diversity plots. Same prompt convention as
#above: "q" aborts, empty accepts the default, invalid entries are re-prompted.
writeLines("\nTaking inputs for diversity plots.")
#Select plot width in inches (default 6, accepted range 0-50)
w <- readline("Please enter the desired width for the diversity plots in inches (6): ")
if(w == "q"){stop()} else if(w == ""){w = 6}
while(as.numeric(w) < 0 || as.numeric(w) > 50)
{
w <- readline("Invalid entry, please enter a number for plot width in inches (6): ")
if(w == "q"){stop()} else if(w == ""){w = 6}
}
#Select plot height in inches (default 6, accepted range 0-50)
h <- readline("Please enter the desired height for the diversity plots in inches (6): ")
if(h == "q"){stop()}else if(h == ""){h = 6}
while(as.numeric(h) < 0 || as.numeric(h) > 50)
{
h <- readline("Invalid entry, please enter a number for plot height in inches (6): ")
#BUG FIX: this branch previously assigned 6 to w instead of h, leaving
#h == "" so as.numeric(h) was NA and the while() condition errored.
if(h == "q"){stop()} else if(h == ""){h = 6}
}
#Select font; must be one of the fonts known to pdfFonts()
f <- readline("Please enter the desired font for diversity plots (Helvetica): ")
if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
while(!(f %in% names(pdfFonts())))
{
disp <- readline("Error, invalid font. Would you like to see a list of valid fonts? [y/n]: ")
if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
while(disp != 'y' && disp != 'n')
{
disp <- readline("Error, invalid selection. Enter y to see available fonts or n to enter font: ")
if(disp == "q"){stop()} else if(disp == 'y'){print(names(pdfFonts()))}
}
f <- readline("Please enter the desired font for diversity plots (Helvetica): ")
if(f == "q"){stop()} else if(f == ""){f = "Helvetica"}
}
#Select paper size: "default" (8.5x11) or "special" (custom w x h)
p <- readline("Would you like generated pdf's size to be 8.5\"x11\"? (y) [y/n]: ")
if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "default"} else if(p == 'n'){p = "special"}
while(p != "special" && p != "default")
{
p <- readline("Unexpected entry, please enter y for letter paper, or n for custom sizing: ")
if(p == "q"){stop()} else if(p == "" || p == 'y'){p = "default"} else if(p == 'n'){p = "special"}
}
#Print diversity plots to pdf
# and now we can call the plot_richness() function on our phyloseq object
#Uses the w/h/f/p values collected just above. Note this plots the raw-count
#object `ps`, not the relative-abundance `ps.bar` (richness estimators such as
#Chao1 need untransformed counts).
pdf(file = file.path(paste(ofolder, "Diversity_Plots.pdf",sep="/")),
width = as.numeric(w),height = as.numeric(h), family = f, paper = p)
print(plot_richness(ps, measures=c("Simpson", "Shannon"))) #add simpson instead of chao1, observed vs chao1
print(plot_richness(ps, measures=c("Observed", "Chao1")))
dev.off()
|
27ec602f27d050587214b73f54827d2c4c444227 | c5e078744bdf44109278a25a93a076c5609d85df | /store_distance_analysis_2018_03_03.R | 64d814735622935986d661ce1dd6ea0c790ae767 | [] | no_license | jshannon75/retailer_mobility | f50a32bcdc954d95abb58e5572868609b11daf62 | f96b7505557344cd18c2b313147a9beddef0c84c | refs/heads/master | 2021-09-15T07:22:04.276109 | 2018-05-28T14:15:13 | 2018-05-28T14:15:13 | 109,274,460 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,844 | r | store_distance_analysis_2018_03_03.R | library(sf)
library(rgdal)
library(tidyverse)
library(rgeos)
library(plm)
library(stargazer)
library(spdep)
library(car)
library(Hmisc)
library(ggbeeswarm)
##############################
## Set up data for models ####
#Tract-level panel of store distances and demographics (one row per tract/year/store).
storedist_modeldata<-read_csv("storedist_modeldata_2018_03_10.csv")
#Chains with 30+ stores; keep only named chains (drop the "Category" rollups)
#and reduce to a plain character vector of chain names.
chain_select<-read_csv("atl_stlist_30more_2018_03_03.csv") %>%
  filter(STTYPE!="Category") %>%
  dplyr::select(st_name)
chain_select<-chain_select$st_name
#Lookup table mapping each store/chain to its type classifications.
chain_type<-storedist_modeldata %>%
  select(store,st_name,STTYPE,sttype2) %>%
  distinct()
#Three-colour palette used by every plot below (one colour per store type).
cpal<-c("#d7191c", "#d8b365", "#2b83ba")
# modeldata_mean <- modeldata %>%
# gather(dist:snap_pct,key="var",value="value") %>%
# group_by(tractid,chain_name,var) %>%
# summarise(mean=mean(value)) %>%
# spread(var,mean)
#
# modeldata_mean_wide<-modeldata_mean %>%
# dplyr::select(-dist) %>%
# spread(chain_name,dist_1k)
#
# tracts_sp<-readOGR(".","tractdata_clusters")[,c(1,2,4)]
# #tracts_sp<-subset(tracts_sp,Atl_Core==1)
#
# modeldata_wide_sp<-merge(tracts_sp,modeldata_mean_wide)
# modeldata_wide_sp<-subset(modeldata_wide_sp,pop1k>0)
############################
## Fixed effects Models ####
#All five outcomes (D1-D5) use the same one-year-lagged, tract-level fixed
#effects specification, so the formula and per-chain fit are built once and
#the model_fe_D* wrappers (names kept for backward compatibility) reuse them.

#Build the fixed-effects formula for a given outcome column name.
fe_formula <- function(dv) {
  as.formula(paste0(
    "log(", dv, ")~lag(afam_pct,1)+lag(asn_pct,1)+lag(hisp_pct,1)+",
    "lag(povpop_pct,1)+lag(hh150k_pct,1)+lag(snap_pct,1)+lag(popden1k,1)"))
}

#Fit the model for one chain on outcome dv. The dv argument of the
#model_fe_D* wrappers below was never used in the original code; it is kept
#only so their signatures stay identical.
fit_fe <- function(chain123, dv) {
  plm(fe_formula(dv),
      data=storedist_modeldata[storedist_modeldata$st_name==chain123,],
      index=c("tract_id","year"))
}

model_fe_D1 <- function(chain123, dv) fit_fe(chain123, "D1")
model_fe_D2 <- function(chain123, dv) fit_fe(chain123, "D2")
model_fe_D3 <- function(chain123, dv) fit_fe(chain123, "D3")
model_fe_D4 <- function(chain123, dv) fit_fe(chain123, "D4")
model_fe_D5 <- function(chain123, dv) fit_fe(chain123, "D5")

#Fit every chain for one outcome and collect model-level diagnostics
#(broom::glance) labelled with the chain name and outcome label.
fit_all_chains <- function(model_fun, label) {
  fits <- lapply(chain_select, model_fun)
  glance_df <- bind_rows(lapply(fits, broom::glance)) %>%
    mutate(st_name=chain_select,
           model=label)
  list(fits=fits, glance=glance_df)
}

d1_res <- fit_all_chains(model_fe_D1, "D1")
models.d1 <- d1_res$fits
models.d1_broom_df <- d1_res$glance
#stargazer(models,title="Fixed effects models_D1",column.labels=chain_select,type="html",out="femodels_D1_2018_03_03.htm")
d2_res <- fit_all_chains(model_fe_D2, "D2")
models.d2 <- d2_res$fits
models.d2_broom_df <- d2_res$glance
d3_res <- fit_all_chains(model_fe_D3, "D3")
models.d3 <- d3_res$fits
models.d3_broom_df <- d3_res$glance
d4_res <- fit_all_chains(model_fe_D4, "D4")
models.d4 <- d4_res$fits
models.d4_broom_df <- d4_res$glance
d5_res <- fit_all_chains(model_fe_D5, "D5")
models.d5 <- d5_res$fits
models.d5_broom_df <- d5_res$glance
#Stack the per-outcome diagnostics into one table, attach store types, and
#order the type factor for plotting.
models_all<-models.d1_broom_df %>%
  bind_rows(models.d2_broom_df) %>%
  bind_rows(models.d3_broom_df) %>%
  bind_rows(models.d4_broom_df) %>%
  bind_rows(models.d5_broom_df) %>%
  left_join(chain_type) %>%
  mutate(sttype2=factor(sttype2,
                        levels=c("Large retailer","Combination","Convenience store",
                                 "Category")))
models_all_graph<-models_all %>%
  filter(sttype2!="Category")
##Visualize global diagnostics
#R-squared by outcome (D1-D5), one line per chain, faceted by store type;
#chain names are labelled only at the D5 end of each line.
ggplot(models_all_graph,aes(x=model,y=r.squared,group=st_name,color=sttype2)) +
  geom_point() + geom_line() +
  geom_text(aes(label=if_else(model=="D5",as.character(st_name),'')),
            hjust=0.2,vjust=-0.3,color="black")+
  theme_minimal()+
  scale_colour_manual(values=cpal)+
  theme(legend.position="none")+
  labs(x="",y="R2 value")+
  facet_grid(sttype2~.,switch="y")
##Summarise all stores
#Mean R-squared across the five outcomes for the "Category" rollup rows.
models_all_mean<-models_all %>%
  filter(sttype2=="Category") %>%
  group_by(st_name) %>%
  summarise(meanr2=mean(r.squared))
###########################
## Model coefficients
###########################
#Tidy the per-chain coefficient tables for one outcome, tagging each model's
#"store" id as "<chain>_<outcome>" so the id can later be split back apart
#with separate(). Replaces five copy-pasted lapply/names/bind_rows blocks.
tidy_all_chains <- function(fits, label) {
  tidied <- lapply(fits, broom::tidy)
  names(tidied) <- paste(chain_select, label, sep="_")
  bind_rows(tidied, .id="store")
}

models.d1_tidy_df <- tidy_all_chains(models.d1, "D1")
models.d2_tidy_df <- tidy_all_chains(models.d2, "D2")
models.d3_tidy_df <- tidy_all_chains(models.d3, "D3")
models.d4_tidy_df <- tidy_all_chains(models.d4, "D4")
models.d5_tidy_df <- tidy_all_chains(models.d5, "D5")
#Human-readable labels for the six demographic model terms (the popden term
#is excluded from the coefficient plots below).
var_labels<-unique(models.d5_tidy_df$term)[1:6]
var_labels2<-c("% African-American","% Asian-American","% Hispanic","% HH in poverty",
               "% HH w/$150k income","% w/SNAP")
labels<-data.frame(var_labels,var_labels2) %>%
  rename("term"=var_labels)
#Stack the tidied coefficients for all five outcomes, split the "chain_Dx" id
#back into chain (st_name) and outcome (var), and keep only significant
#demographic terms.
models_tidy<-models.d1_tidy_df %>%
  bind_rows(models.d2_tidy_df) %>%
  bind_rows(models.d3_tidy_df) %>%
  bind_rows(models.d4_tidy_df) %>%
  bind_rows(models.d5_tidy_df) %>%
  separate(store,c("st_name","var"),sep="_") %>%
  left_join(chain_type) %>%
  filter(p.value<0.05 & term!="lag(popden1k, 1)") %>%
  left_join(labels) %>%
  mutate(sttype2=factor(sttype2,
                        levels=c("Large retailer","Combination","Convenience store","Category")))
models_tidy_graph<-models_tidy %>%
  filter(sttype2!="Category")
# ggplot(models_tidy_graph,aes(x=var,y=estimate,color=sttype2)) +
#   geom_quasirandom(width=0.02,dodge.width=0.5)+
#   facet_wrap(~var_labels2) +
#   theme_minimal() +
#   scale_colour_manual(values=cpal)
#BUG FIX: the original used aes(group=store), but separate() above removes the
#`store` column (tidyr's default remove=TRUE), so the plot errored when
#printed. Group on st_name instead: one line per chain across outcomes,
#matching the grouping used in the r.squared plot earlier.
ggplot(models_tidy_graph,aes(x=var,y=estimate,group=st_name,color=sttype2)) +
  geom_point()+geom_line()+
  theme_minimal() +
  scale_colour_manual(values=cpal)+
  facet_wrap(~var_labels2)
#Bar graph just for D3
#NOTE(review): this re-reads the store list and OVERWRITES chain_select with a
#reordered version; the correlation section further down depends on this new
#ordering.
atl_stlist <- read_csv("atl_stlist_30more_2018_03_03.csv") %>%
  filter(sttype2!="Category") %>%
  arrange(sttype2,desc(st_name)) #Order by store type and store name
chain_select<-unique(atl_stlist$st_name)
#Long-format table of D3 coefficient estimates plus approximate 95% CIs
#(estimate +/- 2 standard errors), one row per point type.
models_tidy_graph_D3<-models_tidy %>%
  filter(var=="D3" & sttype2!="Category") %>% #Subset the models
  mutate(st_name=factor(st_name,levels=chain_select),
         ci_low=estimate-2*std.error, #Can use more complicated t score in the future if need be
         ci_high=estimate+2*std.error) %>%
  dplyr::select(-statistic,-p.value,-std.error) %>%
  gather(estimate,ci_low,ci_high,key="pointtype",value="value")
#Dot-and-interval plot, flipped so chains run down the y axis.
#NOTE(review): reorder(st_name, sttype2) averages a factor, which is dubious —
#confirm the intended ordering (st_name is already a levelled factor above).
ggplot(models_tidy_graph_D3,aes(y=value,x=reorder(st_name,sttype2),color=sttype2)) +
  geom_point(data=models_tidy_graph_D3[models_tidy_graph_D3$pointtype=="estimate",],
             size=1.8)+
  geom_line(size=0.7)+
  coord_flip()+
  theme_minimal()+
  theme(axis.text.x=element_text(angle=45,hjust=1))+
  geom_hline(yintercept=0,color="black")+
  scale_colour_manual(values=cpal)+
  ylab("Model coefficient and confidence interval")+xlab("")+
  facet_wrap(~var_labels2,scales="free_y")
#Create table for average coefficient by store
models_tidy_table<-models_tidy %>%
  group_by(st_name,var_labels2,sttype2) %>%
  summarise(var_mean=round(mean(estimate),3)) %>%
  spread(var_labels2,var_mean) %>%
  arrange(sttype2)
write_csv(models_tidy_table,"Models_coeftable_2018_03_05.csv")
#Correlation for mean values of model variables ####
#Average each model variable over years, per tract and chain, then spread back
#to one column per variable. The tract id is dropped afterwards so only
#st_name plus the numeric variables remain for the correlation step.
modeldata_wide<-data.frame(storedist_modeldata) %>%
  dplyr::select(gisjn_tct,st_name,D3,afam_pct,asn_pct,hisp_pct,povpop_pct,hh150k_pct,snap_pct,popden1k) %>%
  gather(D3:popden1k,key="var",value="value") %>%
  group_by(gisjn_tct,st_name,var) %>%
  summarise(mean_value=mean(value)) %>%
  filter(is.na(mean_value)==FALSE) %>%
  spread(var,mean_value)
modeldata_wide$gisjn_tct<-NULL
#Adapted from http://www.sthda.com/english/wiki/correlation-matrix-a-quick-start-guide-to-analyze-format-and-visualize-a-correlation-matrix-using-r-software
#Flatten a square correlation matrix and its matching p-value matrix into a
#long data frame with one row per unique variable pair (upper triangle only).
#Both the row and column labels come from rownames(cormat), so cormat is
#assumed square with identical row/column ordering, as Hmisc::rcorr returns.
flattenCorrMatrix <- function(cormat, pmat) {
  keep <- upper.tri(cormat)
  var_names <- rownames(cormat)
  data.frame(
    row = var_names[row(cormat)[keep]],
    column = var_names[col(cormat)[keep]],
    cor = cormat[keep],
    p = pmat[keep]
  )
}
#Spearman correlations among the averaged model variables for one chain:
#subset modeldata_wide (global) to the chain, correlate every remaining
#column pair with Hmisc::rcorr, and return the flattened pair list tagged
#with the chain name.
correl_chain <- function(chain123) {
  subdata <- subset(modeldata_wide, st_name==chain123) %>%
    dplyr::select(-st_name)
  res2 <- rcorr(as.matrix(subdata), type="spearman")
  #The original also computed round(cor(subdata), 2) here and discarded the
  #result; that dead computation has been removed. The local previously named
  #`matrix` (shadowing base::matrix) is renamed for clarity.
  pairs_df <- flattenCorrMatrix(res2$r, res2$P)
  pairs_df$chain_name <- chain123
  pairs_df
}
#Run the per-chain correlations and stack them; chain_name becomes a factor
#ordered by the (recently reordered) chain_select vector.
correl_result_list<-lapply(chain_select,correl_chain)
correl_result<-do.call("rbind", correl_result_list)
correl_result$chain_name<-factor(correl_result$chain_name,levels=chain_select)
#Keep only pairs involving D3, format "cor***" style significance strings,
#and spread to a chain-per-column table for export.
correl_result_select<-correl_result %>%
  filter(row=="D3" | column=="D3") %>%
  mutate(cor=round(cor,2),
         p=round(p,2),
         column=as.character(column),
         row=as.character(row),
         variable=if_else(row=="D3",column,row)) %>%
  dplyr::select(-row,-column) %>%
  mutate(sig=ifelse(p<.005,"***",ifelse(p<.01,"**",ifelse(p<.05,"*",""))),
         cor_sig=paste(cor,sig,sep="")) %>%
  dplyr::select(-p,-cor,-sig)%>%
  spread(chain_name,cor_sig)
write_csv(correl_result_select,"D3_correlations_2018_03_30.csv")
##Attempt at heat map approach
#NOTE(review): `correl_result_wide` is never defined anywhere in this file, so
#this exploratory plot errors if run; the aes columns (chain_name, column, Y1)
#also do not exist in correl_result_select. Needs rework or removal.
ggplot(correl_result_wide,aes(chain_name,column))+
  geom_tile(aes(fill=Y1))+
  scale_fill_brewer(palette="RdYlGn")+
  theme_minimal()
##########################
##Old models
#NOTE(review): legacy exploratory code. `model_fe` is redefined four times
#below, so each lapply/stargazer pair must run before the next redefinition;
#only the last definition survives. These models use the older `modeldata`
#object (tractid/chain_name columns), which is built near the end of this file.
model_fe_inc<-function(chain123) {
  plm(log(dist_1k)~lag(povpop_pct,1)+lag(popden1k,1),
      data=modeldata[modeldata$chain_name==chain123,],
      index=c("tractid","year"))
}
models<-lapply(chain_select,model_fe_inc)
stargazer(models,title="Fixed effects models",column.labels=chain_select,type="html",out="femodels_inc_2017_10_19.htm")
#Break models out by race/class/SNAP
model_fe<-function(chain123) {
  plm(log(dist_1k)~lag(afam_pct,1)+lag(asn_pct,1)+lag(hisp_pct,1)+
        +lag(popden1k,1),
      data=modeldata[modeldata$chain_name==chain123,],
      index=c("tractid","year"))
}
models<-lapply(chain_select,model_fe)
stargazer(models,title="Fixed effects models",column.labels=chain_select,type="html",out="femodels_race_2017_07_24.htm")
model_fe<-function(chain123) {
  plm(log(dist_1k)~lag(afam_pct,1)+lag(asn_pct,1)+lag(hisp_pct,1)+
        lag(povpop_pct,1)+lag(hh150k_pct,1),
      data=modeldata[modeldata$chain_name==chain123,],
      index=c("tractid","year"))
}
models<-lapply(chain_select,model_fe)
stargazer(models,title="Fixed effects models",column.labels=chain_select,type="html",out="femodels_acs_2017_07_27.htm")
model_fe<-function(chain123) {
  plm(log(dist_1k)~lag(povpop_pct,1)+lag(hh150k_pct,1)+lag(popden1k,1),
      data=modeldata[modeldata$chain_name==chain123,],
      index=c("tractid","year"))
}
models<-lapply(chain_select,model_fe)
stargazer(models,title="Fixed effects models",column.labels=chain_select,type="html",out="femodels_income_2017_07_24.htm")
model_fe<-function(chain123) {
  plm(log(dist_1k)~lag(snap_pct,1)+lag(popden1k,1),
      data=modeldata[modeldata$chain_name==chain123,],
      index=c("tractid","year"))
}
models<-lapply(chain_select,model_fe)
stargazer(models,title="Fixed effects models",column.labels=chain_select,type="html",out="femodels_snap_2017_07_24.htm")
###Interpret coefficents as 1/100 of the rate of increase/decrease in distance per unit change.
#Residual histograms for eyeballing normality, one per chain.
hist(modeldata$popden1k)
hist(models[[1]]$residuals)
hist(models[[2]]$residuals)
hist(models[[3]]$residuals)
hist(models[[4]]$residuals)
hist(models[[5]]$residuals)
hist(models[[6]]$residuals)
hist(models[[7]]$residuals)
hist(models[[8]]$residuals)
hist(models[[9]]$residuals)
hist(models[[10]]$residuals)
#####################
# Spatial regression
#####################
#writeOGR(modeldata_wide_sp,".","tractdata_ua_distmean_2017_07_12",driver="ESRI Shapefile")
#Read weights
#NOTE(review): modeldata_wide_sp is only built in the commented-out block near
#the top of this file — that code must be run first for this section to work.
q4_wt<-read.gal("tractdata_ua_distmean_2017_07_12_q4wt.gal",region.id=modeldata_wide_sp$gisjn_tct)
q4<-nb2listw(q4_wt)
#Look at residuals
#Plain OLS first, then a Moran's I permutation test on its residuals to check
#for spatial autocorrelation.
model<-lm(publix~afam_pct+asn_pct+povpop_pct+hh150k_pct+snap1k+pop1k+popden1k,
          data=modeldata_wide_sp)
modeldata_wide_sp$residuals<-residuals(model)
moran.mc(modeldata_wide_sp$residuals,q4,99)
#Model
lm.LMtests(model, q4, test="all")
model_lag<-lagsarlm(shell.food~afam_pct+asn_pct+povpop_pct+hh150k_pct+snap_pct+popden1k,modeldata_wide_sp,q4)
summary(model_lag)
bptest.sarlm(model_lag)
model_err<-errorsarlm(publix~afam_pct+asn_pct+hisp_pct+povpop_pct+hh150k_pct+popden1k+snap1k,modeldata_wide_sp,q4)
summary(model_err)
#NOTE(review): this repeats the test on model_lag; presumably model_err was
#intended here — confirm before relying on the output.
bptest.sarlm(model_lag)
##Apply error model to list
#NOTE(review): this redefines chain_select (shapefile-truncated names) and
#rebinds model_err from the single fit above to a function of the same name.
chain_select<-c("walmart","target","kroger","publix","ingles.mar","dollar.gen","family.dol","shell.food","chevron.fo","cvs.pharma")
#Fit a spatial error model for one chain: pull that chain's distance column,
#rename it to the generic "dist1k", merge it onto a LOCAL copy of
#modeldata_wide_sp (the global is not modified), and fit with weights q4.
model_err<-function(chain123){
  var<-subset(modeldata_wide_sp,select=c("gisjn_tct",chain123))
  names(var)<-c("gisjn_tct","dist1k")
  var<-data.frame(var)
  modeldata_wide_sp<-merge(modeldata_wide_sp,var,by="gisjn_tct")
  errorsarlm(log(dist1k)~afam_pct+asn_pct+hisp_pct+povpop_pct+hh150k_pct+snap_pct+popden1k,modeldata_wide_sp,q4)
}
errormodels<-lapply(chain_select,model_err)
stargazer(errormodels,title="Spatial error models",column.labels=chain_select,type="html",out="errormodels_ua_2017_07_21.htm")
##With interaction terms
#Same per-chain spatial error model as model_err above, but with
#race-by-poverty interaction terms and an untransformed distance outcome.
model_err_int<-function(chain123){
  var<-subset(modeldata_wide_sp,select=c("gisjn_tct",chain123))
  names(var)<-c("gisjn_tct","dist1k")
  var<-data.frame(var)
  modeldata_wide_sp<-merge(modeldata_wide_sp,var,by="gisjn_tct")
  errorsarlm(dist1k~afam_pct*povpop_pct+asn_pct*povpop_pct+povpop_pct+hh150k_pct+snap_pct+log(popden1k)+pop1k,modeldata_wide_sp,q4)
}
summary(model_err_int("publix"))
errormodels<-lapply(chain_select,model_err_int)
stargazer(errormodels,title="Spatial error models",column.labels=chain_select,type="html",out="errormodels_interact_ua_2017_07_14.htm")
###########################
##Cross sectional models (Now using spatial error model instead...)
###########################
###Check variable correlation
#Average every variable over years per tract/chain, back to wide format.
modeldata_mean <- modeldata %>%
  gather(dist:snap_pct,key="var",value="value") %>%
  group_by(tractid,chain_name,var) %>%
  summarise(mean=mean(value)) %>%
  spread(var,mean)
#NOTE(review): column selection by position — fragile if the spread order
#changes; confirm the indices still pick the intended variables.
modelvar<-modeldata_mean[,c(6,3,4,9,12,8,11,15)] %>% filter(pop1k>0)
#Correlation heat map with printed coefficients.
modeldata_cor<-data.frame(cor(modelvar)) %>%
  mutate(var2=row.names(.)) %>%
  gather(dist_1k:snap1k,key="var1",value="value")
ggplot(modeldata_cor,aes(var1,var2))+
  geom_tile(aes(fill=value))+
  scale_fill_distiller(palette = "Spectral")+
  geom_text(aes(label=round(value,2)))
###Modeling
#Cross-sectional OLS per chain with race x poverty interactions.
model_lm<-function(chain123) {
  lm(log(dist_1k)~afam_pct*povpop_pct+asn_pct*povpop_pct+hisp_pct*povpop_pct+hh150k_pct+pop1k+snap1k,
     data=modeldata_mean[modeldata_mean$chain_name==chain123,])}
summary(model_lm("publix"))
modelresult_lm<-lapply(chain_select,model_lm)
stargazer(modelresult_lm,title="Linear regresison models",column.labels=chain_select,type="html",out="lmmodels_2017_07_11.htm")
#Pooled model for multicollinearity check (VIF rule of thumb: sqrt(VIF) > 2).
model_all<-lm(log(dist_1k)~afam_pct+asn_pct+povpop_pct+hh150k_pct+snap1k+pop1k,
              data=modeldata_mean)
sqrt(vif(model_all))>2
hist(model_all$residuals)
hist(modelresult_lm[[1]]$residuals)
hist(modelresult_lm[[2]]$residuals)
hist(modelresult_lm[[3]]$residuals)
hist(modelresult_lm[[4]]$residuals)
hist(modelresult_lm[[5]]$residuals)
hist(modelresult_lm[[6]]$residuals)
hist(modelresult_lm[[7]]$residuals)
hist(modelresult_lm[[8]]$residuals)
hist(modelresult_lm[[9]]$residuals)
hist(modelresult_lm[[10]]$residuals)
#Quick scatterplots of distance against key predictors, one facet per chain.
ggplot(modeldata_mean,aes(x=afam_pct,y=dist_1k)) + geom_point() + facet_wrap(~chain_name)
ggplot(modeldata_mean,aes(x=povpop_pct,y=dist_1k)) + geom_point() + facet_wrap(~chain_name)
ggplot(modeldata_mean,aes(x=snap1k,y=dist_1k)) + geom_point() + facet_wrap(~chain_name)
########################
# Calculate change in variables by tract
########################
#2008-to-2013 change in each demographic variable, per tract: keep the two end
#years, pivot long, spread by year, difference, and spread back to one column
#per variable.
modeldata_change <- modeldata %>%
  filter(year=="Y2008" | year=="Y2013") %>%
  select(gisjn_tct,year,afam_pct,asn_pct,hisp_pct,povpop_pct,hh150k_pct,snap_pct,popden1k) %>%
  gather(afam_pct,asn_pct,hisp_pct,povpop_pct,hh150k_pct,snap_pct,popden1k,
         key="var",value="value") %>%
  unique() %>%
  spread(year,value) %>%
  mutate(var_chg=Y2013-Y2008) %>%
  select(-Y2013,-Y2008) %>%
  spread(var,var_chg)
#####################
## OLD CODE for setup
#####################
############################
# Subset stores
############################
#Store points reprojected to UTM zone 16N (EPSG:32616, metres).
stores<-st_read("Data/GA_SNAPstores.shp") %>% st_transform(32616)
#NOTE(review): this shadows base::names with a data frame — rename if this
#section is ever revived.
names<-read_csv("chain_temp.csv")
#Tract demographics with per-1000 population and SNAP counts added.
tractdata<-read_csv("GAtracts_stcount_atl.csv")[,c(1:4,17:25,72)] %>%
  mutate(pop1k=totpop_pov/1000,
         snap1k=snap_enroll/1000)
tracts<-st_read("tractdata_clusters.shp")[,c(1,4)]
chain_select<-c("walmart","target","kroger","publix","ingles.mar","dollar.gen","family.dol","shell.food","chevron.fo","cvs.pharma")
#Long format: one row per store-location / year / chain flag, keeping only
#rows where the year flag and the chain flag are both 1.
stores_select<-stores %>%
  gather(2:7,key="year",value="value") %>%
  filter(value==1) %>%
  gather(walmart:big.lots,key="chain",value="value1") %>%
  filter(value1==1 & chain %in% chain_select)
stores_select$value<-NULL
stores_select$value1<-NULL
#Return the rows of the global stores_select table for one chain in one year.
#The arguments are copied to fresh local names first so that inside subset()'s
#data mask `chain` and `year` resolve to columns while the copies resolve to
#the requested values.
stores_subset <- function(store, year) {
  chain_wanted <- store
  year_wanted <- year
  subset(stores_select, chain == chain_wanted & year == year_wanted)
}
stores_subset("walmart", "Y2008")
#Write one shapefile per year (2008-2013) for the given chain, named
#"<chain>_Y<year>.shp" in the working directory. Replaces six copy-pasted
#stores_subset()/st_write() pairs with a loop; the shapefiles are written in
#the same year order as before. Called for its side effects only.
stores_subset_all<-function(chainname){
  storename<-chainname
  for (yr in paste0("Y", 2008:2013)) {
    st_write(stores_subset(storename, yr), paste(storename, "_", yr, ".shp", sep=""))
  }
}
lapply(chain_select,stores_subset_all)
########################################
# Distances
#######################################
#Census blocks reprojected to UTM 16N so gDistance works in metres.
blocks<-readOGR(".","atl_blocks")
blocks<-spTransform(blocks,CRS("+init=epsg:32616 +proj=utm +zone=16 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
#NOTE(review): pattern='.shp' is an unanchored regex (the dot matches any
#character), so e.g. ".shp.xml" sidecars would also match — confirm the
#directory only holds .shp files, or anchor with '\\.shp$'.
storeFiles_names <- list.files(path='dist_raster/storepoints2/',pattern='.shp')
storeFiles_names<-substr(storeFiles_names,1,nchar(storeFiles_names)-4)
#For one store layer: distance from every block to its nearest store point
#(gDistance gives the full store-x-block matrix; the column minima are the
#nearest-store distances per block).
storeDist<-function(filename){
  storepoints<-readOGR("dist_raster/storepoints2",filename)
  storepoints<-spTransform(storepoints,CRS("+init=epsg:32616 +proj=utm +zone=16 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
  knn<-gDistance(blocks,storepoints,byid=TRUE)
  knn_short<-apply(knn, 2, min)
  knn_short
}
storeDist("chevron.fo_Y2008")
#One column of nearest-store distances per store layer, joined to selected
#block attribute columns (positional indices) and exported.
testfiles<-lapply(storeFiles_names,storeDist)
testfiles_df<-as.data.frame(testfiles)
names(testfiles_df)<-storeFiles_names
testfiles_df<-data.frame(data.frame(cbind(blocks[,c(6,13,11,12,14)]),testfiles_df))
write_csv(testfiles_df,"storedist_2017_07_03.csv")
##########
# Summarise to tracts
##########
#Population-weighted mean distance per tract and store/year layer.
storedist<-read_csv("storedist_2017_07_03.csv")
storedist_tct<-storedist %>%
  gather(chevron.fo_Y2008:walmart_Y2013,key="id",value="distance") %>%
  mutate(weight=Pop2010*distance) %>%
  group_by(tract_id,id) %>%
  summarise(weightmean=sum(weight)/sum(Pop2010)) %>%
  spread(id,weightmean) %>%
  rename(tractid=tract_id)
write_csv(storedist_tct,"storedist_tct_2017_07_03.csv")
###########
# Join tract data
###########
#Re-read the tract summary in long format, splitting "chain_Yxxxx" ids.
storedist_tct<-read_csv("storedist_tct_2017_07_03.csv") %>%
  gather(chevron.fo_Y2008:walmart_Y2013,key="store",value="dist") %>%
  separate(store,sep="_",c("chain_name","year"))
storedist_tct_all<-left_join(storedist_tct,tractdata)
write_csv(storedist_tct_all,"storedist_tct_data_2017_07_10.csv")
#NOTE(review): storedist_tct at this point already has chain_name/year/dist
#columns, so gather(chevron.fo:walmart) looks inconsistent with the shape of
#the data — verify this branch before reuse.
storedist_tct_spread<-storedist_tct %>%
  gather(chevron.fo:walmart,key="store",value="dist") %>%
  mutate(store_yr=paste(substr(store,1,6),substr(year,4,5),sep="_")) %>%
  dplyr::select(c("tractid","store_yr","dist")) %>%
  spread(store_yr,dist)
storedist_tct_spread_all<-left_join(tracts,storedist_tct_spread)
st_write(storedist_tct_spread_all,"storedist_tct_data_shp_2017_07_10.shp")
#Calculate pop density
#NOTE(review): absolute local Windows path — not portable. shapefile()/area()
#and mapview() come from the raster and mapview packages, which are not loaded
#by this script's library() calls; confirm they are attached elsewhere.
x <- shapefile("C:/Users/jshannon/Dropbox/Jschool/GIS data/Census/Urban areas_2013/Tract_UA_Atlanta_individual.shp")
crs(x)
x$area_sqkm <- area(x) / 1000000
mapview(x,zcol="area_sqkm")
#Rebuild modeldata with distance in km and small-population tracts dropped.
modeldata<-read_csv("storedist_tct_data_2017_07_10.csv") %>%
  mutate(dist_1k=dist/1000) %>%
  filter(totpop_pov>5)
#Attach tract areas (positional column selection from the sp object), then
#compute persons per sq km, in thousands.
modeldata<-merge(modeldata,x[,c(1,5)])
modeldata$popden1k<-modeldata$totpop_pov/modeldata$area_sqkm/1000
#Calculate SNAP rate
modeldata$snap_pct<-modeldata$snap_enroll/(modeldata$totpop_pov)*100
write_csv(modeldata,"storedist_tct_data_2017_07_14.csv")
|
8143adbedf076d3c7ec749e1b30d4a54d591602b | d97091d79bbbc29f541e61c3bc00cef2c9788fa0 | /R/wrapper_primer.R | b375aded18154a9d188d9278271df1a9c2bc0c20 | [] | no_license | baptiste/adda | 4310e9180487c63c0f0ef3e087245a4cb45163b8 | c65d2b60ea98bd3a7ea9910acd010c65ab975167 | refs/heads/master | 2020-04-16T01:44:08.345077 | 2016-07-15T04:16:07 | 2016-07-15T04:16:07 | 10,132,097 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,254 | r | wrapper_primer.R |
## @knitr invisible, echo=FALSE, results='hide'
#knitr chunk defaults for the report this script is woven into, plus a global
#ggplot2 theme with a transparent panel background.
library(knitr)
opts_chunk$set(cache=TRUE, fig.width=10, tidy=FALSE)
library(ggplot2)
theme_set(theme_minimal() + theme(panel.background=element_rect(fill=NA)))
## @knitr setup
library(dielectric) # dielectric function of Au and Ag
library(plyr) # convenient functions to loop over parameters
library(reshape2) # reshaping data from/to long format
library(ggplot2) # plotting framework
## @knitr wrapper
#Wrapper around the external ADDA (discrete dipole approximation) binary.
#Builds a shell command for an ellipsoid of the given aspect ratio/size,
#with wavelength and refractive index rescaled by the surrounding medium's
#index, runs it, and parses extinction/absorption cross-sections from stdout.
#With test=TRUE (the default) the command is only echoed, never executed.
#Returns c(Cext_x, Cext_y, Cabs_x, Cabs_y, Csca_x, Csca_y); the two values per
#quantity presumably correspond to the two incident polarisations — confirm
#against the ADDA output format.
adda_spectrum <- function(shape = "ellipsoid",
                          euler = c(0, 0, 0),
                          AR = 1.3,
                          wavelength = 500,
                          radius = 20,
                          n = 1.5 + 0.2i ,
                          medium.index = 1.46,
                          dpl = ceiling(min(50, 20 * abs(n))),
                          test = TRUE, verbose=TRUE, ...) {
  #The command is prefixed with "echo" so that system() below only prints it;
  #actual execution happens by piping the echoed text into bash.
  command <- paste("echo ../adda/src/seq/adda -shape ", shape, 1/AR, 1/AR,
                   "-orient ", paste(euler, collapse=" "),
                   "-lambda ", wavelength*1e-3 / medium.index ,
                   "-dpl ", dpl,
                   "-size ", 2 * radius*1e-3,
                   "-m ", Re(n) / medium.index ,
                   Im(n) / medium.index , ...)
  if(verbose)
    message(system(command, intern=TRUE))
  if(test) return() # don't actually run the command
  # extract the results of interest
  resultadda <- system(paste(command, "| bash"), intern = TRUE)
  #Lines look like "Cext = <x-pol value>" / "Cext = <y-pol value>"; split on
  #"=" and pick the numeric halves. (val=T / s="=" are abbreviated grep /
  #strsplit argument names.)
  Cext <- as.numeric(unlist(strsplit(grep("Cext",resultadda,val=T)[1:2],s="="))[c(2,4)])
  Cabs <- as.numeric(unlist(strsplit(grep("Cabs",resultadda,val=T)[1:2],s="="))[c(2,4)])
  Csca <- Cext - Cabs
  c(Cext[1], Cext[2], Cabs[1], Cabs[2], Csca[1], Csca[2])
}
# testing that it works
adda_spectrum(test = FALSE)
## @knitr basic
#Gold dielectric function over 400-700 nm; run ADDA at each wavelength and
#collect the six cross-section values per row (preallocated matrix).
gold <- epsAu(seq(400, 700, length=100))
str(gold)
## empty matrix to store the results
results <- matrix(ncol=6, nrow=nrow(gold))
## loop over the wavelengths
for( ii in 1:nrow(gold) ){
  results[ii, ] <- adda_spectrum(wavelength = gold$wavelength[ii],
                                 n = sqrt(gold$epsilon[ii]),
                                 radius = 20, AR = 1.3, dpl=50,
                                 test = FALSE, verbose = FALSE)
}
str(results)
## basic plot
#Cross-sections vs wavelength: colour = quantity (ext/abs/sca), linetype =
#polarisation, matching the column layout returned by adda_spectrum.
matplot(gold$wavelength, results,
        type = "l", col = rep(1:3, each=2),
        lty = rep(1:2, 3),
        xlab = "Wavelength /nm",
        ylab = expression("Cross-sections /"*nm^2),
        main = "Au ellipsoid")
legend("topleft", legend=expression(sigma[ext],sigma[abs],sigma[sca], "",
                                    "x-polarisation", "y-polarisation"),
       inset=0.01, col=c(1:3, NA, 1, 1), lty=c(1,1,1, NA, 1, 2),
       bg = "grey95", box.col=NA)
## @knitr simulation, fig.height=4
gold <- epsAu(seq(400, 700, length=200))
#Full spectrum for one particle geometry: run adda_spectrum over every
#wavelength of `material` via plyr::mdply, then reshape to long format with
#readable polarisation (x/y) and quantity (extinction/absorption/scattering)
#factors decoded from the V1-V6 output columns.
simulation <- function(radius = 20, AR = 1.3, ..., material=gold){
  params <- data.frame(wavelength = material$wavelength,
                       n = sqrt(material$epsilon),
                       radius = radius,
                       AR = AR)
  results <- mdply(params, adda_spectrum, ..., test=FALSE)
  m <- melt(results, measure.vars = c("V1","V2","V3","V4","V5","V6"))
  #Odd columns are x-polarisation, even are y; pairs map to ext/abs/sca.
  m$polarisation <- m$type <- factor(m$variable)
  levels(m$polarisation) <- list(x = c('V1','V3','V5'),
                                 y = c('V2','V4','V6'))
  levels(m$type) <- list(extinction = c('V1','V2'),
                         absorption = c('V3','V4'),
                         scattering = c('V5','V6'))
  m
}
test <- simulation(radius = 20, AR = 1.3, verbose = FALSE)
str(test)
qplot(wavelength, value, colour = polarisation,
      facets = ~ type, data = test, geom = 'line')
## @knitr multiple
#Run the full simulation over a small grid of radius x aspect-ratio
#combinations and compare the spectra.
params <- expand.grid(radius = c(20, 22),
                      AR = c(1.2, 1.3))
all <- mdply(params, simulation, verbose = FALSE)
ggplot(all, aes(wavelength, value,
                linetype = polarisation, colour = factor(AR),
                group = interaction(polarisation, AR))) +
  facet_grid(type~radius, scales='free') +
  geom_line() + labs(x = 'wavelength /nm',
                     y = expression(sigma/nm^2),
                     colour = 'aspect ratio') +
  scale_colour_brewer(palette = 'Set1')
|
5bf9c670a4d89719c4779de0705af85bfd37f493 | 96393de930d88333dd2dcc6d36715109d1ca8355 | /PrizeExplore.R | 13198efd2e050f7df1fad0df7cb4225756bd9970 | [] | no_license | lotterdata/proj_4_bootcamp | 3fcc84f48cde4f0c7d31afa7c082c754df69eb48 | 784499716581b3f0625205d24b8e15b57efc0ad2 | refs/heads/master | 2021-01-10T07:35:23.153489 | 2015-11-29T17:30:16 | 2015-11-29T17:30:16 | 45,945,825 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 760 | r | PrizeExplore.R | library(DBI)
library(RPostgreSQL)
library(dplyr)
#Plot the monthly average of a prize column for a lottery game table and
#return the underlying data frame (columns: month encoded as year + month/12,
#and avg). Connects to the local "lotterydata" PostgreSQL database.
#
#WARNING: `game` and `prize` are pasted directly into the SQL text as
#identifiers, so they must come from trusted code only — never from user
#input (SQL-injection risk; DBI parameter binding cannot bind identifiers).
ExplorePrizes <- function(game,prize){
    drv <- dbDriver("PostgreSQL")
    con <- dbConnect(drv, host = 'localhost', dbname = 'lotterydata')
    #Release the connection even if the query, fetch, or plot errors out
    #(the original leaked the connection on any failure).
    on.exit(dbDisconnect(con), add = TRUE)
    sql.text <- paste("select date_part('year',drawdate)+date_part('month',drawdate)/12.0 as month, avg(",prize,") from",game,
                      "group by date_part('year',drawdate)+date_part('month',drawdate)/12.0",
                      "order by date_part('year',drawdate)+date_part('month',drawdate)/12.0")
    res <- dbSendQuery(con,sql.text)
    prize.data <- fetch(res,-1)
    #Free the server-side result set before plotting (previously never cleared).
    dbClearResult(res)
    plot(prize.data$month,prize.data$avg,
         type = 'n',
         xlab = "Month",
         ylab = "Average Prize")
    lines(prize.data$month,prize.data$avg)
    return(prize.data)
}
6c408d79a53cd4dee95ed24ad04979e2f6387e2c | 5ae3520829595ff481754ba0f1ccbd804f9d0b3a | /polymorphisms/gis_analysis/gis_analysis/polymorphism_gis_analysis.R | ab8e85b9644144368111c9968309a6ce3c882fdd | [] | no_license | DataSciBurgoon/arsenic_polymorphism | 92e299919512f18589dff0fd5facbb71040da2e3 | f1c80be496b248a8840efe4d106a4c41cdb5376b | refs/heads/master | 2021-04-28T07:42:23.676878 | 2018-02-20T17:16:42 | 2018-02-20T17:16:42 | 122,229,711 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,420 | r | polymorphism_gis_analysis.R | ################################################################################
# polymorphism_gis_analysis.R
#
################################################################################
library(zipcode)
library(ggmap)
library(ggplot2)
library(tidyr)
library(rgeos)
library(sp)
library(parallel)
#Read in the Census data
setwd("../../census_data/ACS_14_5YR_DP05")
us_census_race_ethnicity_data <- read.csv("ACS_14_5YR_DP05.csv", header=TRUE, skip=1,
check.names = FALSE)
setwd("../../gis_analysis/gis_analysis")
#Add in the lat/long values
data("zipcode")
us_census_race_ethnicity_data$Zip <- clean.zipcodes(us_census_race_ethnicity_data$Id2)
us_census_race_ethnicity_data <- merge(us_census_race_ethnicity_data, zipcode, by.x="Zip", by.y = "zip")
#Only want certain columns from the census data
estimate_columns <- c("Estimate; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race) - Mexican",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race) - Puerto Rican",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race) - Cuban",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race) - Other Hispanic or Latino",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - White alone",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Black or African American alone",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - American Indian and Alaska Native alone",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Asian alone",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Native Hawaiian and Other Pacific Islander alone",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Some other race alone",
"Estimate; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Two or more races")
all_necessary_columns <- c(estimate_columns, "latitude", "longitude")
us_census_race_ethnicity_data_trimmed <- us_census_race_ethnicity_data[, which(colnames(us_census_race_ethnicity_data) %in% all_necessary_columns)]
#Genotype frequencies in each population
genotype_frequencies <- read.table("rs11191439.txt", sep="\t", header=TRUE)
weighted_avg_genome_freqs <- by(genotype_frequencies, genotype_frequencies$Larger.Group, function(x) weighted.mean(x$C_Freq, x$Count), simplify=FALSE)
global_average <- weighted.mean(genotype_frequencies$C_Freq, genotype_frequencies$Count)
#Now I need to put these genotype frequencies into the right order, and use the global average when we have no other information
genotype_ordered <- c(weighted_avg_genome_freqs$Mexican,
weighted_avg_genome_freqs$`Puerto Rican`,
global_average,
global_average,
weighted_avg_genome_freqs$White,
weighted_avg_genome_freqs$African,
global_average,
weighted_avg_genome_freqs$Asian,
global_average,
global_average,
global_average)
#census_genotype_freqs <- us_census_race_ethicity_data_trimmed[, 1:11] * t(as.matrix(genotype_ordered))
# Elementwise product of two numeric vectors (or conformable matrices);
# used as the per-row workhorse for apply() below, scaling census counts
# by the matching genotype frequencies.
prod_fun <- function(x, y) x * y
#Scale each ZIP's 11 race/ethnicity population counts by the matching
#genotype frequency: apply() over rows (MARGIN = 1) multiplies each row by
#genotype_ordered; the result comes back transposed (ZIPs as columns).
t_census_genotype_freqs <- apply(as.matrix(us_census_race_ethnicity_data_trimmed[, 1:11]),
1,
prod_fun,
y=t(as.matrix(genotype_ordered)))
#Want to keep this so that the rows are the zip codes
census_genotype_freqs <- t(t_census_genotype_freqs)
#Aggregate the number of genetically susceptible people by zipcode
#(sum of expected carriers across the 11 categories).
agg_census_genotype_by_latlong <- rowSums(census_genotype_freqs)
#Aggregate the population for each zipcode
agg_census_total_population_by_latlong <- rowSums(as.matrix(us_census_race_ethnicity_data_trimmed[, 1:11]))
#Add back in the geocoordinates (row order is preserved from the trimmed
#census table, so direct cbind is valid here).
agg_census_genotype_by_latlong <- cbind(susc_individuals = agg_census_genotype_by_latlong,
latitude = us_census_race_ethnicity_data_trimmed$latitude,
longitude = us_census_race_ethnicity_data_trimmed$longitude)
agg_census_total_population_by_latlong <- cbind(population = agg_census_total_population_by_latlong,
latitude = us_census_race_ethnicity_data_trimmed$latitude,
longitude = us_census_race_ethnicity_data_trimmed$longitude)
#Fraction of each ZIP's population expected to carry the risk genotype.
#NOTE(review): ZIPs with zero recorded population produce NaN here.
prop_at_risk_census_by_latlong <- data.frame(agg_census_genotype_by_latlong)$susc_individuals / data.frame(agg_census_total_population_by_latlong)$population
prop_at_risk_census_by_latlong <- cbind(proportion = prop_at_risk_census_by_latlong,
latitude = us_census_race_ethnicity_data_trimmed$latitude,
longitude = us_census_race_ethnicity_data_trimmed$longitude)
prop_at_risk_census_by_latlong <- as.data.frame(prop_at_risk_census_by_latlong)
#Let's map where these susceptible individuals live
us_map <- get_map("united states", zoom=4)
puerto_rico_map <- get_map("puerto rico", zoom=9)
nc_map <- get_map("north carolina", zoom=7)
#NC bounding box
#34.996, -84.33
#33.84, -78.54
#36.55, -75.85
#36.58, -81.68
agg_census_genotype_by_latlong <- as.data.frame(agg_census_genotype_by_latlong)
agg_census_genotype_by_latlong_threshold <- agg_census_genotype_by_latlong[which(agg_census_genotype_by_latlong$susc_individuals > 5000), ]
nc_data <- subset(agg_census_genotype_by_latlong,
-84.33 <= longitude & longitude <= -75.85 &
33.84 <= latitude & latitude <= 36.58)
ggmap(us_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=susc_individuals),
data=as.data.frame(agg_census_genotype_by_latlong), alpha=.30, na.rm = T, size=.5) +
scale_color_gradient(low="beige", high="dark red")
png("us_susceptible_individuals_map.png", height=700, width=700)
agg_census_genotype_by_latlong <- as.data.frame(agg_census_genotype_by_latlong)
agg_census_genotype_by_latlong_threshold <- agg_census_genotype_by_latlong[which(agg_census_genotype_by_latlong$susc_individuals > 5000), ]
ggmap(us_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=susc_individuals),
data=agg_census_genotype_by_latlong_threshold, alpha=.30, na.rm = T, size=3) +
scale_color_gradient(low="red", high="dark red")
dev.off()
ggmap(puerto_rico_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=susc_individuals),
data=as.data.frame(agg_census_genotype_by_latlong), alpha=.50, na.rm = T, size=3) +
scale_color_gradient(low="red", high="dark red")
ggmap(nc_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=susc_individuals),
data=nc_data, alpha=.50, na.rm = T, size=3) +
scale_color_gradient(low="beige", high="dark red")
gg_us_map <- ggmap(us_map, extent='device')
gg_pr_map <- ggmap(puerto_rico_map, extent='device')
gg_nc_map <- ggmap(nc_map, extent='device')
ggmap(us_map, extent='device', maprange = FALSE) +
geom_density2d(data=agg_census_genotype_by_latlong_threshold,
aes(x=longitude, y=latitude),
size=0.3) +
stat_density2d(
aes(x = longitude, y = latitude, fill = ..level.., alpha=..level..),
size = 2, bins = 15, data = agg_census_genotype_by_latlong_threshold,
geom = "polygon") +
scale_fill_gradient(low="beige", high="blue") +
scale_alpha(range = c(.4, .75), guide = FALSE) +
guides(fill = guide_colorbar(barwidth = 1.5, barheight = 10))
ggmap(puerto_rico_map, extent='device', maprange = FALSE) +
geom_density2d(data=as.data.frame(agg_census_genotype_by_latlong),
aes(x=longitude, y=latitude),
size=0.3) +
stat_density2d(
aes(x = longitude, y = latitude, fill = ..level.., alpha=..level..),
size = 2, bins = 10, data = as.data.frame(agg_census_genotype_by_latlong),
geom = "polygon") +
scale_fill_gradient(low="beige", high="blue") +
scale_alpha(range = c(.4, .75), guide = FALSE) +
guides(fill = guide_colorbar(barwidth = 1.5, barheight = 10))
ggmap(nc_map, extent='device', maprange = FALSE) +
geom_density2d(data=nc_data,
aes(x=longitude, y=latitude),
size=0.3) +
stat_density2d(
aes(x = longitude, y = latitude, fill = ..level.., alpha=..level..),
size = 2, bins = 4, data = nc_data,
geom = "polygon") +
scale_fill_gradient(low="beige", high="blue") +
scale_alpha(range = c(.4, .75), guide = FALSE) +
guides(fill = guide_colorbar(barwidth = 1.5, barheight = 10))
# Based on Beebe-Dimmer, et al (http://ehjournal.biomedcentral.com/articles/10.1186/1476-069X-11-43)
# Odds ratio for bladder cancer increases 1.7x for rs11191439 per each 1ug/L
# increase in arsenic in the water
# Bringing in the USGS data on arsenic in the groundwater through 2001 -- it's
# a bit dated, but it's the best data we have available to us at this time
usgs_arsenic_data <- read.table("arsenic_nov2001_usgs.txt", sep="\t", header=TRUE)
# Columns 10-12 are taken as concentration / latitude / longitude — assumes
# the USGS file layout; TODO confirm against the raw file header.
usgs_arsenic_data <- usgs_arsenic_data[, c(10:12)]
colnames(usgs_arsenic_data) <- c("concentration", "latitude", "longitude")
# Flip sign: the source file presumably stores western longitudes as positive
# degrees; verify against a known well location.
usgs_arsenic_data$longitude <- -1 * usgs_arsenic_data$longitude
# NOTE(review): a multiplicative OR of 1.7 per 1 ug/L would usually compound
# as 1.7^concentration; concentration * 1.7 treats the effect as linear in
# concentration — confirm which model is intended.
usgs_geospatial_odds_ratio <- usgs_arsenic_data$concentration * 1.7
usgs_geospatial_odds_ratio <- cbind(odds_ratio = usgs_geospatial_odds_ratio,
latitude = usgs_arsenic_data$latitude,
longitude = usgs_arsenic_data$longitude)
usgs_geospatial_odds_ratio <- as.data.frame(usgs_geospatial_odds_ratio)
# Coordinate-only frames for the nearest-neighbour match below.
usgs_latlong <- usgs_geospatial_odds_ratio[, 2:3]
census_latlong <- prop_at_risk_census_by_latlong[, 2:3]
#set1sp <- SpatialPoints(usgs_latlong)
#set2sp <- SpatialPoints(census_latlong)
#This next step takes a LONG time to run
#set1$nearest_in_set2 <- apply(gDistance(set1sp, set2sp, byid=TRUE), 1, which.min)
library(geosphere)
# create distance matrix: great-circle distance (spherical law of cosines)
# from every USGS well to every census ZIP centroid; rows = wells, cols = ZIPs.
mat <- distm(usgs_geospatial_odds_ratio[,c("longitude", "latitude")], census_latlong[,c("longitude", "latitude")], fun=distCosine)
# assign the name to the point in list1 based on shortest distance in the matrix
#list1$locality <- list2$locality[apply(mat, 1, which.min)]
# Parallel row-wise which.min: index of the nearest ZIP for each well.
no_cores <- detectCores() - 1
cl <- makeCluster(no_cores)
mat_min_row <- parRapply(cl, mat, which.min)
stopCluster(cl)
# Snap each USGS well to its nearest census ZIP's coordinates and at-risk
# proportion (all three vectors are indexed by well, in file order).
usgs_x_census_latitude <- census_latlong$latitude[mat_min_row]
usgs_x_census_longitude <- census_latlong$longitude[mat_min_row]
usgs_prop_at_risk <- prop_at_risk_census_by_latlong$proportion[mat_min_row]
usgs_concentration_x_census <- cbind(concentration = usgs_arsenic_data$concentration,
latitude=usgs_x_census_latitude,
longitude=usgs_x_census_longitude)
usgs_concentration_x_census <- as.data.frame(usgs_concentration_x_census)
png("usgs_arsenic_ground_water_concentrations.png", width=700, height=700)
ggmap(us_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=log10(concentration)),
data=usgs_concentration_x_census, alpha=0.8, na.rm = T) +
scale_color_gradient(low="yellow", high="dark red")
dev.off()
#http://stats.stackexchange.com/questions/131416/converting-adjusted-odds-ratios-to-its-rr-counterpart
#Relative Risk = Odds Ratio / ((1 - p0) + (p0 * Odds Ratio))
#  where p0 is the baseline outcome prevalence in the unexposed group.
#PAR: PAR = Pe*(RRe-1)/([1 + Pe*(RRe-1)])
#  where Pe is the proportion of the population exposed (here: the ZIP-level
#  proportion carrying the risk genotype, matched to each well above).
p0 <- 0.437 #from Beebe-Dimmer, et al
# Convert each well's odds ratio to a relative risk, then to a population
# attributable risk fraction, and tag with the matched census coordinates.
usgs_geospatial_rr <- (usgs_geospatial_odds_ratio$odds_ratio) / ((1-p0)+(p0*usgs_geospatial_odds_ratio$odds_ratio))
usgs_geospatial_par <- (usgs_prop_at_risk * (usgs_geospatial_rr-1))/(1 + usgs_prop_at_risk * (usgs_geospatial_rr - 1))
usgs_geospatial_par_latlong <- cbind(par = usgs_geospatial_par,
latitude = usgs_x_census_latitude,
longitude = usgs_x_census_longitude)
ggmap(us_map, extent='device', maprange = FALSE) +
geom_density2d(data=as.data.frame(usgs_geospatial_par_latlong),
aes(x=longitude, y=latitude),
size=0.3) +
stat_density2d(
aes(x = longitude, y = latitude, fill = ..level.., alpha=..level..),
size = 2, bins = 10, data = as.data.frame(usgs_geospatial_par_latlong),
geom = "polygon") +
scale_fill_gradient(low="beige", high="blue") +
scale_alpha(range = c(.4, .75), guide = FALSE) +
guides(fill = guide_colorbar(barwidth = 1.5, barheight = 10))
usgs_geospatial_par_latlong <- as.data.frame(usgs_geospatial_par_latlong)
usgs_geospatial_par_incidence_latlong <- cbind(par_incidence = usgs_geospatial_par_latlong$par * data.frame(agg_census_total_population_by_latlong)$population[mat_min_row],
latitude = usgs_x_census_latitude,
longitude = usgs_x_census_longitude)
#Population attributable risk incidence map
ggmap(us_map, extent='device', maprange = FALSE) +
geom_density2d(data=as.data.frame(usgs_geospatial_par_incidence_latlong),
aes(x=longitude, y=latitude),
size=0.3) +
stat_density2d(
aes(x = longitude, y = latitude, fill = ..level.., alpha=..level..),
size = 2, bins = 10, data = as.data.frame(usgs_geospatial_par_incidence_latlong),
geom = "polygon") +
scale_fill_gradient(low="beige", high="blue") +
scale_alpha(range = c(.4, .75), guide = FALSE) +
guides(fill = guide_colorbar(barwidth = 1.5, barheight = 10))
us_map2 <- get_map("united states", zoom=4, maptype="hybrid")
png("population_attributable_risk_incidence_cases_us-wide.png", width=3000, height=3000, res=300)
ggmap(us_map2) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=par_incidence, size=par_incidence),
data=as.data.frame(usgs_geospatial_par_incidence_latlong), alpha=0.8, na.rm = T) +
scale_color_gradient(low="light blue", high="dark red")
dev.off()
png("population_attributable_risk_incidence_cases_us-wide_state_boundaries.png", width=3000, height=3000, res=300)
ggmap(us_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=par_incidence, size=par_incidence),
data=as.data.frame(usgs_geospatial_par_incidence_latlong), alpha=0.8, na.rm = T) +
scale_color_gradient(low="light blue", high="dark red")
dev.off()
#Where is the PAR the highest?
hist(as.data.frame(usgs_geospatial_par_incidence_latlong)$par_incidence)
cutoff <- quantile(as.data.frame(usgs_geospatial_par_incidence_latlong)$par_incidence, probs=0.75, na.rm = TRUE)
usgs_geospatial_par_incidence_latlong <- as.data.frame(usgs_geospatial_par_incidence_latlong)
usgs_geospatial_par_incidence_latlong_threshold <- usgs_geospatial_par_incidence_latlong[which(usgs_geospatial_par_incidence_latlong$par_incidence >= cutoff), ]
ggmap(us_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=par_incidence, size=par_incidence),
data=usgs_geospatial_par_incidence_latlong_threshold, alpha=0.8, na.rm = T) +
scale_color_gradient(low="light blue", high="dark red")
png("par_map_us_arsenic_groundwater.png", height=700, width=700)
ggmap(us_map) + geom_point(
aes(x=longitude, y=latitude, show_guide = TRUE, colour=par_incidence),
data=usgs_geospatial_par_incidence_latlong_threshold, alpha=0.8, na.rm = T, size=3) +
scale_color_gradient(low="orange", high="purple")
dev.off()
#Posterior probability of bladder cancer in adults with arsenic exposure > 3.72ppb:
#Note: the prior is the US bladder cancer incidence, which includes the entire US population
# thus it's likely to be an underestimate of the true prior for the genotype.
#Plain Bayes' theorem: P(cancer|arsenic) =
#  P(arsenic|cancer)*P(cancer) / [P(arsenic|cancer)*P(cancer) + P(arsenic|no cancer)*P(no cancer)]
#The 0.30 in the denominator implicitly assumes P(arsenic|no cancer) = 1 - 0.70.
#NOTE(review): this same calculation is repeated verbatim at the bottom of the
#script — one copy could be removed.
prior_prob_bladder_cancer <- 20.1/100000 #http://seer.cancer.gov/statfacts/html/urinb.html on August 29, 2016
p_arsenic_given_bladder_cancer <- 0.70 #http://ehjournal.biomedcentral.com/articles/10.1186/1476-069X-11-43
denominator <- (prior_prob_bladder_cancer * p_arsenic_given_bladder_cancer) + (0.30 * (1 - prior_prob_bladder_cancer))
posterior_bladder_cancer_given_arsenic <- (prior_prob_bladder_cancer * p_arsenic_given_bladder_cancer) / denominator
#Bayes factor = posterior odds / prior odds.
bayes_factor <- (posterior_bladder_cancer_given_arsenic / (1 - posterior_bladder_cancer_given_arsenic)) / (prior_prob_bladder_cancer/ (1 - prior_prob_bladder_cancer))
posterior_bladder_cancer_given_arsenic * 100000 #incidence per 100,000 people is 112.5
#Let's redo this posterior analysis, but this time we're going to add in some uncertainty
#And this will be for lifetime cancer risk:
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# THE MODEL.
bladder_cancer_modelString = "
data {
int<lower=0> N; //number of items
int y; // y number of successes
}
parameters {
real <lower=0, upper=1> theta;
}
model {
theta ~ beta(0.000201*100000, (1-0.000201)*100000);
y ~ binomial(N, theta);
}
"
#So in this model, I decided that we actually DON'T know what the prior actually
#should be. I'm using a flat prior here.
# THE MODEL.
bladder_cancer_modelString = "
data {
int<lower=0> N; //number of items
int y; // y number of successes
}
parameters {
real <lower=0, upper=1> theta;
real<lower=0,upper=1> lambda; // prior mean chance of success
real<lower=0.1> kappa; // prior count
}
transformed parameters {
real<lower=0> alpha; // prior success count
real<lower=0> beta; // prior failure count
alpha <- lambda * kappa;
beta <- (1 - lambda) * kappa;
}
model {
lambda ~ uniform(0,1); // hyperprior
kappa ~ pareto(0.1,1.5); // hyperprior
theta ~ beta(alpha,beta);
y ~ binomial(N, theta);
}
"
writeLines(bladder_cancer_modelString , con="TEMPmodel.txt" )
stanDso <- stan_model( model_code=bladder_cancer_modelString )
N <- 20
y <- 14
dataList <- list( y = y , N = N)
disease_allele_stanFit <- sampling( object = stanDso , data = dataList , chains = 3 , iter = 5000 ,
warmup = 200 , thin = 1,
control=list(adapt_delta=0.99))
stan_hist(disease_allele_stanFit)
posterior_dist <- extract(disease_allele_stanFit)[[1]]
mean(posterior_dist)
quantile(posterior_dist, c(0.05)) #boundary on 95% HDI
max(posterior_dist) #upper boundary on 95% HDI
#95% HDI: [0.52, 0.96]; mean 0.69
#Let's do the same for the ancestral allele
N <- 102
y <- 41
dataList <- list( y = y , N = N)
ancestor_allele_stanFit <- sampling( object = stanDso , data = dataList , chains = 3,
iter = 5000 , warmup = 200 , thin = 1,
control=list(adapt_delta=0.99))
stan_hist(ancestor_allele_stanFit)
posterior_dist <- extract(ancestor_allele_stanFit)[[1]]
mean(posterior_dist)
quantile(posterior_dist, c(0.05)) #boundary on 95% HDI
max(posterior_dist) #upper boundary on 95% HDI
#95% HDI: [0.32, 0.59]; mean 0.40
#Keep in mind that the posteriors are sensitive to differences in the N values.
#If you have a larger N, then the prior is weighted less, and that has a huge
#influence. So I chose to keep the N values constant, and change the y values
#accordingly.
#Posterior odds ratio
#3.34
(.69/(1-.69))/(.40/(1-.40))
###################
#Bayes Analysis 2
# Going out on a limb here...based on a study from NCI
# it said 20% greater incidence in a New England sample
# when exposed to arsenic in their drinking water compared to US average
# http://jnci.oxfordjournals.org/content/108/9/djw099.abstract
# So in this model, I'm going to assume that the prior probability is like
# 22%.
# THE MODEL.
ne_prior_bladder_cancer_modelString = "
data {
int<lower=0> N; //number of items
int y; // y number of successes
}
parameters {
real <lower=0, upper=1> theta;
}
model {
theta ~ beta(1.15, 4);
y ~ binomial(N, theta);
}
"
writeLines(ne_prior_bladder_cancer_modelString , con="TEMPmodel.txt" )
ne_prior_stanDso <- stan_model(model_code=ne_prior_bladder_cancer_modelString )
N <- 20
y <- 14
dataList <- list( y = y , N = N)
disease_allele_stanFit <- sampling( object = ne_prior_stanDso , data = dataList , chains = 3 , iter = 5000 ,
warmup = 200 , thin = 1,
control=list(adapt_delta=0.99))
stan_hist(disease_allele_stanFit)
posterior_dist <- extract(disease_allele_stanFit)[[1]]
mean(posterior_dist)
quantile(posterior_dist, c(0.05)) #boundary on 95% HDI
max(posterior_dist) #upper boundary on 95% HDI
#95% HDI: [0.44, 0.88]; mean 0.60
#Let's do the same for the ancestral allele
N <- 102
y <- 41
dataList <- list( y = y , N = N)
ancestor_allele_stanFit <- sampling( object = ne_prior_stanDso , data = dataList , chains = 3,
iter = 5000 , warmup = 200 , thin = 1,
control=list(adapt_delta=0.99))
stan_hist(ancestor_allele_stanFit)
# Posterior draws of theta (disease probability) for the ancestral allele fit.
posterior_dist <- extract(ancestor_allele_stanFit)[[1]]
mean(posterior_dist)
quantile(posterior_dist, c(0.05)) #boundary on 95% HDI
max(posterior_dist) #upper boundary on 95% HDI
#NOTE(review): quantile(., 0.05) plus the sample maximum gives a one-sided
#95% credible bound, not a highest-density interval — an HDI would come from
#e.g. HDInterval::hdi(); the reported numbers below should be re-labelled or
#recomputed accordingly.
#95% HDI: [0.32, 0.56]; mean 0.39
#Keep in mind that the posteriors are sensitive to differences in the N values.
#If you have a larger N, then the prior is weighted less, and that has a huge
#influence. So I chose to keep the N values constant, and change the y values
#accordingly.
#Posterior odds ratio (disease-allele posterior mean 0.60 vs ancestral 0.39)
#2.35
(.60/(1-.60))/(.39/(1-.39))
#Number of wells in the US that have 3ppm or more arsenic based on USGS data 33%
#NOTE(review): concentrations elsewhere in this script are in ug/L (ppb);
#"3ppm" here is presumably meant as 3 ppb — confirm the units.
length(which(usgs_arsenic_data$concentration >= 3)) / length(usgs_arsenic_data$concentration)
library(gRain)
yn <- c("yes", "no")
races <- c("mexican", "puerto_rican", "cuban", "other_latino", "white", "black",
"native", "asian", "hawaiian_pacific", "other")
r1 <- cptable(~race, values=c(rep(.1, 10)), levels=races)
g1 <- cptable(~genotype:race, values=c(.07, .93,
.18, .82,
round(global_average,2), 1-round(global_average,2),
round(global_average,2), 1-round(global_average,2),
.10, .90,
.13, .87,
round(global_average,2), 1-round(global_average,2),
round(weighted_avg_genome_freqs$Asian,2), 1-round(weighted_avg_genome_freqs$Asian,2),
round(global_average,2), 1-round(global_average,2),
round(global_average,2), 1-round(global_average,2)),
levels=yn)
w1 <- cptable(~arsenic_water, values=c(.33, .67), levels=yn)
c1 <- cptable(~cancer|genotype:arsenic_water,
values=c(.70, .30, .40, .60, .41, .59, .36, .64),
levels=yn)
plist <- compileCPT(list(r1, g1, w1, c1))
arsenic_cancer_bn <- grain(plist)
querygrain(setEvidence(arsenic_cancer_bn, evidence=list(race="asian", arsenic_water="yes")))
#Posterior probability of bladder cancer in adults with arsenic exposure > 3.72ppb:
#Note: the prior is the US bladder cancer incidence, which includes the entire US population
# thus it's likely to be an underestimate of the true prior for the genotype.
prior_prob_bladder_cancer <- 20.1/100000 #http://seer.cancer.gov/statfacts/html/urinb.html on August 29, 2016
p_arsenic_given_bladder_cancer <- 0.70 #http://ehjournal.biomedcentral.com/articles/10.1186/1476-069X-11-43
denominator <- (prior_prob_bladder_cancer * p_arsenic_given_bladder_cancer) + (0.30 * (1 - prior_prob_bladder_cancer))
posterior_bladder_cancer_given_arsenic <- (prior_prob_bladder_cancer * p_arsenic_given_bladder_cancer) / denominator
bayes_factor <- (posterior_bladder_cancer_given_arsenic / (1 - posterior_bladder_cancer_given_arsenic)) / (prior_prob_bladder_cancer/ (1 - prior_prob_bladder_cancer))
posterior_bladder_cancer_given_arsenic * 100000 #incidence per 100,000 people
|
60fa8c714a6b8c5fc684839c5e217aad53f034e7 | 815b653a737474b62b6288da8dff2844430417bb | /man/otp_make_config.Rd | 41ad85aca435a2d9d11ecea6d4cb0b0c046c8a56 | [] | no_license | cran/opentripplanner | 6c7d7ab5d5f8248d43607a33fc920652437df2ea | bd9469eb61b88d2638ca48cc59bfe30a1561dcd1 | refs/heads/master | 2023-02-13T21:33:46.252919 | 2023-02-02T16:30:02 | 2023-02-02T16:30:02 | 236,634,375 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,103 | rd | otp_make_config.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/otp-config.R
\name{otp_make_config}
\alias{otp_make_config}
\title{Make Config Object}
\usage{
otp_make_config(type, version = 1)
}
\arguments{
\item{type}{Which type of config file to create, "otp", "build", "router"}
\item{version}{version of OPT e.g. 1 or 2}
}
\description{
OTP can be configured using three json files `otp-config.json`,
`build-config.json`, and `router-config.json`. This function
creates a named list for each config file and
populates the defaults values.
}
\details{
For more details see:
http://docs.opentripplanner.org/en/latest/Configuration
}
\examples{
{
conf <- otp_make_config("build")
conf <- otp_make_config("router")
}
}
\seealso{
Other setup:
\code{\link{otp_build_graph}()},
\code{\link{otp_check_java}()},
\code{\link{otp_check_version}()},
\code{\link{otp_dl_demo}()},
\code{\link{otp_dl_jar}()},
\code{\link{otp_setup}()},
\code{\link{otp_stop}()},
\code{\link{otp_validate_config}()},
\code{\link{otp_write_config}()}
}
\concept{setup}
|
859f2beda0ee4c89f5bf071bbc6c840e810538e6 | bb30d4b7bb46c2d19668cf1712536621b0504202 | /data_visualization/scripts_v120/predict_deaths_common_age_make_figure.R | 98885e6f02628a2a6c1797ccbe1b15a24f179af1 | [
"CC-BY-4.0"
] | permissive | isabella232/US-covid19-agespecific-mortality-data | 550f221d42af801646985a69ac83515d5bf4bb3f | 961e2272cdd5c310b7a6f7a4d1f3d860249e325a | refs/heads/master | 2023-06-14T07:08:08.655324 | 2021-05-02T17:33:26 | 2021-05-02T17:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,068 | r | predict_deaths_common_age_make_figure.R | library(rstan)
library(data.table)
tempdir = "~/git/US-covid19-data-scraping/data_visualization/results_predict_deaths_common_age_strata"
args_line <- as.list(commandArgs(trailingOnly=TRUE))
if(length(args_line) > 0)
{
stopifnot(args_line[[1]]=='-tempdir')
args <- list()
tempdir <- args_line[[2]]
}
indir = "~/git/US-covid19-data-scraping" # path to the repo
stan_model = "201023o"
path.to.deathByAge.data = file.path(indir, "data", "processed", "2020-10-29", "DeathsByAge_US.csv")
path.to.demographics.data = file.path(indir, "data_visualization", "data", "us_population_withnyc.rds")
path.to.stan.model = file.path(indir, "data_visualization", "stan-models", paste0("predict_DeathsByAge_", stan_model, ".stan"))
source(file.path(indir, "data_visualization", "functions", "data-visualization-summary_functions.R"))
source(file.path(indir, "data_visualization", "functions", "data-visualization-stan_utility_functions.R"))
set.seed(3312122)
run_index = round(runif(1,0, 10000))
run_tag = paste0(stan_model, "_", run_index)
outdir.fit = file.path(tempdir, run_tag, "fits")
outdir.fig = file.path(tempdir, run_tag, "figures")
outdir.table = file.path(tempdir, run_tag, "table")
cat("outfile.dir is ", file.path(tempdir, run_tag))
dir.create(file.path(tempdir, run_tag), showWarnings = FALSE)
dir.create(outdir.fit, showWarnings = FALSE)
dir.create(outdir.table, showWarnings = FALSE)
dir.create(outdir.fig, showWarnings = FALSE)
dir.create(file.path(outdir.fig, "convergence_diagnostics"), showWarnings = FALSE)
dir.create(file.path(outdir.fig, "posterior_predictive_checks"), showWarnings = FALSE)
dir.create(file.path(outdir.fig, "continuous_contribution"), showWarnings = FALSE)
#
# read demographics by age to get location label
pop_count = as.data.table( read_pop_count_by_age_us(path.to.demographics.data) )
setnames(pop_count, "state", "loc_label")
pop_info = unique(select(pop_count, code, loc_label))
#
# Read death by age
deathByAge = as.data.table( read.csv( path.to.deathByAge.data ) )
set(deathByAge, NULL, 'date', deathByAge[,as.Date(date)])
deathByAge = merge(deathByAge, pop_info, by = c("code"))
# stratify by month
deathByAge[, month := format(date, "%m")]
death_summary_month = deathByAge[, list(cum.deaths = max(cum.deaths),
monthly_deaths = sum(daily.deaths),
date = max(date)), by = c("code", "age", "loc_label", "month")]
# find age from and age to
age_max = 105
death_summary_month[, age_from := as.numeric(ifelse(grepl("\\+", age), gsub("(.+)\\+", "\\1", age), gsub("(.+)-.*", "\\1", age)))]
death_summary_month[, age_to := as.numeric(ifelse(grepl("\\+", age), age_max, gsub(".*-(.+)", "\\1", age)))]
#
# Create age maps
# create map continuous
df_age_continuous = data.table(age_from = 0:age_max,
age_to = 0:age_max,
age_index = 0:age_max,
age = c(0.1, 1:age_max))
# create map for reporting age groups
df_age_reporting = data.table(age_from = c(0,10,20,35,50,65,80),
age_to = c(9,19,34,49,64,79,age_max),
age_index = 1:7,
age_cat = c("0-9", "10-19", "20-34", "35-49", "50-64", "65-79", "80+"))
df_age_reporting[, age_from_index := which(df_age_continuous$age_from == age_from), by = "age_cat"]
df_age_reporting[, age_to_index := which(df_age_continuous$age_to == age_to), by = "age_cat"]
# create map for 4 new age groups
df_ntl_age_strata = data.table(age_cat = c("0-24", "25-49", "50-74", "75+"),
age_from = c(0, 25, 50, 75),
age_to = c(24, 49, 74, age_max),
age_index = 1:4)
df_ntl_age_strata[, age_from_index := which(df_age_continuous$age_from == age_from), by = "age_cat"]
df_ntl_age_strata[, age_to_index := which(df_age_continuous$age_to == age_to), by = "age_cat"]
#
# find locations and dates
locations = unique(death_summary_month$code[death_summary_month$code != "US"])
dates = unique(death_summary_month$date)
#
# House-keeping
predictive_checks_table = vector(mode = "list", length = nrow(unique(select(death_summary_month, code, date))))
eff_sample_size_cum = vector(mode = "list", length = nrow(unique(select(death_summary_month, code, date))))
Rhat_cum = vector(mode = "list", length = nrow(unique(select(death_summary_month, code, date))))
eff_sample_size_monthly = vector(mode = "list", length = nrow(unique(select(death_summary_month, code, date))))
Rhat_monthly = vector(mode = "list", length = nrow(unique(select(death_summary_month, code, date))))
j = 1
#
# For every state
for(m in 1:length(locations)){
#m = 12
Code = locations[m]
cat("Location ", as.character(Code), "\n")
tmp = subset(death_summary_month, code == Code)
tmp = tmp[order(date, age_from)]
# sanity check: every age band must be well formed (lower bound <= upper bound)
stopifnot(all(tmp$age_from <= tmp$age_to))
# create map of original age groups onto the continuous age grid
df_state_age_strata = unique(select(tmp, age_from, age_to, age))
df_state_age_strata[, age_index := 1:nrow(df_state_age_strata)]
df_state_age_strata[, age_from_index := which(df_age_continuous$age_from == age_from), by = "age"]
df_state_age_strata[, age_to_index := which(df_age_continuous$age_to == age_to), by = "age"]
# stan data (the deaths element is filled in per month inside the loop below)
stan_data = list(
A = nrow(df_age_continuous),
age = df_age_continuous$age,
age2 = (df_age_continuous$age)^2,
B = nrow(df_state_age_strata),
age_from_state_age_strata = df_state_age_strata$age_from_index,
age_to_state_age_strata = df_state_age_strata$age_to_index,
C = nrow(df_ntl_age_strata),
age_from_ntl_age_strata = df_ntl_age_strata$age_from_index,
age_to_ntl_age_strata = df_ntl_age_strata$age_to_index,
D = nrow(df_age_reporting),
age_from_reporting_age_strata = df_age_reporting$age_from_index,
age_to_reporting_age_strata = df_age_reporting$age_to_index
)
#
# Fit for every month
for(t in 1:nrow(unique(select(tmp, code, date)))){
#t = 1
Date = unique(tmp$date)[t]
Month = unique(tmp$month)[t]
cat("Location ", as.character(Code), "\n")
cat("Month ", as.character(Month), "\n")
tmp1 = subset(tmp, month == Month)
cat("Start sampling \n")
#
# fit cumulative deaths (the fit objects were produced earlier and are
# re-loaded from disk here)
stan_data$deaths = tmp1$cum.deaths
file = file.path(outdir.fit, paste0("fit_cumulative_deaths_", Code, "_", Month, "_",run_tag,".rds"))
fit_cum <- readRDS(file=file)
#
# fit monthly deaths; skipped for the first month (no monthly increment yet)
cat("Monthly \n")
monthly_less_1 = 0
if(t != 1){
stan_data$deaths = tmp1$monthly_deaths
if(sum(stan_data$deaths) <= 1){ # we cannot fit the model if the sum of deaths is less than 1
monthly_less_1 = 1
} else{
file = file.path(outdir.fit, paste0("fit_monthly_deaths_", Code, "_", Month, "_",run_tag,".rds"))
fit_monthly <- readRDS(file=file)
}
} else{fit_monthly = NULL}
#
# Convergence diagnostics
# NOTE(review): 'summary' shadows base::summary for the rest of this
# iteration; columns 9 and 10 of the rstan summary matrix are presumably
# n_eff and Rhat -- confirm against the rstan version in use.
cat("\nMake convergence diagnostics \n")
summary = rstan::summary(fit_cum)$summary
eff_sample_size_cum[[j]] = summary[,9][!is.na(summary[,9])]
Rhat_cum[[j]] = summary[,10][!is.na(summary[,10])]
cat("the minimum and maximum effective sample size are ", range(eff_sample_size_cum[[j]]), "\n")
cat("the minimum and maximum Rhat are ", range(Rhat_cum[[j]]), "\n")
# hard fail when the sampler has not mixed well enough
stopifnot(min(eff_sample_size_cum[[j]]) > 500)
if(!monthly_less_1 & t != 1){
summary = rstan::summary(fit_monthly)$summary
eff_sample_size_monthly[[j]] = summary[,9][!is.na(summary[,9])]
Rhat_monthly[[j]] = summary[,10][!is.na(summary[,10])]
cat("the minimum and maximum effective sample size are ", range(eff_sample_size_monthly[[j]]), "\n")
cat("the minimum and maximum Rhat are ", range(Rhat_monthly[[j]]), "\n")
stopifnot(min(eff_sample_size_monthly[[j]]) > 500)
}
# Trace/pairs/interval plots; empty ggplots keep the grid layout intact
# when the monthly fit is unavailable.
posterior_cum <- as.array(fit_cum)
p1_trace = bayesplot::mcmc_trace(posterior_cum, regex_pars = c("beta", "v_inflation")) + labs(title = "Cumulative deaths fit")
p1_pairs = gridExtra::arrangeGrob(bayesplot::mcmc_pairs(posterior_cum, regex_pars = c("beta", "v_inflation")), top = "Cumulative deaths fit")
p1_intervals = bayesplot::mcmc_intervals(posterior_cum, regex_pars = c("beta", "v_inflation")) + labs(title = "Cumulative deaths fit")
if(!monthly_less_1 & t != 1){
posterior_monthly <- as.array(fit_monthly)
p2_trace = bayesplot::mcmc_trace(posterior_monthly, regex_pars = c("beta", "v_inflation")) + labs(title = "Monthly deaths fit")
p2_pairs = gridExtra::arrangeGrob(bayesplot::mcmc_pairs(posterior_monthly, regex_pars = c("beta", "v_inflation")), top = "Monthly deaths fit")
p2_intervals = bayesplot::mcmc_intervals(posterior_monthly, regex_pars = c("beta", "v_inflation"), probs = 0.95) + labs(title = "Monthly deaths fit")
} else{
p2_trace = ggplot()
p2_pairs = ggplot()
p2_intervals = ggplot()
}
p_trace = gridExtra::grid.arrange(p1_trace, p2_trace, nrow = 2, top = paste(Code, "month", Month))
p_pairs = gridExtra::grid.arrange(p1_pairs, p2_pairs, top = paste(Code, "month", Month))
p_intervals = gridExtra::grid.arrange(p1_intervals, p2_intervals, top = paste(Code, "month", Month))
ggsave(p_trace, file = file.path(outdir.fig, "convergence_diagnostics", paste0("trace_plots_", Code, "_", Month, "_", run_tag,".png") ), w= 8, h = 8)
ggsave(p_pairs, file = file.path(outdir.fig, "convergence_diagnostics", paste0("pairs_plots_", Code, "_", Month, "_", run_tag,".png") ), w= 8, h = 10)
ggsave(p_intervals, file = file.path(outdir.fig, "convergence_diagnostics", paste0("intervals_plots_", Code, "_", Month, "_", run_tag,".png") ), w= 8, h = 8)
#
# Plots predictive checks
# Make predictive checks table
# NOTE(review): "predive" in the progress messages below is a typo for
# "predictive"; left unchanged here because it is runtime output.
cat("\nMake posterior predive checks table \n")
pc_cum = make_predictive_checks_table(fit_cum, "deaths_cum", tmp1, df_state_age_strata)
if(monthly_less_1 | t == 1){
# no monthly fit available: carry NA placeholders so the merge below works
pc_monthly = copy(pc_cum)
pc_monthly[, `:=`(M_deaths_monthly = NA,
CL_deaths_monthly = NA,
CU_deaths_monthly = NA)]
pc_monthly = select(pc_monthly, -CL_deaths_cum, -CU_deaths_cum, -M_deaths_cum)
}else{
pc_monthly = make_predictive_checks_table(fit_monthly, "deaths_monthly", tmp1, df_state_age_strata)
}
predictive_checks_table[[j]] = merge(pc_cum, pc_monthly, by = c("age", "code", "date", "cum.deaths", "age_from", "age_to", "monthly_deaths", "month", "loc_label"))
# plot
cat("\nMake posterior predive checks plots \n")
p_cum = plot_posterior_predictive_checks(predictive_checks_table[[j]], variable = "cum.deaths", variable_abbr = "deaths_cum", lab = "Cumulative COVID-19 deaths", Code, Month)
p_monthly = plot_posterior_predictive_checks(pc_monthly, variable = "monthly_deaths", variable_abbr = "deaths_monthly", lab = "Monthly COVID-19 deaths", Code, Month)
ggsave(gridExtra::grid.arrange(p_cum[[1]]), file = file.path(outdir.fig, "posterior_predictive_checks", paste0("posterior_predictive_checks_cum_", Code, "_", Month, "_", run_tag,".png") ), w= 8, h = 6)
ggsave(gridExtra::grid.arrange(p_monthly[[1]]), file = file.path(outdir.fig, "posterior_predictive_checks", paste0("posterior_predictive_checks_monthly_", Code, "_", Month, "_", run_tag,".png") ), w= 8, h = 6)
#
# Plots continuous age distribution pi
cat("\nMake continuous age distribution plots \n")
pi_predict_cum = plot_continuous_age_contribution(fit_cum, df_age_continuous, "cumulative COVID-19 deaths", Code, Month)
if(!monthly_less_1 & t != 1){
pi_predict_monthly = plot_continuous_age_contribution(fit_monthly, df_age_continuous, "monthly COVID-19 deaths", Code, Month)
} else{
pi_predict_monthly = ggplot()
}
ggsave(pi_predict_cum, file = file.path(outdir.fig, "continuous_contribution", paste0("pi_predict_cum", "_",Code, "_", Month, "_", run_tag,".png") ), w= 8, h = 6)
ggsave(pi_predict_monthly, file = file.path(outdir.fig, "continuous_contribution", paste0("pi_predict_monthly", "_",Code, "_", Month, "_", run_tag,".png") ), w= 8, h = 6)
# advance the shared slot counter for the diagnostic lists
j = j + 1
}
}
#
# Save: flatten the per-state-month lists and persist everything to outdir.table
cat("\nSave \n")
predictive_checks_table = do.call("rbind", predictive_checks_table)
saveRDS(predictive_checks_table, file = file.path(outdir.table, "deaths_predict_state_age_strata.rds"))
eff_sample_size_cum = as.vector(unlist(eff_sample_size_cum))
saveRDS(eff_sample_size_cum, file = file.path(outdir.table, "eff_sample_size_cum.rds"))
eff_sample_size_monthly = as.vector(unlist(eff_sample_size_monthly))
saveRDS(eff_sample_size_monthly, file = file.path(outdir.table, "eff_sample_size_monthly.rds"))
Rhat_cum = as.vector(unlist(Rhat_cum))
saveRDS(Rhat_cum, file = file.path(outdir.table, "Rhat_cum.rds"))
Rhat_monthly = as.vector(unlist(Rhat_monthly))
saveRDS(Rhat_monthly, file = file.path(outdir.table, "Rhat_monthly.rds"))
|
940409551b2dd891fc383a9c8616435972a9fc14 | b0004ba3e4e7b72d441680fbfd6736288889a999 | /R/wordmap_nuvem_de_palavras.R | 9efc85ede429864bc7b8e6d51e38d8c7d10cebd0 | [
"MIT"
] | permissive | guibridi/rds2_final | 4b9ea25333463f3a33429efcd7930f46c89e9945 | 710dc4207e27ee038e7adffe58df42c5650af602 | refs/heads/master | 2023-04-24T12:07:35.431819 | 2021-05-10T20:29:20 | 2021-05-10T20:29:20 | 366,144,983 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,737 | r | wordmap_nuvem_de_palavras.R | # library(wordcloud)
# library(RColorBrewer)
# library(wordcloud2)
# library(tidyverse)
# library(janitor)
# library(tm)
# Create a vector containing only the free-text field to be analysed
paralisadas <- readr::read_rds("data-raw/paralisadas.rds")
dplyr::glimpse(paralisadas)
texto <- paralisadas$motivo
# Build a tm corpus from the text vector
docs <- tm::Corpus(tm::VectorSource(texto))
# Clean the texts: strip numbers and punctuation, squeeze whitespace
docs <- docs %>%
tm::tm_map(tm::removeNumbers) %>%
tm::tm_map(tm::removePunctuation) %>%
tm::tm_map(tm::stripWhitespace)
docs <- tm::tm_map(docs, tm::content_transformer(tolower))
# Texts are in Portuguese, so Portuguese stop words are removed
docs <- tm::tm_map(docs, tm::removeWords, tm::stopwords('portuguese'))
# Build a document-term matrix and derive word frequencies from it
dtm <- tm::TermDocumentMatrix(docs)
matriz <- as.matrix(dtm)
palavras <- sort(rowSums(matriz), decreasing = TRUE)
df <- data.frame(word = names(palavras), freq = palavras)
# Draw the word cloud (words appearing at least 5 times, 200 words max)
wordcloud::wordcloud(
words = df$word,
freq = df$freq,
min.freq = 5,
max.words = 200,
random.order = FALSE,
rot.per = 0.35,
scale = c(3.5, 0.25),
colors = RColorBrewer::brewer.pal(8, "Dark2")
)
# Alternatively, the wordcloud2 package can be used (visually more appealing)
wordcloud2::wordcloud2(data = df, size = 1.6, color = 'random-dark')
wordcloud2::wordcloud2(data = df, size = 0.5, shape = 'diamond')
# wordcloud2(data = df, size = 1, minSize = 0, gridSize = 0,
# fontFamily = 'Segoe UI', fontWeight = 'bold',
# color = 'random-dark', backgroundColor = "white",
# minRotation = -pi/4, maxRotation = pi/4, shuffle = TRUE,
# rotateRatio = 0.4, shape = 'circle', ellipticity = 0.65,
# widgetsize = NULL, figPath = NULL, hoverFunction = NULL)
|
866f0e11a03619a7d90ee8d6aec3bf7810ca7698 | 5631f3c66312278bf846af25ccae5fef69d9a44d | /plot1.R | bde0102cd4084d38d156c16b36c1122413fa0766 | [] | no_license | stuartspern/ExData_Plotting1 | 413a524d79a15a859d2a9dc68e831ac7d6053851 | ad3c2eff0eede81f5ea449fb37143e6a1fd46358 | refs/heads/master | 2020-12-31T03:03:02.834368 | 2016-05-01T18:02:40 | 2016-05-01T18:02:40 | 57,597,954 | 0 | 0 | null | 2016-05-01T12:42:24 | 2016-05-01T12:42:24 | null | UTF-8 | R | false | false | 601 | r | plot1.R |
# set working directory (machine-specific absolute path)
setwd("C:/Users/stuartspern/Documents/Downloads/Courses/Data Science/R_working_directory/Course4_Week1")
# load the data loader file (assumed to define two_day_data -- TODO confirm)
source("Data_loader.R")
plot1 <- paste(getwd(), "/plot1.png", sep = "")
# Write the histogram to plot1.png only if the file does not exist yet;
# otherwise draw the same histogram on the active graphics device.
if(!file.exists(plot1)){
png("plot1.png", width = 480, height = 480)
hist(two_day_data$Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
dev.off()
} else {
# NOTE(review): identical hist() call duplicated in both branches;
# could be factored into a small helper.
hist(two_day_data$Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
} |
9ba8b1ef37ceebf48e42ca648c1768fa5971ee52 | 0ffafa520c0030fd858ce6efcff2dc52b2972b64 | /man/target_type_organiser.Rd | 1527e12afc83dcd4b8aa2c25cbf23b6fffdc7c06 | [] | no_license | AlexanderKononov/cytofBrowser | d0d7b4b70af7d1d37c6bde9eb6aac891d7789af7 | 12f3c7290493f45e504eb7089169eef3b95dbc73 | refs/heads/master | 2022-12-07T14:28:00.372608 | 2020-08-25T17:35:08 | 2020-08-25T17:35:08 | 230,823,004 | 5 | 1 | null | 2020-03-18T15:37:56 | 2019-12-30T00:59:34 | R | UTF-8 | R | false | true | 389 | rd | target_type_organiser.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Correlation.R
\name{target_type_organiser}
\alias{target_type_organiser}
\title{Low-level function to get tt_expData object for one sample}
\usage{
target_type_organiser(cell_ctDist, expData)
}
\arguments{
\item{expData}{}
}
\value{
}
\description{
Low-level function to get tt_expData object for one sample
}
|
b9ed04fd9519ed28c6677f1a8fc692c2f64da076 | 9ab05b7f8d8697fe99e6d4e7917fcb2b3234269c | /man/RisksetsToIpdSkewed.Rd | 9906b14e85ae5463b8805ca0f33837b0f9cefa06 | [] | no_license | kaz-yos/distributed | 87ba8da54be2379c06fe244f4f570db4555770d7 | 46e53316e7ed20bcb8617e238b1b776fbeb364e2 | refs/heads/master | 2021-05-05T17:31:45.076267 | 2018-06-27T14:37:17 | 2018-06-27T14:37:17 | 103,559,562 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,851 | rd | RisksetsToIpdSkewed.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/04.AnalyzeData.R
\name{RisksetsToIpdSkewed}
\alias{RisksetsToIpdSkewed}
\title{Expand weighted risk set data with variance to long format (enhanced).}
\usage{
RisksetsToIpdSkewed(x, compress, helper_fun = RisksetsToIpdSkewedHelper)
}
\arguments{
\item{x}{data frame generated by \code{\link{RequestSiteRisksets}}, containing risk set-level data.}
\item{compress}{defaults to \code{FALSE}. If true, the summary long-format data containing one row for each unique combination of variables including weights are created. The count variable indicates how many individuals each row represent.}
\item{helper_fun}{helper function used to regenerate weights given sample size \code{n}, sample mean \code{m}, sample variance \code{v}, and labeling for treatment \code{A} and event status \code{event}. Defaults to \code{RisksetsToIpdSkewedHelper}, which tries to balance the number of observations with above-mean and below-mean weights. \code{RisksetsToIpdExtremeHelper} will positions just one observation with an above-mean weight and all others below mean.}
}
\value{
data frame populated with point estimates and variance estimates by various methods.
}
\description{
Expand weighted risk set-level data to individual-level data or summary long-format data. The weights are regenerated to maintain the given mean and variance. This version specifically tries to avoid negative weights by skewing the distribution. Also, computation is based on the summary version, which is expanded when \code{compress = FALSE}. The individual-level data contain one row for each individual in each risk set. The summary long-format data are compressed so that each row represents multiple individuals. The count variable indicates how many individuals each row represents.
}
\author{
Kazuki Yoshida
}
|
fde3f8567c78ff6c755e8864b1936797b005cf53 | f5e1eb18ef32b847556eed4f3707b1d5a9689247 | /r_modules/production_imputation/faoswsProductionImputation/ensembleImpute.R | 0eeff819cb758e715ef83ce78d4a3753ba682e05 | [] | no_license | mkao006/sws_r_api | 05cfdd4d9d16ea01f72e01a235614cb0dcaf573d | 040fb7f7b6af05ec35293dd5459ee131b31e5856 | refs/heads/master | 2021-01-10T20:14:54.710851 | 2015-07-06T08:03:59 | 2015-07-06T08:03:59 | 13,303,093 | 2 | 5 | null | 2015-06-30T07:57:41 | 2013-10-03T16:14:13 | R | UTF-8 | R | false | false | 2,908 | r | ensembleImpute.R | ##' Function to perform ensemble imputation
##'
##' This is an implementation of the ensemble imputation methodology
##' developed for the FAO production domain.
##'
##' @param x A numeric vector
##' @param restrictWeights Whether a maximum weight restriction should
##' be imposed.
##' @param maximumWeights The maximum weight to be imposed, must be
##' between [0.5, 1].
##' @param ensembleModel A list of models to be used to build the
##' ensemble.
##' @param plot Whether the result of the ensemble should be plotted.
##'
##' @export
##'
ensembleImpute = function(x, restrictWeights = TRUE,
    maximumWeights = 0.7,
    ensembleModel = list(defaultMean = defaultMean,
        defaultLm = defaultLm, defaultExp = defaultExp,
        defaultLogistic = defaultLogistic, defaultLoess = defaultLoess,
        defaultSpline = defaultSpline, defaultArima = defaultArima,
        defaultMars = defaultMars, defaultNaive = defaultNaive),
    plot = FALSE){
    ## Impute the missing entries of x with a weighted ensemble of the
    ## component models; observed entries are always returned unchanged.
    n.obs <- length(x)
    n.ens <- length(ensembleModel)
    miss <- is.na(x)
    ## Nothing to impute: hand the series back untouched.
    if(!any(miss))
        return(x)
    obs <- na.omit(x)
    ## No observations at all: nothing can be estimated.
    if(length(obs) == 0)
        return(rep(NA_real_, n.obs))
    ## A single repeated value: the mean model is the only sensible fit.
    if(length(unique(obs)) == 1)
        return(defaultMean(x))
    ## Fit every component model, weight the fits, and blend them into a
    ## single ensemble series.
    fits <- computeEnsembleFit(x = x, ensembleModel = ensembleModel)
    wts <- computeEnsembleWeight(x, fits,
        restrictWeights = restrictWeights,
        maximumWeights = maximumWeights)
    blend <- computeEnsemble(fits, wts)
    filled <- x
    filled[miss] <- blend[miss]
    if(plot){
        ## Show the individual fits, the blended ensemble, and the imputed
        ## points, with the model weights reported in the legend.
        lbl <- names(ensembleModel)
        if(is.null(lbl))
            lbl <- paste0("Model ", seq_len(n.ens))
        plot(x, ylim = c(0, 1.1 * max(sapply(fits, max), na.rm = TRUE)),
            type = "n", xlab = "", ylab = "")
        pal <- brewer.pal(n.ens, "Paired")
        for(k in seq_len(n.ens)){
            lines(fits[[k]], col = pal[k])
        }
        lines(seq_len(n.obs), blend, col = "steelblue", lwd = 3)
        points(which(miss), blend[miss],
            col = "steelblue", cex = 1, pch = 19)
        points(x, pch = 19)
        legend("topleft",
            legend = c(paste0(lbl, "(",
                round(wts * 100, 2),
                "%)"), "Ensemble"),
            col = c(pal, "steelblue"),
            lwd = c(rep(1, n.ens), 3),
            bty = "n")
    }
    filled
}
|
54a3f9863c84414fb0d05e4f323f8f26f8fbfe4b | 3cd8a9e04fb467f5529aff45c4230ca481db23a8 | /man/neg.Rd | 8f1dca05af85f6dccd39292fe207a26188bbdf90 | [] | no_license | jwyang16/CRNMF | 196f73b044542388e746210c4b3b7c29953a6ddd | 30472744cf8ea65c23644058142e51203321f790 | refs/heads/master | 2020-09-10T07:02:59.552608 | 2019-11-14T11:17:17 | 2019-11-14T11:17:17 | 221,679,719 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 382 | rd | neg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CRNMF.R
\name{neg}
\alias{neg}
\title{compute negative part of matrix}
\usage{
neg(A)
}
\arguments{
\item{A}{input matrix}
}
\value{
\item{Am}{negative part of input matrix}
}
\description{
This function is used to compute the negative part of a matrix.
}
\examples{
\dontrun{
Am<-neg(A)
}
}
\keyword{negative}
|
8c290cebc1e8496d5e798f171e06100726f92b3b | 331daade012f87484e435d4e8397122a45d10dae | /R/write.stats.R | 47038f8313179a017d43e8e41741d100c66fc799 | [] | no_license | stela2502/Rscexv | 9f8cd15b6a1b27056d1ef592c4737e33f4ec459f | 81c3d6df48152a3cccd85eead6fd82918b97733f | refs/heads/master | 2022-07-26T15:29:37.035102 | 2022-07-06T15:59:55 | 2022-07-06T15:59:55 | 54,368,831 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,356 | r | write.stats.R | #' @name write.stats
#' @aliases write.stats,Rscexv-method
#' @rdname write.stats-methods
#' @docType methods
#' @description write a statistics table from the lin lang list
#' @param stats the lin lang list default= NULL
#' @param file the outfile default='lin_lang_stats.xls'
#' @title description of function write.stats
## Generic: write a statistics table derived from a lin lang result list.
setGeneric('write.stats', ## Name
function ( stats = NULL, file='lin_lang_stats.xls' ) {
standardGeneric('write.stats')
}
)
## Method for a plain list of per-gene statistics. Each element is expected to
## carry $cor, $p_value, $medians, $groupIDs and $weight. The assembled table
## is written to 'file' (tab separated) and returned as a character matrix
## with one row per list element; NULL input prints a message and returns NULL.
setMethod('write.stats', signature = c ('list'),
definition = function ( stats = NULL, file='lin_lang_stats.xls' ) {
## Collapse one result entry into a 5-element character vector.
## Missing vector slots are replaced by the c(-1, -2) placeholder so the
## resulting table stays rectangular; groups are reported in order of
## increasing median expression.
groupL <- function(x) {
if ( ! is.vector(x$medians)){ x$medians = c(-1,-2) }
if ( ! is.vector(x$groupIDs)){ x$groupIDs = c(-1,-2) }
if ( ! is.vector(x$weight)){ x$weight = c(-1,-2) }
c( x$cor, x$p_value,
paste(x$groupIDs[order(x$medians)], collapse =', '),
paste(x$medians[order(x$medians)], collapse =', '),
paste(x$weight[order(x$medians)], collapse =', ')
) }
ma <- NULL
if ( ! is.null(stats) ) {
ma <- t(as.data.frame(lapply(stats, groupL )))
rownames(ma) <- names(stats)
colnames(ma)<- c('Correlation', 'p value', 'groups in order', 'median expression in group', 'weight of group' )
## quote=FALSE spelled out instead of the reassignable shorthand F
write.table( ma, file=file , sep='\t',quote=FALSE )
}
else {
## BUGFIX: message previously read "No starts to print!"
print ( "No stats to print!" )
}
ma
}
)
|
b1508aabccf64ee41be276759e88c4fc9fe3104e | 3926260e014b713e47f2a144b7e91f5ec62f5cae | /man/Gen.Spec.Test.Rd | a6f2bfd71a16852463197457ac21ea17dd3fc11c | [] | no_license | cran/vrtest | 867cc9fed8c5584c198aa1649413df820357f99d | 405dd094f06dc1cbe6c90c4f00c462c132858d02 | refs/heads/master | 2022-09-24T22:56:16.537410 | 2022-09-05T05:50:02 | 2022-09-05T05:50:02 | 17,700,823 | 1 | 4 | null | null | null | null | UTF-8 | R | false | false | 788 | rd | Gen.Spec.Test.Rd | \name{Gen.Spec.Test}
\alias{Gen.Spec.Test}
\title{ Generalized spectral Test }
\description{
Generalized spectral Test
}
\usage{
Gen.Spec.Test(y,B)
}
\arguments{
\item{y}{ financial return time series }
\item{B}{ the number of bootstrap iterations, the default is 300}
}
\value{
\item{Pboot}{wild bootstrap p-value of the test}
}
\references{
Escanciano, J.C. and Velasco, C., 2006, Generalized Spectral Tests for the Martingale Difference Hypothesis, Journal of Econometrics, 134, p151-185.

Charles, A. Darne, O. Kim, J.H. 2011, Small Sample Properties of Alternative Tests for Martingale Difference Hypothesis, Economics Letters, 110(2), 151-154.}
\author{ Jae H. Kim}
\examples{
r <- rnorm(100)
Gen.Spec.Test(r)
}
\keyword{ htest }
|
152fb34fdf725583cee15f27a8df745327c8d963 | d0aa62cae3f45ef709cdf383810cf6528b3cbb0e | /man/influx_post.Rd | c7a656a185a486164b2178d8ee1509038da29bd9 | [] | no_license | vspinu/influxdbr | f100bab670409294b705a7f3a3a63ff0dad3222b | cbce4d35b6084906a53a5466f4811bba4b5c4da3 | refs/heads/master | 2020-04-25T21:13:18.651008 | 2018-01-10T10:28:43 | 2018-01-10T10:28:43 | 173,073,936 | 1 | 2 | null | 2019-02-28T08:38:08 | 2019-02-28T08:38:07 | null | UTF-8 | R | false | true | 653 | rd | influx_post.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/influxdb_post.R
\name{influx_post}
\alias{influx_post}
\title{send POST to an InfluxDB server}
\usage{
influx_post(con, db = NULL, query = "")
}
\arguments{
\item{con}{An \code{influx_connection} object (s. \code{\link{influx_connection}}).}
\item{db}{Sets the target database for the query.}
\item{query}{The InfluxDB query to be sent.}
}
\value{
A tibble or NULL
}
\description{
This function sends POST to an InfluxDB server. It is not
exported and only used for some helper functions within this package.
}
\references{
\url{https://influxdb.com/}
}
\keyword{internal}
|
2900d1ef4f984af9566725d40e630c894d71988d | 1904ec6f770060bee2f128f56a8dc44dde5d1dc2 | /Plot1.R | c1cf103f77cca9dc731dc81683fd74935698aa9b | [] | no_license | rahulraj13/ExData_Plotting1 | 601489ebce763238ee09f2c75bfc159bf9df8fe1 | 5ad3bc96fd9d3115b888403f0f03ff3f75da23be | refs/heads/master | 2022-10-22T18:41:54.746453 | 2020-06-05T12:52:15 | 2020-06-05T12:52:15 | 269,633,558 | 0 | 0 | null | 2020-06-05T12:47:40 | 2020-06-05T12:47:39 | null | UTF-8 | R | false | false | 416 | r | Plot1.R | hcd<-read.table("household_power_consumption.txt", header = TRUE, sep = ";",
na.strings = "?")
# Keep only the two target days (Date is still a d/m/Y string at this point).
hcd_final <- subset(hcd, Date %in% c("1/2/2007","2/2/2007"))
# NOTE(review): hcd_Date is computed but never used below.
hcd_Date <- as.Date(hcd_final$Date, format="%d/%m/%Y")
# Render the histogram of global active power to plot1.png (480x480).
png("plot1.png", width=480, height=480)
hist(hcd_final$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off() |
d00ddf0631171775326a300b89959b7e345a88ae | 717e2c4ce2a26212bfa1d83d283552308aec97ce | /man/summary.conceptmaps.Rd | 758b7f4909a0d1b9f7c5bd5a02e57761ab3c3712 | [] | no_license | cran/comato | 55912ddfe8636c4380ac37f9cdeed3af63f88de7 | 3fcc4388debb4768536e0c923e17b7e2e84c34fd | refs/heads/master | 2021-01-10T21:57:58.562405 | 2018-03-02T15:36:47 | 2018-03-02T15:36:47 | 17,919,836 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 581 | rd | summary.conceptmaps.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/concept_maps.r
\name{summary.conceptmaps}
\alias{summary.conceptmaps}
\title{Return basic information of a conceptmaps object}
\usage{
\method{summary}{conceptmaps}(object, ...)
}
\arguments{
\item{object}{A conceptmaps object.}
\item{...}{-}
}
\value{
A matrix with one column for each concept map in the set and the number of concepts, edges, and components of this map
respectively in 3 rows.
}
\description{
\code{summary} returns basic information about a conceptmaps object
}
|
1658432049e00c3487b34f99a8012cd996575e06 | 0f84622644e85adc80d17d0026bfbf6678880250 | /assignment 4/assignment-4.R | 4baa59eb07b2978ec72fb93e2eef036db0cc8825 | [
"MIT"
] | permissive | notfy111/Data-Modeling-and-Representation-in-R | 02dd5b8b82cc3e5f0c1144f4338973a0f06c6d24 | 61f360fb597c6d9a9e5a864b34ee5b45f9d9b5cb | refs/heads/main | 2023-06-17T21:00:41.098169 | 2021-07-16T02:59:10 | 2021-07-16T02:59:10 | 386,450,982 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,923 | r | assignment-4.R | library(mice)
library(ggplot2)
library(naniar)
library(VIM)
library(lattice)
# Read the tree data (columns include diameter and age; machine-specific path).
tree <- read.csv("/Users/fengyi111/Desktop/2020-Fall/702/assignment-4/treeage.txt",header = TRUE)
# randomly replace age values in 6 observations with NA
set.seed(123)
missing_index = sample(1:20, 6, replace = TRUE)
# NOTE(review): sampling with replacement can pick the same row twice, so
# fewer than 6 distinct ages may actually become missing.
tree[missing_index,3] <- NA
# inspect missing patterns
md.pattern(tree)
aggr(tree,col=c("orange","lightblue"),numbers=TRUE,sortVars=TRUE,
labels=names(tree),cex.axis=.7,gap=3, ylab=c("Proportion missing","Missingness pattern"))
# given this small sample size, marginplot may not be very helpful to look at
# imputation: 50 completed datasets; "norm" handles the numeric age column
tree_imp <- mice(tree,m=50, defaultMethod=c("norm","logreg","polyreg","polr"),
print=F)
stripplot(tree_imp, col=c("grey","darkred"),pch=c(1,20))
# look at diameter vs age
# the trend is kind of consistent across different imputations
xyplot(tree_imp, age ~ diameter | .imp,pch=c(1,20),cex = 1.4,col=c("grey","darkred"))# different distribution of age across imputed dataset
# extract two completed datasets for closer inspection
d7 <- complete(tree_imp, 7); d7
d17 <- complete(tree_imp, 17); d17
# imputated data have much larger variance than observed data
densityplot(tree_imp)
# fit linear regression model on one of two randomly selected data
treeregd17 <- lm(age~diameter, data = d17)
summary(treeregd17)
# model diagnostic
# d7
# random pattern, so linearity assumption is satisfied
# plot(treeregd7$residual,x=d7$diameter,xlab="Diameter",ylab="Residual"); abline(0,0)
plot(treeregd17,which=1:5)
# compare the two completed datasets against the original (with NAs)
d7$group = "Dataset 7"
d17$group = "Dataset 17"
tree$group = "Original"
df = rbind(d7, d17, tree)
#trend
ggplot(data=df, aes(x=diameter,y=age,color=group)) +
geom_point() +
geom_smooth(method='lm',level=0) +
theme(legend.position = 'bottom')
#overlap
ggplot(data=df, aes(x=age,fill=group)) +
geom_density(alpha=0.5) +
theme(legend.position = 'bottom')
# fit the regression in every imputed dataset and pool with Rubin's rules
# NOTE(review): the pooled model is diameter ~ age while the single-dataset
# model above is age ~ diameter -- confirm which direction is intended.
treereg_imp <- with(data=tree_imp, lm(diameter~age))
treereg_imp[[4]][[7]]
treereg_imp[[4]][[17]]
tree_reg <- pool(treereg_imp)
# the overall estimate of coefficient is close to what we've observed in the two specific dataset
# need interpretation
summary(tree_reg)
## Question 2
# Load the NHANES extract; '.' is coded as missing in the raw file.
nhanes <- read.csv("/Users/fengyi111/Desktop/2020-Fall/702/assignment-4/nhanes.csv",header = TRUE, na.strings = c('.',NA))
# Drop survey-design columns that are not used in the analysis.
nhanes = nhanes[, !names(nhanes) %in% c('wtmec2yr','sdmvstra','sdmvpsu','ridageyr')]
# Treat the categorical covariates as factors so mice picks suitable methods.
nhanes$riagendr <- factor(nhanes$riagendr)
nhanes$ridreth2 <- factor(nhanes$ridreth2)
nhanes$dmdeduc <- factor(nhanes$dmdeduc)
nhanes$indfminc<- factor(nhanes$indfminc)
summary(nhanes)
# Multiple imputation: 10 completed datasets, pmm for numeric variables.
nhanes_imp <- mice(nhanes,m=10, defaultMethod=c("pmm","logreg","polyreg","polr"),
print=F)
# select two complete datasets
n3 <- complete(nhanes_imp, 3);
n10 <- complete(nhanes_imp, 10);
# bmi by age
xyplot(nhanes_imp, bmxbmi ~ age | .imp,pch=c(1,20),cex = 1.4,col=c("grey","darkred"))# different distribution of age across imputed dataset
# bmi by gender
stripplot(nhanes_imp, bmxbmi~.imp|riagendr, col=c("grey","darkred"),pch=c(1,20))
stripplot(nhanes_imp, col=c("grey","darkred"),pch=c(1,20))
# Compare two completed datasets against the observed data.
n3$group = "Dataset 3"
n10$group = "Dataset 10"
nhanes$group = "Original"
df_nhanes = rbind(n3, n10, nhanes)
#trend
ggplot(data=df_nhanes, aes(x=age,y=bmxbmi,color=group)) +
#geom_point() +
geom_smooth(method='lm',level=0) +
theme(legend.position = 'bottom')
ggplot(data=df_nhanes, aes(x=riagendr,y=bmxbmi,color=group)) +
geom_boxplot() +
#geom_smooth(method='lm',level=0) +
theme(legend.position = 'bottom')
#overlap
ggplot(data=df_nhanes, aes(x=age,fill=group)) +
geom_density(alpha=0.5) +
theme(legend.position = 'bottom')
# fit model
nhanes_reg <- lm(bmxbmi~age+riagendr+ridreth2+ dmdeduc + indfminc + dmdeduc:riagendr,data = n3)
plot(nhanes_reg,which = 1:5)
# assumptions violated, going to transform data
nhanes_log_reg <- lm(log(bmxbmi)~age+riagendr+ridreth2 + dmdeduc + indfminc + dmdeduc:riagendr,data = n3)
plot(nhanes_log_reg,which = 1:5)
# BUGFIX: the original line was step(nhanes_log, direciton='backward', trace=0)
# -- 'nhanes_log' is undefined (the fitted model is 'nhanes_log_reg') and the
# misspelled 'direciton' argument would have been silently ignored by step().
model_backward <- step(nhanes_log_reg, direction = 'backward', trace = 0)
model_backward$call
# Re-read the data and log-transform BMI *before* imputation so the
# imputation model matches the analysis model.
nhanes_logged <- read.csv("/Users/fengyi111/Desktop/2020-Fall/702/assignment-4/nhanes.csv",header = TRUE, na.strings = c('.',NA))
nhanes_logged = nhanes_logged[, !names(nhanes_logged) %in% c('wtmec2yr','sdmvstra','sdmvpsu','ridageyr')]
nhanes_logged$riagendr <- factor(nhanes_logged$riagendr)
nhanes_logged$ridreth2 <- factor(nhanes_logged$ridreth2)
nhanes_logged$dmdeduc <- factor(nhanes_logged$dmdeduc)
nhanes_logged$indfminc<- factor(nhanes_logged$indfminc)
nhanes_logged$bmxbmi <- log(nhanes_logged$bmxbmi)
nhanes_imp_log <- mice(nhanes_logged,m=10, defaultMethod=c("pmm","logreg","polyreg","polr"),
print=F)
log3 <- complete(nhanes_imp_log, 3);
# Fit the analysis model in every imputed dataset and pool with Rubin's rules.
nhanesreg_imp <- with(data=nhanes_imp_log, lm(bmxbmi~age+riagendr+ridreth2 + dmdeduc + indfminc + dmdeduc:riagendr))
nhanes_overall <- pool(nhanesreg_imp)
summary(nhanes_overall)
d589b688718eb8fcf2ebf8e97e5f28c1b1f7e84a | 7120d5b70dcef7fc333eec107d90ccfd0a5dcd5c | /man/RelCoef.Rd | 024cb89c5b5db465f824e60b64ae6b06fb014693 | [] | no_license | cran/Relatedness | df20404b551fec87c08a205350609dfcbfee153b | 12a1f21fb316d8033626ff3bf85dcca58f807bbf | refs/heads/master | 2021-01-21T14:08:09.848358 | 2017-11-17T09:51:45 | 2017-11-17T09:51:45 | 48,087,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,880 | rd | RelCoef.Rd | \name{RelCoef}
\alias{RelCoef}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Relatedness Coefficients Estimation for individuals
%% ~~function to do ... ~~
}
\description{This function performs Maximum Likelihood estimation for the relatedness coefficients between individuals based on a bi-allelic genotype matrix. Alternatively, a parental genotype matrix and a crossing matrix can be used. In that case information about structure can also be taken into account via a ParentPop vector.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
RelCoef(IndividualGenom = matrix(0, nrow=0, ncol=0),
ParentalLineGenom = matrix(0, nrow=0, ncol=0),
Freq = matrix(0, nrow=0, ncol=0),
Crossing = matrix(0, nrow=0, ncol=0), ParentPop = rep(0,0),
Combination = list(), Phased = FALSE, Details = FALSE,
NbInit = 5, Prec = 10^(-4), NbCores = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{IndividualGenom}{Genotype matrix of individuals. Each individual is described by 2 columns. Each row corresponds to a marker. Entries of matrix IndividualGenom should be either 0 or 1. Either IndividualGenom or ParentalLineGenom has to be provided.
%% ~~Describe \code{IndividualGenom} here~~
}
\item{ParentalLineGenom}{Genotype matrix of parental lines. Each parental line is described by one column with rows corresponding to markers. Entries of ParentalLineGenome should be either 0 or 1.
%% ~~Describe \code{ParentalLineGenom} here~~
}
\item{Freq}{Allelic frequencies for allele 1 at each markers and for all populations (one column per population, one line per marker).
%% ~~Describe \code{Freq} here~~
}
\item{Crossing}{Required when argument ParentalLineGenom is provided. A 2-column matrix where each row corresponds to a crossing between 2 parents. Parents should be numbered according to their order of appearance in the ParentalLineGenom matrix.
%% ~~Describe \code{Crossing} here~~
}
\item{ParentPop}{Only available if ParentalLineGenom is displayed. A vector of numbers corresponding to population membership for the parental lines.
%% ~~Describe \code{ParentPop} here~~
}
\item{Combination}{If provided, a list of vector with two components. The jth vector is composed with the number of the first hybrid and the number of the second hybrid of the jth couple to study.
%% ~~Describe \code{Combination} here~~
}
\item{Phased}{A Boolean with value TRUE if observations are phased.
%% ~~Describe \code{Phased} here~~
}
\item{Details}{A Boolean variable. If TRUE, the relatedness mode graph is displayed.
%% ~~Describe \code{Details} here~~
}
\item{NbInit}{Number of initial values for the EM algorithm.
%% ~~Describe \code{NbInit} here~~
}
\item{Prec}{Convergence precision parameter for the EM algorithm.
%% ~~Describe \code{Prec} here~~
}
\item{NbCores}{Number of cores used by the algorithm (Default is the number of cores available minus one). Only available for linux and Mac.
%% ~~Describe \code{NbCores} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
Argument IndividualGenom should be used if the available data consist in genotypic information only. By default the data are assumed to be unphased and the function returns 9 relatedness coefficients. If data are phased, use argument Phased = TRUE to obtain the 15 relatedness coefficients. Note that in that case the ordering of the 2 columns per individual in IndividualGenome does matter. Alternatively, if the genotyped individuals are hybrids resulting from the crossing of parental lines (or combinations of parental gametes), it is possible to provide a ParentalLineGenom and a Crossing matrix directly. Additionally, the population membership of the parents can be provided via argument ParentPop. Whatever the arguments used to enter the genotypic data, the allelic frequencies of the markers have to be provided using argument Freq. Arguments NbInit and Prec are tuning parameters for the EM algorithm used for likelihood maximization.
}
\value{
By default, relatedness coefficients are displayed for all couples of genotyped individuals (or hybrids). In that case the function returns a list of matrices, each corresponding to a specific relatedness coefficient (details about relatedness coefficients can be obtained by displaying the relatedness mode graph with argument Details). Element (i,j) of matrix k corresponds to the kth estimated relatedness coefficient for the couple of individuals i and j. Alternatively, if a list of couples is specified with argument Combination, the function returns a list of vectors (each vector corresponding to a relatedness coefficient). In that case element i of vector k corresponds to the kth relatedness coefficient of the ith couple specified in Combination.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\author{Fabien Laporte, 'UMR Genetique Quantitative et Evolution' INRA France.
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\section{Warning }{In absence of population structure, some relatedness coefficients are not identifiable.
Since an EM algorithm is run for each couple of individuals, the procedure can be time consuming for large panels.
}
\examples{
require('Relatedness')
data(Genotype)
data(Frequencies)
data(Cross)
RelatednessCoefficient <- RelCoef(IndividualGenom=matrix(0,ncol=0,nrow=0),
ParentalLineGenom=Genotype,
Freq=Frequencies,Crossing=Cross,
ParentPop=rep(1,8),Phased=TRUE,NbCores=2)
print(RelatednessCoefficient$Delta3)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Relatedness }
|
777ee708a02b9099d73c180633f3daf21e614f7b | c11da6eab192a49d316c216cb21471e0b7569d9c | /Rpath.Rcheck/00_pkg_src/Rpath/R/ecopath.R | 3fe395ed51fe279012d414eff57fcec8264788e6 | [] | no_license | kakearney/RpathDev | 95c7a4106fe0e66d781ab656872584f2cb911bf7 | 69b7967bfa209dd995ec99e98745002cbd57d83a | refs/heads/Public | 2021-01-24T20:52:29.635259 | 2016-08-11T13:17:36 | 2016-08-11T13:17:36 | 65,334,449 | 0 | 0 | null | 2016-08-09T23:14:51 | 2016-08-09T23:14:51 | null | UTF-8 | R | false | false | 17,879 | r | ecopath.R | ## R version of Ecopath balance by Sarah Gaichas and Kerim Aydin
## Modified by Sean Lucey
## Version controlled by git
## Function ecopathR takes as input 3 csv files and optional
## ecosystem name
#'Ecopath module of Rpath
#'
#'Performs initial mass balance using a model parameter file and diet
#'matrix file.
#'
#'@family Rpath functions
#'
#'@param modfile Comma deliminated model parameter file.
#'@param dietfile Comma deliminated diet matrix file.
#'@param pedfile Comma deliminated pedigree file.
#'@param eco.name Optional name of the ecosystem which becomes an attribute of
#' rpath object.
#'
#'@return Returns an Rpath object that can be supplied to the ecosim.init function.
#'@import data.table
#'@export
ecopath <- function(modfile, dietfile, pedfile, eco.name = NA){
  #Read in parameter files
  model <- as.data.table(read.csv(modfile)) # Basic parameters, detritus fate, catch, discards in that order
  diet  <- as.data.table(read.csv(dietfile)) # Diet matrix in [prey, pred] order
  ped   <- as.data.table(read.csv(pedfile))  # Pedigree (data quality) file

  #Check that all columns of model are numeric and not logical
  #(read.csv types all-NA columns as logical, which breaks the math below)
  if(length(which(sapply(model, class) == 'logical')) > 0){
    logic.col <- which(sapply(model, class) == 'logical')
    for(i in seq_along(logic.col)){
      set(model, j = logic.col[i], value = as.numeric(model[[logic.col[i]]]))
    }
  }

  #Remove first column if it holds group names
  #(a factor column indicates names rather than numeric data)
  if(sapply(diet, class)[1] == 'factor') diet <- diet[, 1 := NULL, with = FALSE]
  if(sapply(ped,  class)[1] == 'factor') ped  <- ped [, 1 := NULL, with = FALSE]

  #Convert NAs to zero in diet matrix
  diet[is.na(diet)] <- 0

  # Get number of groups, living, dead, and gear
  ngroups <- nrow(model)
  nliving <- nrow(model[Type < 2, ])
  ndead   <- nrow(model[Type == 2, ])
  ngear   <- nrow(model[Type == 3, ])

  #Diet on living prey only (detritus rows handled separately below)
  nodetrdiet <- diet[1:nliving, ]
  model[is.na(DetInput), DetInput := 0]

  # fill in GE and QB from inputs (GE = production/consumption; missing QB is
  # derived as PB / GE)
  # NOTE(review): the := inside ifelse() sets model$QB to PB / GE for every
  # row; rows that already had QB are numerically unchanged (their GE was
  # computed as PB / QB), but the side effect on `model` is easy to miss.
  GE <- ifelse(is.na(model[, ProdCons]), model[, PB / QB], model[, ProdCons])
  QB <- ifelse(is.na(model[, QB]), model[, QB := PB / GE], model[, QB])

  # define catch, discards, necessary sums
  # Landings and discards occupy fixed column positions after the detritus
  # fate columns (10 basic-parameter columns, then ndead fate columns)
  catchmat    <- model[, (10 + ndead + 1):(10 + ndead + ngear), with = FALSE]
  discardmat  <- model[, (10 + ndead + 1 + ngear):(10 + ndead + (2 * ngear)), with = FALSE]
  totcatchmat <- catchmat + discardmat

  # KYA 1/16/14 Need if statement here because rowSums fail if only one
  # fishery (catch is vector instead of matrix) ##FIX PROPAGATION HERE
  if (is.data.frame(totcatchmat)){
    totcatch  <- rowSums(totcatchmat)
    catch     <- rowSums(catchmat)
    discards  <- rowSums(discardmat)
    gearcatch <- colSums(catchmat,   na.rm = TRUE)
    geardisc  <- colSums(discardmat, na.rm = TRUE)
  }else{
    totcatch  <- totcatchmat
    catch     <- catchmat
    discards  <- discardmat
    gearcatch <- sum(catchmat,   na.rm = TRUE)
    geardisc  <- sum(discardmat, na.rm = TRUE)
  }

  geartot <- gearcatch + geardisc
  model[, catch    := catch]
  model[, discards := discards]
  model[, totcatch := totcatch]

  # flag missing pars (1 = missing) and subset for estimation
  model[, noB   := 0]
  model[, noEE  := 0]
  model[, alive := 0]
  model[is.na(Biomass), noB  := 1]
  model[is.na(EE),      noEE := 1]
  model[Type < 2,      alive := 1]

  # define detritus fate matrix
  detfate <- model[, (10 + 1):(10 + ndead), with = FALSE]

  # set up and solve the system of equations for living group B or EE
  # (each living group must supply either Biomass or EE; the other is solved)
  living <- model[alive == 1, ]
  living[, Q := totcatch + BioAcc]
  living[noEE == 1, diag.a := Biomass * PB]
  living[noEE == 0, diag.a := PB * EE]
  A <- matrix(0, nliving, nliving)
  diag(A) <- living[, diag.a]
  #Consumption coefficients: QB-scaled diet columns for predators whose
  #biomass is unknown move to the left-hand side of the system
  QBDC <- as.matrix(nodetrdiet) * living$QB[col(as.matrix(nodetrdiet))]
  dimnames(QBDC) <- list(NULL, NULL)
  QBDC[is.na(QBDC)] <- 0
  QBDCa <- as.matrix(QBDC) * living$noB[col(as.matrix(QBDC))]
  A <- A - QBDCa
  living[, BioQB := Biomass * QB]
  cons <- as.matrix(nodetrdiet) * living$BioQB[col(as.matrix(nodetrdiet))]
  living[, Q := Q + rowSums(cons, na.rm = TRUE)]

  # Generalized inverse does the actual solving
  pars <- MASS::ginv(A, tol = .Machine$double.eps) %*% living[, Q]

  #Solved values fill in the missing EE and Biomass entries only
  living[, EEa := pars * noEE]
  living[is.na(EE), EE := EEa]
  living[, EEa := NULL]
  living[, B := pars * noB]
  living[!is.na(Biomass), B := Biomass]

  # detritus EE calcs: inflows are other mortality (M0), egestion
  # (unassimilated consumption), detrital input, and fishery discards,
  # routed to each detritus pool by the fate matrix
  living[, M0 := PB * (1 - EE)]
  living[, QBloss := QB]
  living[is.na(QBloss), QBloss := 0]
  loss <- c((living[, M0] * living[, B]) + (living[, B] * living[, QBloss] * living[, Unassim]),
            model[Type == 2, DetInput],
            geardisc)
  detinputs  <- colSums(loss * detfate)
  detdiet    <- diet[(nliving + 1):(nliving + ndead), ]
  BQB        <- living[, B * QB]
  detcons    <- as.matrix(detdiet) * BQB[col(as.matrix(detdiet))]
  detoutputs <- rowSums(detcons, na.rm = TRUE)
  EE         <- c(living[, EE], as.vector(detoutputs / detinputs))

  # added by kya
  # if a detritus biomass is put into the spreadsheet, use that and
  # calculate PB. If no biomass, but a PB, use that pb with inflow to
  # calculate biomass. If neither, use default PB=0.5, Bio = inflow/PB
  # This is done because Ecosim requires a detrital biomass.
  Default_Detrital_PB <- 0.5
  inDetPB <- model[(nliving + 1):(nliving + ndead), PB]
  inDetB  <- model[(nliving + 1):(nliving + ndead), Biomass]
  DetPB   <- ifelse(is.na(inDetPB), Default_Detrital_PB, inDetPB)
  DetB    <- ifelse(is.na(inDetB), detinputs / DetPB, inDetB)
  DetPB   <- detinputs / DetB

  # Trophic Level calcs: solve t(I - D) %*% TL = 1 where D is the diet
  # matrix extended with gear "diets" (proportion of each gear's removals
  # taken from each group)
  TL            <- rep(1, ngroups)
  TLcoeff       <- matrix(0, ngroups, ngroups)
  diag(TLcoeff) <- rep(1, ngroups)
  gearcons <- as.matrix(totcatchmat) / geartot[col(as.matrix(totcatchmat))]
  dimnames(gearcons) <- list(NULL, NULL)
  gearcons[is.na(gearcons)] <- 0
  dietplus <- as.matrix(diet)
  dimnames(dietplus) <- list(NULL, NULL)
  dietplus <- rbind(dietplus, matrix(0, ngear, nliving))
  dietplus <- cbind(dietplus, matrix(0, ngroups, ndead), gearcons)
  TLcoeffA <- TLcoeff - dietplus
  TL <- solve(t(TLcoeffA), TL)

  #kya changed these following four lines for detritus, and removing NAs
  #to match header file format (replacing NAs with 0.0s)
  Bplus  <- c(living[, B], DetB, rep(0.0, ngear))
  PBplus <- model[, PB]
  PBplus[(nliving + 1):(nliving + ndead)] <- DetPB
  PBplus[is.na(PBplus)] <- 0.0
  EEplus <- c(EE, rep(0.0, ngear))
  QBplus <- model[, QB]
  QBplus[is.na(QBplus)] <- 0.0
  GE[is.na(GE)] <- 0.0
  RemPlus <- model[, totcatch]
  RemPlus[is.na(RemPlus)] <- 0.0

  #Summary of the balanced model
  balanced <- list(Group    = model[, Group],
                   TL       = TL,
                   Biomass  = Bplus,
                   PB       = PBplus,
                   QB       = QBplus,
                   EE       = EEplus,
                   GE       = GE,
                   Removals = RemPlus)

  #Mortality components (M0, fishing F by gear, predation M2)
  #NOTE(review): `morts` is assembled but not returned or used below —
  #apparently diagnostic scaffolding; also living[, B] is recycled against
  #matrices with ngroups rows in gearF — verify before relying on it.
  M0plus  <- c(living[, M0], as.vector(detoutputs / detinputs))
  gearF   <- as.matrix(totcatchmat) / living[, B][row(as.matrix(totcatchmat))]
  newcons <- as.matrix(nodetrdiet) * living[, BQB][col(as.matrix(nodetrdiet))]
  predM   <- as.matrix(newcons) / living[, B][row(as.matrix(newcons))]
  predM   <- rbind(predM, detcons)
  morts   <- list(Group = model[Type < 3, Group],
                  PB    = model[Type < 3, PB],
                  M0    = M0plus,
                  F     = gearF[1:(nliving + ndead), ],
                  M2    = predM)

  # cleanup before sending to sim -- C code wants 0 as missing value, not NA
  balanced$Biomass[is.na(balanced$Biomass)] <- 0
  balanced$PB[is.na(balanced$PB)] <- 0
  balanced$QB[is.na(balanced$QB)] <- 0
  balanced$EE[is.na(balanced$EE)] <- 0
  balanced$GE[is.na(balanced$GE)] <- 0
  model$BioAcc[is.na(model$BioAcc)]   <- 0
  model$Unassim[is.na(model$Unassim)] <- 0
  dietm <- as.matrix(diet)
  dimnames(dietm) <- list(NULL, NULL)
  dietm[is.na(dietm)] <- 0
  catchmatm <- as.matrix(catchmat)
  dimnames(catchmatm) <- list(NULL, NULL)
  catchmatm[is.na(catchmatm)] <- 0
  discardmatm <- as.matrix(discardmat)
  dimnames(discardmatm) <- list(NULL, NULL)
  discardmatm[is.na(discardmatm)] <- 0
  detfatem <- as.matrix(detfate)
  dimnames(detfatem) <- list(NULL, NULL)
  detfatem[is.na(detfatem)] <- 0
  pedm <- as.matrix(ped)
  dimnames(pedm) <- list(NULL, NULL)
  pedm[is.na(pedm)] <- 0

  # list structure for sim inputs
  path.model <- list(NUM_GROUPS = ngroups,  #define NUM_GROUPS 80 INCLUDES GEAR
                     NUM_LIVING = nliving,  #define NUM_LIVING 60
                     NUM_DEAD   = ndead,    #define NUM_DEAD 3
                     NUM_GEARS  = ngear,    #define NUM_GEARS 17
                     Group      = as.character(balanced$Group),
                     type       = model[, Type],
                     TL         = TL,
                     BB         = balanced$Biomass, #float path_BB[1..NUM_GROUPS] vector
                     PB         = balanced$PB,      #float path_PB[1..NUM_GROUPS] vector
                     QB         = balanced$QB,      #float path_QB[1..NUM_GROUPS] vector
                     EE         = balanced$EE,      #float path_EE[1..NUM_GROUPS] vector
                     BA         = model[, BioAcc],  #float path_BA[1..NUM_GROUPS] vector
                     GS         = model[, Unassim], #float path_GS[1..NUM_GROUPS] vector
                     GE         = balanced$GE,      #float path_GS[1..NUM_GROUPS] vector
                     pedigree   = pedm,      #float pedigree[B,PB,QB,Diet,1..NUM_GEARS][1..NUM_LIVING+NUM_DEAD] matrix
                     DC         = dietm,     #float path_DC[1..NUM_GROUPS][1..NUM_GROUPS] matrix in [prey][pred] order NUM_LIVING?
                     DetFate    = detfatem,  #float path_DetFate[1..NUM_DEAD][1..NUM_GROUPS] matrix in [det][groups] order
                     Catch      = catchmatm, #float path_Catch[1..NUM_GEARS][1..NUM_GROUPS] matrix
                     Discards   = discardmatm) #float path_Discards[1..NUM_GEARS][1..NUM_GROUPS] matrix

  #Define class of output
  class(path.model) <- 'Rpath'
  attr(path.model, 'eco.name') <- eco.name
  return(path.model)
}
#'Plot routine for Ecopath food web
#'
#'Plots the food web associated with an Rpath object.
#'
#'@family Rpath functions
#'
#'@param Rpath.obj Rpath model created by the ecopath() function.
#'@param eco.name Optional name of the ecosystem. Default is the eco.name attribute from the
#' rpath object.
#'@param highlight Set to the group number to highlight the connections of that group.
#'@param highlight.col Color of the connections to the highlighted group.
#'@param labels Logical whether or not to display group names. If TRUE and label.pos is NULL, no
#'  points will be plotted, just label names.
#'@param label.pos A position specifier for the labels. Values of 1, 2, 3, 4, respectively
#'  indicate positions below, to the left of, above, and to the right of the points. A NULL
#'  value will cause the labels to be plotted without the points (assuming that labels = TRUE).
#'@param label.num Logical value indicating whether group numbers should be used for labels
#'  instead of names.
#'@param line.col The color of the lines between nodes of the food web.
#'@param fleets Logical value indicating whether or not to include fishing fleets in the food web.
#'@param type.col The color of the points corresponding to the types of the group. Can either be
#'  of length 1 or 4. Color order will be living, primary producers, detrital, and fleet groups.
#'@param box.order Vector of box numbers to change the default plot order. Must include all box numbers.
#'@param label.cex The relative size of the labels within the plot.
#'
#'@return Creates a figure of the food web.
#'@import data.table
#'@export
webplot <- function(Rpath.obj, eco.name = attr(Rpath.obj, 'eco.name'), line.col = 'grey',
                    highlight = NULL, highlight.col = c('black', 'red', 'orange'),
                    labels = FALSE, label.pos = NULL, label.num = FALSE, label.cex = 1,
                    fleets = FALSE, type.col = 'black', box.order = NULL){
  #Map each group to a plotting point: number, name, type, trophic level, biomass
  pointmap <- data.table(GroupNum = seq_along(Rpath.obj$TL),
                         Group    = Rpath.obj$Group,
                         type     = Rpath.obj$type,
                         TL       = Rpath.obj$TL,
                         Biomass  = Rpath.obj$BB)
  #Bin groups into discrete trophic bands used to spread them horizontally
  pointmap[TL <  2,             TLlevel := 1]
  pointmap[TL >= 2.0 & TL < 3.0, TLlevel := 2]
  pointmap[TL >= 3.0 & TL < 3.5, TLlevel := 3]
  pointmap[TL >= 3.5 & TL < 4.0, TLlevel := 4]
  pointmap[TL >= 4.0 & TL < 4.5, TLlevel := 5]
  pointmap[TL >= 4.5 & TL < 5.0, TLlevel := 6]
  pointmap[TL >= 5.0,            TLlevel := 7]
  if(!is.null(box.order)) pointmap <- pointmap[box.order, ]
  if(fleets == FALSE)     pointmap <- pointmap[type < 3, ]

  #Horizontal position: evenly space the groups within each trophic band
  nTL <- table(pointmap[, TLlevel])
  pointmap[, n := nTL[which(names(nTL) == TLlevel)], by = TLlevel]
  pointmap[, x.space  := 1 / n]
  pointmap[, x.offset := x.space / 2]
  x.count.all <- c()
  for(i in 1:max(pointmap[, TLlevel])){
    x.count <- pointmap[TLlevel == i, list(Group)]
    #seq_len() (not 1:nrow) so an empty trophic band is skipped cleanly
    for(j in seq_len(nrow(x.count))){
      x.count[j, x.count := j]
    }
    x.count.all <- rbind(x.count.all, x.count)
  }
  pointmap <- merge(pointmap, x.count.all, by = 'Group', all.x = TRUE)
  #Small random jitter avoids exactly overlapping connection lines
  #NOTE(review): rnorm(1) is recycled, so all rows in each subset share one
  #jitter draw — confirm whether per-row jitter was intended
  pointmap[x.count == 1, x.pos := x.offset + rnorm(1, 0, 0.01)]
  pointmap[x.count != 1, x.pos := x.offset + x.space * (x.count - 1) + rnorm(1, 0, 0.01)]
  pointmap[, c('TLlevel', 'n', 'x.offset', 'x.space', 'x.count') := NULL]

  #Empty plot sized to the trophic-level range with a 10% margin
  ymin <- min(pointmap[, TL]) - 0.1 * min(pointmap[, TL])
  ymax <- max(pointmap[, TL]) + 0.1 * max(pointmap[, TL])
  plot(0, 0, ylim = c(ymin, ymax), xlim = c(0, 1), typ = 'n', xlab = '',
       ylab = '', axes = FALSE)
  if(!is.null(eco.name)) mtext(3, text = eco.name, cex = 1.5)
  axis(2, las = TRUE)
  box()
  mtext(2, text = 'Trophic Level', line = 2)

  #Web connections: a line from each predator/fleet to everything it removes
  tot.catch <- Rpath.obj$Catch + Rpath.obj$Discards
  pred <- pointmap[type %in% c(0, 3), GroupNum]
  for(i in pred){
    pred.x <- pointmap[GroupNum == i, x.pos]
    pred.y <- pointmap[GroupNum == i, TL]
    if(pointmap[GroupNum == i, type] == 0){
      prey <- which(Rpath.obj$DC[, i] > 0)
    }
    if(pointmap[GroupNum == i, type] == 3){
      gear.num <- i - (Rpath.obj$NUM_GROUPS - Rpath.obj$NUM_GEARS)
      prey <- which(tot.catch[, gear.num] > 0)
    }
    prey.x <- pointmap[GroupNum %in% prey, x.pos]
    prey.y <- pointmap[GroupNum %in% prey, TL]
    for(j in seq_along(prey)){
      lines(c(pred.x, prey.x[j]), c(pred.y, prey.y[j]), col = line.col)
    }
  }

  #Optionally re-draw the highlighted group's prey/predator/fleet links in color
  if(!is.null(highlight)){
    pred.x <- pointmap[GroupNum == highlight, x.pos]
    pred.y <- pointmap[GroupNum == highlight, TL]
    if(pointmap[GroupNum == highlight, type] == 0){
      #Consumers have prey, predators, and possibly fleets removing them
      prey       <- which(Rpath.obj$DC[, highlight] > 0)
      group.pred <- which(Rpath.obj$DC[highlight, ] > 0)
      fleet.pred <- which(tot.catch[highlight, ] > 0)
    }
    if(pointmap[GroupNum == highlight, type] %in% c(1:2)){
      #Producers/detritus have no prey
      prey       <- NULL
      group.pred <- which(Rpath.obj$DC[highlight, ] > 0)
      fleet.pred <- which(tot.catch[highlight, ] > 0)
    }
    if(pointmap[GroupNum == highlight, type] == 3){
      #Fleets "prey" on whatever they catch and have no predators
      gear.num   <- highlight - (Rpath.obj$NUM_GROUPS - Rpath.obj$NUM_GEARS)
      prey       <- which(tot.catch[, gear.num] > 0)
      group.pred <- NULL
      fleet.pred <- NULL
    }
    if(!is.null(prey)){
      prey.x <- pointmap[GroupNum %in% prey, x.pos]
      prey.y <- pointmap[GroupNum %in% prey, TL]
      for(j in seq_along(prey)){
        lines(c(pred.x, prey.x[j]), c(pred.y, prey.y[j]), col = highlight.col[1], lwd = 2)
      }
    }
    if(!is.null(group.pred)){
      group.pred.x <- pointmap[GroupNum %in% group.pred, x.pos]
      group.pred.y <- pointmap[GroupNum %in% group.pred, TL]
      for(j in seq_along(group.pred)){
        lines(c(pred.x, group.pred.x[j]), c(pred.y, group.pred.y[j]),
              col = highlight.col[2], lwd = 2)
      }
    }
    if(length(fleet.pred) > 0){
      gear.num <- fleet.pred + (Rpath.obj$NUM_GROUPS - Rpath.obj$NUM_GEARS)
      fleet.pred.x <- pointmap[GroupNum %in% gear.num, x.pos]
      fleet.pred.y <- pointmap[GroupNum %in% gear.num, TL]
      for(j in seq_along(fleet.pred)){
        lines(c(pred.x, fleet.pred.x[j]), c(pred.y, fleet.pred.y[j]),
              col = highlight.col[3], lwd = 2)
      }
    }
    legend('bottomleft', legend = c('prey', 'predator', 'fleet'), lty = 1, col = highlight.col,
           lwd = 2, ncol = 3, xpd = TRUE, inset = c(0, -.1))
    legend('topright', legend = pointmap[GroupNum == highlight, Group], bty = 'n')
  }

  #Group points (skipped when labels alone are requested: labels = TRUE with
  #label.pos = NULL); || is correct here — both conditions are scalar
  if(!is.null(label.pos) || labels == FALSE){
    if(length(type.col) == 4){
      legend('bottomright', legend = c('living', 'primary', 'detrital', 'fleet'),
             pch = 16, col = type.col, ncol = 4, xpd = TRUE, inset = c(0, -.1))
    }
    if(length(type.col) < 4) type.col <- rep(type.col[1], 4)
    points(pointmap[type == 0, x.pos], pointmap[type == 0, TL], pch = 16, col = type.col[1])
    points(pointmap[type == 1, x.pos], pointmap[type == 1, TL], pch = 16, col = type.col[2])
    points(pointmap[type == 2, x.pos], pointmap[type == 2, TL], pch = 16, col = type.col[3])
    points(pointmap[type == 3, x.pos], pointmap[type == 3, TL], pch = 16, col = type.col[4])
  }
  if(labels == TRUE){
    if(label.num == FALSE){
      text(pointmap[, x.pos], pointmap[, TL], pointmap[, Group],
           pos = label.pos, cex = label.cex)
    }
    if(label.num == TRUE){
      text(pointmap[, x.pos], pointmap[, TL], pointmap[, GroupNum],
           pos = label.pos, cex = label.cex)
    }
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.