blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a0df08520db88c40d155ca73929bce7d191db2b3 | f1f38d1f92133aaa0ee5c3df6b0048aaf0dd9054 | /man/ldfast.Rd | 9bd282f29c21bbe4f319cca2fefda235f5262bc0 | [
"CC0-1.0"
] | permissive | imclab/rgbif | acda8ae9828d4cb281deab6016e1741192e8756b | c62edb8ecd0f89796dd18a38cfb8cd327e25584e | refs/heads/master | 2021-01-11T05:02:47.481188 | 2013-11-29T05:40:54 | 2013-11-29T05:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 340 | rd | ldfast.Rd | \name{ldfast}
\alias{ldfast}
\title{Replacement function for ldply that should be faster.}
\usage{
ldfast(x, convertvec = FALSE)
}
\arguments{
\item{x}{A list.}
\item{convertvec}{Convert a vector to a data.frame before
rbind is called.}
}
\description{
Replacement function for ldply that should be faster.
}
\keyword{internal}
|
dfa82311fd90cae2ed20b3b8bd2f01a1c9c3d8f0 | b29b91f1c84be419c2135cb8283d34890437ef48 | /tests/testthat.R | 111a3273ef694efc5a08f8b467d72e21e7a81a2f | [] | no_license | Susarro/arqastwb | 8ededc0e8c5f8d74e0dd8df5cd196b3082d377db | 0dd802ec946d25f306d6018c0f0f3d52f6bddfc8 | refs/heads/master | 2020-05-04T12:45:17.341662 | 2019-04-03T20:57:14 | 2019-04-03T20:57:14 | 179,133,627 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 60 | r | testthat.R | library(testthat)
library(arqastwb)
test_check("arqastwb")
|
e4abc7118a168d81cec059b152437c4f37b4cbba | 9358a0edf2d9a3128b4fcff5f6aa4d65b81489b5 | /tests/testthat/test-linelist-class.R | b3b806ab67818acf8d5030287ce17e1c6a0a144a | [] | no_license | scottyaz/linelist | d7a3b5578ad8463d6df16b3dfa12f1255a161aa5 | 8636b2ccf55a0e7182439d7d07913170648ccd5a | refs/heads/master | 2020-05-23T01:24:48.301592 | 2019-04-11T11:49:21 | 2019-04-11T11:49:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,670 | r | test-linelist-class.R | context("linelist class tests")
# Shared fixtures for the tests below: the current epivars dictionary and a
# linelist built from simulated messy data with six columns tagged as epivars.
oev <- get_dictionary()
# clean_data() standardises the simulated data; as_linelist() then records
# which columns play which epidemiological role (id, onset date, etc.).
ll <- as_linelist(clean_data(messy_data(), first_date = as.Date("1969-4-20")),
                  id = "id",
                  date_onset = "date_of_onset",
                  case_definition = "epi_case_definition",
                  gender = "gender",
                  geo_lon = "lon",
                  geo_lat = "lat",
                  NULL
                  )
# Each test_that() below checks one behaviour of the linelist class.

# Printing should identify the object as a linelist; the epivars listing is
# only shown when explicitly requested.
test_that("linelist announces itself", {
  expect_output(print(ll), "linelist")
  expect_output(print(ll, show_epivars = TRUE), "epivars")
})
# A linelist must remain a valid data.frame subclass.
test_that("a linelist is a data frame", {
  expect_is(ll, "data.frame")
  expect_is(ll, "linelist")
})
# The epivars mapping is stored as a list attribute on the object.
test_that("a linelist contains the epivars attribute", {
  expect_true("epivars" %in% names(attributes(ll)))
  expect_is(attr(ll, "epivars"), "list")
})
# Calling set_epivars() with no new assignments is a no-op.
test_that("set_epivars() will return the original linelist unharmed", {
  expect_identical(ll, set_epivars(ll))
})
# Both set_epivars() and reset_epivars() applied to a plain data.frame should
# reproduce the linelist built in the setup block above.
test_that("[re]set_epivars() will create a linelist", {
  ll2 <- set_epivars(as.data.frame(ll),
                     id = "id",
                     date_onset = "date_of_onset",
                     case_definition = "epi_case_definition",
                     gender = "gender",
                     geo_lon = "lon",
                     geo_lat = "lat",
                     NULL
                     )
  ll3 <- reset_epivars(as.data.frame(ll),
                       id = "id",
                       date_onset = "date_of_onset",
                       case_definition = "epi_case_definition",
                       gender = "gender",
                       geo_lon = "lon",
                       geo_lat = "lat",
                       NULL
                       )
  expect_identical(ll, ll2)
  expect_identical(ll, ll3)
})
# reset_epivars() without assignments wipes the epivars attribute.
test_that("reset_epivars() will return nothing if given nothing", {
  expect_length(attr(reset_epivars(ll), "epivars"), 0)
})
# Empty subsetting must round-trip the object unchanged.
test_that("a linelist class will be the same subsetting by nothing", {
  expect_identical(ll, ll[])
})
# Reversing column order should reorder the epivars-tagged columns too.
test_that("the epivars attribute will reflect the order of the linelist class", {
  rll <- rev(ll)
  evll <- unlist(attr(rll, "epivars"), use.names = FALSE)
  expect_identical(evll, names(rll)[names(rll) %in% evll])
})
# Column subsetting keeps only the epivars whose columns survive.
test_that("epivars will be subset along with the linelist", {
  llsub <- ll[, c("epi_case_definition", "lon", "lat")]
  expect_named(attr(llsub, "epivars"), c("case_definition", "geo_lon", "geo_lat"))
})
# Assigning NULL to an epivar removes it from the mapping.
test_that("epivars can be removed", {
  llnocase <- set_epivars(ll, case_definition = NULL, id = NULL)
  expect_false("case_definition" %in% attr(llnocase, "epivars"))
  expect_false("id" %in% attr(llnocase, "epivars"))
})
# Restore the global dictionary modified by get_dictionary() at the top.
reset_dictionary()
|
1c5f1c0e220598298e52dc014f67669c0bb49a5d | 34131c61655635da412ea7474ba22a172f17f6ff | /man/ov_set_video_meta.Rd | 066622207db648ce4e17fc5ff2b41dbf831f78fa | [
"MIT"
] | permissive | openvolley/ovideo | 8cfbe226050de88dbec2c3e7016e419708f37ea6 | c9380b9dcc4be3669dda086949aab421164d14ff | refs/heads/master | 2023-05-11T23:44:39.775596 | 2023-05-02T22:17:54 | 2023-05-02T22:17:54 | 244,466,765 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,747 | rd | ov_set_video_meta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tags.R
\name{ov_set_video_meta}
\alias{ov_set_video_meta}
\title{Set metadata tags in a video file}
\usage{
ov_set_video_meta(
video_file,
...,
movflags = FALSE,
overwrite = FALSE,
debug = FALSE
)
}
\arguments{
\item{video_file}{string: path to the video file}
\item{...}{: named values to set}
\item{movflags}{logical: if \code{TRUE}, add "-movflags use_metadata_tags" to the command-line ffmpeg call. This allows arbitrary tag names to be used with mp4/m4v/mov video formats, but note that these may be stored in a manner that some video software cannot read. If \code{movflags = FALSE}, the supported video tag names (i.e. allowable names in the \code{...} parameters) depend on the video file type}
\item{overwrite}{logical: if \code{TRUE} overwrite the \code{video_file}, see Details}
\item{debug}{logical: if \code{TRUE}, echo the ffmpeg output to the console}
}
\value{
The path to the new video file, which if \code{overwrite = TRUE} will be the input file, otherwise a file in the temporary directory
}
\description{
Requires that ffmpeg is available on your system path.
}
\details{
This function creates a new video file with the specified metadata added. This is always a file in the temporary directory. If \code{overwrite = TRUE}, the original file is deleted and replaced with the new file.
Note that if \code{movflags = FALSE}, the supported video tag names (i.e. allowable names in the \code{...} parameters) depend on the video file type.
}
\examples{
\dontrun{
newfile <- ov_set_video_meta(ov_example_video(), comment = "A comment")
ov_get_video_meta(newfile)
}
}
\seealso{
\code{\link[=ov_get_video_meta]{ov_get_video_meta()}}
}
|
c5f23ee8af26c3b6f9a6a3fd8c19585a5ed60969 | a307f2c65811683e3fc453950c8e3f2c7ad588aa | /session_4_ex_2/app.R | c8d60a408f36dda55a8b8e78159f5deac1259baa | [] | no_license | clairegmadden/esm-244-week-4-materials | 7d08cfcdbfd7433551489635b7b3db3e49a518ce | 9b64b132c844fa0852dbc71e317dc6ba45ccfd89 | refs/heads/master | 2020-12-23T11:00:54.027741 | 2020-01-30T19:46:09 | 2020-01-30T19:46:09 | 237,130,044 | 0 | 0 | null | 2020-01-30T03:15:20 | 2020-01-30T03:15:19 | null | UTF-8 | R | false | false | 2,105 | r | app.R |
# second app example!
# attach packages
library(shiny)
library(tidyverse)
library(here)
library(shinythemes)
# using existing dataset in dplyr called "diamonds"
# UI: a two-tab navbar layout.  Tab 1 shows a static scatterplot; tab 2 adds
# a checkbox widget (input$diamondclarity) that drives a reactive violin plot.
ui <- navbarPage("Navigation Bar!",
                 # update the theme of your app!
                 theme = shinytheme("cyborg"),
                 # add some tabs to create different panels in ui
                 tabPanel("First tab!",
                          # h1 is largest header option, h2 is smaller etc
                          # p indicates just regular paragraph text
                          h1("Some giant text"),
                          p("Here is some regular text"),
                          plotOutput(outputId = "diamond_plot")),
                 tabPanel("Second tab!",
                          sidebarLayout(
                            sidebarPanel("Some text!",
                                         # add a checkbox widget!
                                         checkboxGroupInput(inputId = "diamondclarity",
                                                            "Choose some!", # this makes your label
                                                            choices = c(levels(diamonds$clarity)))),
                            mainPanel("Main panel text.",
                                      plotOutput(outputId = "diamond_plot_2"))
                          ))
)
# Server: renders the static plot, filters the diamonds data reactively on the
# clarity levels the user ticks, and renders the filtered violin plot.
server <- function(input, output){
  # create some graph output to show up in ui
  output$diamond_plot <- renderPlot({
    ggplot(data = diamonds, aes(x = carat, y = price))+
      geom_point(aes(color = clarity)) # changing something based on a variable must go within aes()
  })
  # Reactive subset: re-evaluated whenever the checkbox selection changes.
  diamond_clarity <- reactive({
    diamonds %>%
      # keep anything in the clarity column that matches the selection in the widget we made to select clarity
      filter(clarity %in% input$diamondclarity)
  })
  # Violin plot of price by clarity, restricted to the selected levels.
  output$diamond_plot_2 <- renderPlot({
    ggplot(data = diamond_clarity(), aes(x = clarity, y = price))+
      geom_violin(aes(fill = clarity))
  })
}
# Launch the app.
shinyApp(ui = ui, server = server)
|
6348899519aa6e3c8ebd856011453472c4750cf4 | 32da40937286fae9019bc23fdb306b864b7c9194 | /tests.R | 04473c748701b5ef6e0376bacf37a001f99a9065 | [] | no_license | yangxhcaf/conus_herbaceous_npp_processing | f9fcd082e2f21a0c6a0e6f9ed344fa12da6c48f6 | 99e97e3942b549cbbe31d2bfcc8b8c92ae0bdaa1 | refs/heads/master | 2022-04-07T20:48:49.109333 | 2020-01-31T21:59:52 | 2020-01-31T21:59:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,164 | r | tests.R | #Import of historcial climate covariates
library(tidyr)
# List the SOILWAT output files; drop the first entry (not a covariate file).
#G:\My Drive\range-resilience\Sensitivity\Preliminary_work\SoilMoisture_Data\SOILWAT_Output_Fall2019
soil_moisture_dir_dir <- dir("G:/My Drive/range-resilience/Sensitivity/Preliminary_work/SoilMoisture_Data/SOILWAT_Output_Fall2019")
covariates<-soil_moisture_dir_dir[-c(1)]
soil_moisture_dir <- "G:/My Drive/range-resilience/Sensitivity/Preliminary_work/SoilMoisture_Data/SOILWAT_Output_Fall2019"
####potential evapotranspiration #####
# Load each PET .Rdata file, clean it and convert to a long data frame
# (x, y, year, value); initial_cleanup()/raster_link_function_x() are
# defined elsewhere in the project.
pet_covariates<-covariates[c(1:4)]
pet.covariates.list<-list()
for(i in pet_covariates[1:4])
{
  test <- get(load(file.path(soil_moisture_dir,i)))
  cleanup_test<-initial_cleanup(test)
  make_df<-raster_link_function_x(cleanup_test)
  pet.covariates.list[[i]] <- make_df
}
# Stack the four files; row names carry the source file name, which becomes
# the covariate label after truncation (first 10 characters).
df.pet<- do.call("rbind", pet.covariates.list)
rm(pet.covariates.list)
df.pet$label <- row.names(df.pet)
rownames(df.pet) <- c()
head(df.pet)
df.pet$label <- substr(df.pet$label, 0, 10)
# Long -> wide: one column per PET covariate, then join onto the NPP table.
df.pet_wide <- spread(df.pet, label, value)
head(df.pet_wide)
rm(df.pet)
herbaceous_npp_2<-merge(herbaceous_npp,df.pet_wide,by=c('x','y','year')) #dataframe to merge on to
head(herbaceous_npp_2)
rm(df.pet_wide)
#head(df.pet_wide)
#View(df.pet)
####SWA########
# Soil water availability (SWA) covariates: same load/clean/stack/spread/merge
# pattern as the PET section above.
swa_covariates<-covariates[c(5:8)]
swa.covariates.list<-list()
for(i in swa_covariates[1:4])
{
  test <- get(load(file.path(soil_moisture_dir,i)))
  cleanup_test<-initial_cleanup(test)
  make_df<-raster_link_function_x(cleanup_test)
  swa.covariates.list[[i]] <- make_df
}
df.swa<- do.call("rbind", swa.covariates.list)
head(df.swa)
rm(swa.covariates.list)
df.swa$label <- row.names(df.swa)
rownames(df.swa) <- c()
# Covariate label = first 10 characters of the source file name.
df.swa$label <- substr(df.swa$label, 0, 10)
df.swa_wide <- spread(df.swa, label, value)
head(df.swa_wide)
rm(df.swa)
herbaceous_npp_3<-merge(herbaceous_npp_2,df.swa_wide,by=c('x','y','year'))
head(herbaceous_npp_3)
rm(herbaceous_npp_2)
####SWA PET ratio########
# SWA/PET ratio covariates; identical pipeline, but the label prefix is
# longer (18 characters) to keep the ratio file names distinct.
SWAPETratio_covariates<-covariates[c(9:12)]
SWAPETratio.covariates.list<-list()
for(i in SWAPETratio_covariates[1:4])
{
  test <- get(load(file.path(soil_moisture_dir,i)))
  cleanup_test<-initial_cleanup(test)
  make_df<-raster_link_function_x(cleanup_test)
  SWAPETratio.covariates.list[[i]] <- make_df
}
df.SWAPETratio_covariates<- do.call("rbind", SWAPETratio.covariates.list)
head(df.SWAPETratio_covariates)
rm(SWAPETratio.covariates.list)
df.SWAPETratio_covariates$label <- row.names(df.SWAPETratio_covariates)
rownames(df.SWAPETratio_covariates) <- c()
df.SWAPETratio_covariates$label <- substr(df.SWAPETratio_covariates$label, 0, 18) # NEED TO MODIFY
df.SWAPETratio_wide <- spread(df.SWAPETratio_covariates, label, value)
rm(df.SWAPETratio_covariates)
head(df.SWAPETratio_wide)
herbaceous_npp_4<-merge(herbaceous_npp_3,df.SWAPETratio_wide,by=c('x','y','year'))
head(herbaceous_npp_4)
rm(herbaceous_npp_3)
####volumetric water content down to 1 meter ########
# Volumetric water content (top 1 m) covariates; label prefix is 11 chars.
VWC1m_covariates<-covariates[c(13:16)]
VWC1m_covariates.list<-list()
for(i in VWC1m_covariates[1:4])
{
  test <- get(load(file.path(soil_moisture_dir,i)))
  cleanup_test<-initial_cleanup(test)
  make_df<-raster_link_function_x(cleanup_test)
  VWC1m_covariates.list[[i]] <- make_df
}
df.VWC1m<- do.call("rbind", VWC1m_covariates.list)
head(df.VWC1m)
rm(VWC1m_covariates.list)
df.VWC1m$label <- row.names(df.VWC1m)
rownames(df.VWC1m) <- c()
df.VWC1m$label <- substr(df.VWC1m$label, 0, 11) # NEED TO MODIFY
df.VWC1m_wide <- spread(df.VWC1m, label, value)
head(df.VWC1m_wide)
rm(df.VWC1m)
herbaceous_npp_5<-merge(herbaceous_npp_4,df.VWC1m_wide,by=c('x','y','year'))
head(herbaceous_npp_5)
rm(herbaceous_npp_4)
#### water year precipitation used for NPP processing########
# Single precipitation file, so the list-of-one is coerced directly to a
# data frame instead of rbind-ing; the loaded value is converted to mm.
WatYrPRECIP_covariates<-covariates[c(17)]
WatYrPRECIP_covariates.list<-list()
for(i in WatYrPRECIP_covariates[1])
{
  test <- get(load(file.path(soil_moisture_dir,i)))
  cleanup_test<-initial_cleanup(test)
  make_df<-raster_link_function_x(cleanup_test)
  WatYrPRECIP_covariates.list[[i]] <- make_df
}
df.WatYrPRECIP<-as.data.frame(WatYrPRECIP_covariates.list)
head(df.WatYrPRECIP)
rm(WatYrPRECIP_covariates.list)
# Strip the file-name prefix that as.data.frame() prepended to column names.
names(df.WatYrPRECIP)<- gsub('WatYrPRECIP_ALLregionsHIST.Rdata.', '',names(df.WatYrPRECIP))
colnames(df.WatYrPRECIP) <- c("x","y","year",'mm')
# Unit conversion to mm (source appears to be cm -- TODO confirm).
df.WatYrPRECIP$mm<-df.WatYrPRECIP$mm*10
####Other water year covariates ########
# Remaining water-year covariates (files 18-21); label prefix is 9 chars.
wy_covariates<-covariates[c(18:21)]
wy_covariates.list<-list()
for(i in wy_covariates[1:4])
{
  test <- get(load(file.path(soil_moisture_dir,i)))
  cleanup_test<-initial_cleanup(test)
  make_df<-raster_link_function_x(cleanup_test)
  wy_covariates.list[[i]] <- make_df
}
df.wy<- do.call("rbind", wy_covariates.list)
head(df.wy)
rm(wy_covariates.list)
df.wy$label <- row.names(df.wy)
rownames(df.wy) <- c()
head(df.wy)
df.wy$label <- substr(df.wy$label, 0, 9)
df.wy_wide <- spread(df.wy, label, value)
head(df.wy_wide)
rm(df.wy)
herbaceous_npp_6<-merge(herbaceous_npp_5,df.wy_wide,by=c('x','y','year'))
head(herbaceous_npp_6)
rm(herbaceous_npp_5)
#save file for now
# Checkpoint: persist the merged table of all historical covariates so far.
saveRDS(herbaceous_npp_6,
        file = 'G:/My Drive/range-resilience/Sensitivity/CONUS_rangelands_NPP_Sensitivity/Processing NPP Data/Hebaceous NPP Data processing/Hebaceous_NPP_Processing/historical_covariates_herbaceous_npp_1.rds')
####transpiration########
# Water-year transpiration covariates (files 22-24, three percentile layers),
# processed with the same pipeline as the sections above.
transp_covariates<-covariates[c(22,23,24)]
transp_covariates.list<-list()
for(i in transp_covariates[1:3])
{
  test <- get(load(file.path(soil_moisture_dir,i)))
  cleanup_test<-initial_cleanup(test)
  make_df<-raster_link_function_x(cleanup_test)
  transp_covariates.list[[i]] <- make_df
}
df.transp<- do.call("rbind", transp_covariates.list)
head(df.transp)
rm(transp_covariates.list)
df.transp$label <- row.names(df.transp)
rownames(df.transp) <- c()
# BUG FIX: this previously read substr(df.wy$label, 0, 10), but df.wy was
# removed earlier and its labels belong to a different covariate set; the
# label must come from df.transp itself.  Width 11 keeps the full prefix
# (e.g. "WY_Transp50") so the three transpiration layers stay distinct --
# confirm against the actual file-name prefixes.
df.transp$label <- substr(df.transp$label, 0, 11)
df.transp_wide <- spread(df.transp, label, value)
head(df.transp_wide)
# Exploratory/interactive inspection of the median (Transp50) file; not part
# of the pipeline output above.
test.transp <- get(load(file.path(soil_moisture_dir,"WY_Transp50_ALLregionsHIST.Rdata")))
str(test.transp)
View(test.transp)
cleanup_test_transp<-initial_cleanup(test.transp)
make_df_transp<-raster_link_function_x(cleanup_test_transp)
summary(cleanup_test_transp)
View(cleanup_test_transp)
head(test.transp)
|
8381e400cc4e542f17c20e57596d867a94c23533 | 8bb73d638a503c1e005dcb15843e52cea068ef3d | /R/genemodel.R | b29a8cf7b5b8d6ffdb634411ddc8b2ecf0c659f5 | [] | no_license | vjcitn/erma | 4f869b399374c9919d60b369bafd6ed1394b34df | f8384452c185284bc894d8cfb5656215b12bd753 | refs/heads/master | 2020-04-30T00:05:40.799973 | 2019-03-19T11:38:53 | 2019-03-19T11:38:53 | 176,494,759 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,992 | r | genemodel.R |
# Older exon-retrieval helper, kept for reference.
# sym: a single gene symbol (scalar); genome: unused here beyond default;
# annoResource: an annotation package object (e.g. Homo.sapiens);
# getter/byattr: how to group features (default exonsBy(..., by = "gene")).
# Returns the GRanges of exons for the gene's Entrez ID.
genemodelOLD = function (sym, genome = "hg19", annoResource = Homo.sapiens,
    getter = exonsBy, byattr = "gene")
{
    stopifnot(is.atomic(sym) && (length(sym) == 1))
    # NSE: capture the *name* of the annotation object; if no object of that
    # name exists yet, try to attach a package of the same name.
    if (!exists(dsa <- deparse(substitute(annoResource))))
        require(dsa, character.only = TRUE)
    # Map SYMBOL -> ENTREZID, then index the grouped features by that ID.
    num = AnnotationDbi::select(annoResource, keys = sym, keytype = "SYMBOL",
        columns = c("ENTREZID", "SYMBOL"))$ENTREZID
    getter(annoResource, by = byattr)[[num]]
}
genemodel = function(key, keytype="SYMBOL", annoResource=Homo.sapiens,
   keepStandardChromosomes=TRUE) {
#
# purpose is to get exon addresses given a symbol
# propagate seqinfo from annotation resource
#
# key/keytype: lookup key for AnnotationDbi::select (default: gene SYMBOL).
# annoResource: an OrganismDb (e.g. Homo.sapiens) or EnsDb object; the two
#   branches below query the different column names each schema uses.
# keepStandardChromosomes: NOTE this logical argument shadows the
#   GenomeInfoDb function of the same name; the call below still resolves to
#   the function because R skips non-function bindings in call position.
 if (class(annoResource)=="OrganismDb") {
   oblig=c("EXONCHROM", "EXONSTART", "EXONEND", "EXONSTRAND", "EXONID")
   addrs = AnnotationDbi::select(annoResource, keys=key, keytype=keytype, columns=oblig)
   ans = GRanges(addrs$EXONCHROM, IRanges(addrs$EXONSTART, addrs$EXONEND),
      strand=addrs$EXONSTRAND, exon_id=addrs$EXONID)
   }
 else if (class(annoResource)=="EnsDb") {
   # EnsDb schema uses different column names for the same exon fields.
   oblig = c("SEQNAME", "EXONSEQSTART", "EXONSEQEND", "SEQSTRAND",
     "EXONIDX")
   addrs = AnnotationDbi::select(annoResource, keys=key, keytype=keytype, columns=oblig)
   ans = GRanges(addrs$SEQNAME,
        IRanges(addrs$EXONSEQSTART, addrs$EXONSEQEND),
        strand = addrs$SEQSTRAND, exon_id = addrs$EXONIDX)
   }
 else stop("annoResource must be of class OrganismDb or EnsDb")
 # Record which key produced these exons, and carry over seqinfo restricted
 # to the chromosomes actually present in the result.
 mcols(ans)[[keytype]] = key
 useq = unique(as.character(seqnames(ans)))
 si = seqinfo(annoResource)
 seqinfo(ans) = si[useq,]
 if (keepStandardChromosomes)
     return(keepStandardChromosomes(ans, pruning.mode="coarse"))
 ans
}
# Build a single GRanges spanning every transcript mapped to a cytoband
# (e.g. "17q12"), with seqinfo copied from the annotation resource.
map2range = function(maptag="17q12", annoResource=Homo.sapiens) {
 # Look up transcript coordinates for the cytoband.
 tx = AnnotationDbi::select(annoResource, keys=maptag, keytype="MAP",
    columns=c("TXSTART", "TXEND", "TXCHROM"))
 # One range from the earliest start to the latest end, on the first
 # chromosome reported.
 chrom = tx$TXCHROM[1]
 span = GRanges(chrom, IRanges(min(tx$TXSTART), max(tx$TXEND)))
 seqinfo(span) = seqinfo(annoResource)[chrom,]
 span
}
|
8249c546362dd07047700343922d848107aa9efb | 8e268cfd1fcfc7bc6c8cd611c691340492ba450c | /pca_small.R | 539d5a4970d642a1ca1dcdd8939da9185cd4ee73 | [] | no_license | jason-guanqun/StatsLearning | 289c5226cc8c73f588989d4b39cb2714c9e72c7d | 4ae0bbf3df00aa0ac1506cf874ca3e785e6586cb | refs/heads/master | 2020-03-16T17:16:36.952009 | 2018-05-21T14:11:14 | 2018-05-21T14:11:14 | 132,824,821 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,722 | r | pca_small.R | ##################################################
## Script for pca experiments on original dataset
##################################################
library(mlbench)
library(neuralnet)
library(plyr)
#load data
BC<-data("BreastCancer")
BC<-BreastCancer
#preprocessing: clean -- select column, remove missing data, change it to be numeric dataframe
# we don't need the id number
newBC<-subset(BC,select = c(2:11))
# eliminate missing values
newBC<-newBC[complete.cases(newBC), ]
# transform the text into numeric dataframe
newBC<-sapply(newBC,as.numeric)
scaled<-as.data.frame(newBC)
# Recode the Class column from factor codes 1/2 to 0/1 (presumably
# benign = 0, malignant = 1 -- TODO confirm against mlbench docs).
scaled$Class[scaled$Class == 1] <- 0
scaled$Class[scaled$Class == 2] <- 1
# extract all positive and negative records
scaled_p<-subset(scaled,scaled$Class==0)
scaled_n<-subset(scaled,scaled$Class==1)
# Shuffle rows before splitting so folds are random.
scaled_p.shuffle<-scaled_p[sample(nrow(scaled_p)),]
scaled_n.shuffle<-scaled_n[sample(nrow(scaled_n)),]
# split positive records evenly into three parts
# NOTE: the +1/+2/+3 offsets are hand-tuned to the exact row counts noted in
# the trailing comments; they will misalign if the data set changes.
row.p <-nrow(scaled_p.shuffle)
index.1<-round(0.33*row.p) #151
index.2<-round(0.66*row.p) #307
split.1<-scaled_p.shuffle[1:(index.1+1),] #152
split.2<-scaled_p.shuffle[(index.1+2):(index.2+3),] #153
split.3<-scaled_p.shuffle[(index.2+4):row.p,] #153
list.p <- list(split.1, split.2, split.3)
# split negative records evenly into three parts
row.n <-nrow(scaled_n.shuffle)
index.1<-round(0.33*row.n) #80
index.2<-round(0.66*row.n) #159
split.1<-scaled_n.shuffle[1:(index.1+1),] #81
split.2<-scaled_n.shuffle[(index.1+2):(index.2+2),] #80
split.3<-scaled_n.shuffle[(index.2+3):row.n,] #80
list.n <- list(split.1, split.2, split.3)
# combine positive and negative records into one group
# (each group keeps the original class balance -- stratified folds)
splitall.1<-rbind(list.p[[1]],list.n[[1]])
splitall.2<-rbind(list.p[[2]],list.n[[2]])
splitall.3<-rbind(list.p[[3]],list.n[[3]])
# combine three groups
# Each train set = two folds, matching test set = the held-out third fold.
train.list<-list(rbind(splitall.1,splitall.2),rbind(splitall.1,splitall.3),rbind(splitall.2,splitall.3))
test.list<-list(splitall.3,splitall.2,splitall.1)
#nn initialization
# Accumulators for per-fold error/misclassification counts (3-fold CV).
cv.error <- NULL
miss_classified <- NULL
miss_classified.rate<- NULL
k <- 3
pbar <- create_progress_bar('text')
pbar$init(k)
# cross validation loops
for(i in 1:3){
  # extract one group of train and test data respectively
  train.cv<-train.list[[i]]
  test.cv<-test.list[[i]]
  #pca generation, the class is eliminated
  # PCA fit on the 9 predictors of the training fold only (scaled).
  pca <- prcomp(train.cv[1:9], scale. = T)
  #decide the number of pcas
  pca$rotation
  std_dev <-pca$sdev
  var <- std_dev^2
  prop_var <- var/sum(var)
  # Scree plots (individual and cumulative variance explained).
  plot(prop_var, xlab = "Principal Component",
       ylab = "Single Proportion of Variance Explained",
       type = "b")
  plot(cumsum(prop_var), xlab = "Principal Component",
       ylab = "Cumulative Proportion of Variance Explained",
       type = "b")
  #combine the class attribute to the train set
  train.data <- data.frame(Class = train.cv$Class, pca$x)
  #select 8 PCAs out of 9 PCAs
  train.data <- train.data[,-10]
  #perform the same PCA transformation on the test data (attribute of class is eliminated)
  test.data <- predict(pca, newdata = test.cv[1:9])
  test.data <- as.data.frame(test.data)
  #select 8 PCAs out of 9 PCAs
  test.data <- test.data[,-9]
  #nn train
  # Build the formula Class ~ PC1 + ... + PC8 programmatically.
  n<-names(train.data)
  f <- as.formula(paste("Class ~", paste(n[!n %in% "Class"], collapse = " + ")))
  nn <- neuralnet(f,data=train.data,hidden=c(11),linear.output=F,stepmax=15015)
  #nn test
  pr.nn <- neuralnet::compute(nn,test.data)
  pr.nn_ <- pr.nn$net.result
  print(pr.nn_)
  #test result transform
  # Threshold the network output at 0.5 to get hard 0/1 class labels.
  pr.nn_[pr.nn_<0.5]<-0
  pr.nn_[pr.nn_>=0.5]<-1
  #error computation
  test.cv.r <- test.cv$Class
  # Mean squared error on 0/1 labels and raw misclassification count.
  cv.error[i] <- sum((test.cv.r - pr.nn_)^2)/nrow(test.cv)
  miss_classified[i]<-length((pr.nn_-test.cv.r)[(pr.nn_-test.cv.r)!=0])
  #process show
  pbar$step()
}
|
f527c2360e4f9466cc3f457c4e4598d7ec50d9c2 | d09f16e2b8ed72942444402c047c6eefff0261fc | /cy-uptake/code/lengthPathLA.R | 833bebf4deacb360d540ecdfed94f07c405e9d13 | [] | no_license | Robinlovelace/osm-cycle | 55243d057c91c2217beba8d9149e8b8e3ec90c63 | 3fcd8cebce88a64c440347818b032dab2f0b4088 | refs/heads/master | 2020-05-17T20:50:43.862142 | 2015-07-08T11:38:25 | 2015-07-08T11:38:25 | 17,245,158 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,124 | r | lengthPathLA.R | # script to measure the length of cycle path by local authority
x <- c("rgdal", "rgeos")
lapply(x, require, character.only = T)
on <- readOGR("../bigdata/cc-subsets/", "onroad")
proj4string(on) <- CRS("+init=epsg:4326")
on <- spTransform(on, CRS(proj4string(la11)))
cp <- readOGR("../bigdata/cc-subsets/", "dedicated")
proj4string(cp) <- CRS("+init=epsg:4326")
cp <- spTransform(cp, CRS(proj4string(la11)))
m1 <- readOGR("../bigdata/cc-subsets/", "m1CycleStreets")
proj4string(m1) <- CRS("+init=epsg:4326")
m1 <- spTransform(m1, CRS(proj4string(la11)))
gLength(on); gLength(cp); gLength(m1)
load("updata/lam.RData")
names(lam)
lam$pathDist <- NA
for(i in 1:nrow(la11)){
cpS <- m1[la11[i, ], ]
# plot(la11[i,])
# plot(cpS, add = T)
# no need to plot but these look beautiful - should include some of these in paper!
lam$pathDist[i] <- gLength(cpS) / 1000
print(i)
}
lam$pathDPP <- lam$pathDist / lam$Allm.y
plot(lam$Abs.Growth, lam$pathDPP )
cor(lam$Abs.Growth, lam$pathDPP )
# object.size(cp) / 1000000 # 200 mb!
# cps <- SpatialLinesDataFrame(gSimplify(cp, tol=20), cp@data)
# object.size(cps) / 1000000
|
a1e952c3dac40d95a8f1891fde24d752d782d0e7 | f17524c4609ca21b3bf05b17e2670031ebe2f136 | /Species Distribution Models/Final_GLV_SDM.R | 67abe2e02a00a84a2f8a9c14b6f736ae85c18cf5 | [] | no_license | cliffbueno/Manuscripts | 98edb62d9ccd70b98c8d31f4c9d6c0d4f8c8b348 | 27a11135599bab6c630132a6af87b134d01f1a7c | refs/heads/master | 2023-04-11T05:14:39.090989 | 2023-03-22T00:46:59 | 2023-03-22T00:46:59 | 153,540,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,602 | r | Final_GLV_SDM.R | ### Alpine plant species distribution models with abiotic, plant and microbe predictor variables
# By Cliff Bueno de Mesquita, Fall 2014, Spring 2015
# Paper published in Ecography, 2015
### Setup
library(AICcmodavg)
library(modEvA)
library(car)
library(MASS)
library(minpack.lm)
library(rgl)
library(robustbase)
library(Matrix)
library(qpcR)
library(boot)
source("~/Desktop/Functions/logisticPseudoR2s.R") # Code for pseudo R2 values
setwd("~/Desktop/CU/2Research/SDM")
data <- read.csv("GLVDATA.csv", header = TRUE)
# Remove 10 plots. Now all below 100 stems
plant.pa<-data[-c(1,19,31,43,49,51,52,56,60,61),]
# Make terms to test linearity
# These x*log(x) interaction terms are used to check linearity of the logit
# (Box-Tidwell style); log(0) gives -Inf, so this assumes the predictors are
# strictly positive -- TODO confirm for the abundance (*A) columns.
plant.pa$logALT<-log(plant.pa$ALT)*plant.pa$ALT
plant.pa$logSOIL_H2O<-log(plant.pa$SOIL_H2O)*plant.pa$SOIL_H2O
plant.pa$logMeanSnow<-log(plant.pa$MeanSnow)*plant.pa$MeanSnow
plant.pa$logSAND<-log(plant.pa$SAND)*plant.pa$SAND
plant.pa$logPH<-log(plant.pa$PH)*plant.pa$PH
plant.pa$logTDN<-log(plant.pa$TDN)*plant.pa$TDN
plant.pa$logDOC<-log(plant.pa$DOC)*plant.pa$DOC
plant.pa$logDptotal<-log(plant.pa$Dptotal)*plant.pa$Dptotal
plant.pa$logDpinorg<-log(plant.pa$Dpinorg)*plant.pa$Dpinorg
plant.pa$logcirsco<-log(plant.pa$cirscoA)*plant.pa$cirscoA
plant.pa$logsilaca<-log(plant.pa$silacaA)*plant.pa$silacaA
plant.pa$logtrillu<-log(plant.pa$trilluA)*plant.pa$trilluA
plant.pa$logphlsib<-log(plant.pa$phlsibA)*plant.pa$phlsibA
plant.pa$loganggra<-log(plant.pa$anggraA)*plant.pa$anggraA
plant.pa$logsenfre<-log(plant.pa$senfreA)*plant.pa$senfreA
plant.pa$loghymgra<-log(plant.pa$hymgraA)*plant.pa$hymgraA
plant.pa$logeriper<-log(plant.pa$eriperA)*plant.pa$eriperA
plant.pa$logoxydig<-log(plant.pa$oxydigA)*plant.pa$oxydigA
plant.pa$logbisbis<-log(plant.pa$bisbisA)*plant.pa$bisbisA
plant.pa$logantalp<-log(plant.pa$antalpA)*plant.pa$antalpA
plant.pa$loggenalg<-log(plant.pa$genalgA)*plant.pa$genalgA
plant.pa$logmoss<-log(plant.pa$mossA)*plant.pa$mossA
plant.pa$loggeuros<-log(plant.pa$geurosA)*plant.pa$geurosA
plant.pa$logelyscr<-log(plant.pa$elyscrA)*plant.pa$elyscrA
plant.pa$logtrispi<-log(plant.pa$trispiA)*plant.pa$trispiA
plant.pa$logfesrub<-log(plant.pa$fesrubA)*plant.pa$fesrubA
plant.pa$logdescae<-log(plant.pa$descaeA)*plant.pa$descaeA
plant.pa$logkobmyo<-log(plant.pa$kobmyoA)*plant.pa$kobmyoA
plant.pa$logcarnar<-log(plant.pa$carnarA)*plant.pa$carnarA
plant.pa$logcarper<-log(plant.pa$carperA)*plant.pa$carperA
plant.pa$logcarpha<-log(plant.pa$carphaA)*plant.pa$carphaA
plant.pa$logcarnig<-log(plant.pa$carnigA)*plant.pa$carnigA
plant.pa$logligfil<-log(plant.pa$ligfilA)*plant.pa$ligfilA
# Code to compare nested models
# (likelihood-ratio test between two nested GLMs; kept here as a template)
#ModelChi <- model$deviance - model2$deviance
#chidf <- model$df.residual - model2$df.residual
#chisq.prob <- 1 - pchisq(ModelChi, chidf)
#chisq.prob
################################### Models ################################################
# Add different sets of variables manually. Forward and backward selection. Select based on AICc
# Carex nardina
# Four candidate binomial GLMs for presence/absence: model 1 = abiotic only;
# 2 adds neighbouring-plant abundances; 3 adds microbial predictors;
# 4 combines plant + microbial terms (per the file's header comment).
carnar<-glm(carnar ~ ALT + SOIL_H2O + MeanSnow + Dpinorg + DOC, family = binomial, data = plant.pa)
carnar
carnar2<-glm(carnar ~ ALT + SOIL_H2O + MeanSnow + Dpinorg + DOC + descaeA + hymgraA + kobmyoA + mossA, family = binomial, data = plant.pa)
carnar2
carnar3<-glm(carnar ~ ALT + SOIL_H2O + MeanSnow + Dpinorg + DOC + acidGP3 + rhodo, family = binomial, data = plant.pa)
carnar3
carnar4<-glm(carnar ~ ALT + SOIL_H2O + MeanSnow + Dpinorg + DOC + descaeA + hymgraA + kobmyoA + mossA + rhodo, family = binomial, data = plant.pa)
carnar4
# Small-sample AIC for each candidate
AICc(carnar)
AICc(carnar2)
AICc(carnar3)
AICc(carnar4)
# Pseudo-R2 (sourced helper) and adjusted D-squared fit statistics
logisticPseudoR2s(carnar)
logisticPseudoR2s(carnar2)
logisticPseudoR2s(carnar3)
logisticPseudoR2s(carnar4)
Dsquared(model = carnar, adjust = TRUE)
Dsquared(model = carnar2, adjust = TRUE)
Dsquared(model = carnar3, adjust = TRUE)
Dsquared(model = carnar4, adjust = TRUE)
# Diagnostics on the full model: residual autocorrelation and collinearity
dwt(carnar4)
vif(carnar4)
1/vif(carnar4)
mean(vif(carnar4))
# Akaike weights from the AICc values printed above -- hard-coded; recompute
# if the data or models change.
carnaraics<-c(80.4296, 69.1591, 78.4370, 69.5050)
akaike.weights(carnaraics)
# 10-fold cross-validation error and AUC for each candidate
carnarCV<-cv.glm(data=plant.pa,glmfit=carnar,K=10)
carnarAUC<-AUC(model=carnar)
carnar2CV<-cv.glm(data=plant.pa,glmfit=carnar2,K=10)
carnar2AUC<-AUC(model=carnar2)
carnar3CV<-cv.glm(data=plant.pa,glmfit=carnar3,K=10)
carnar3AUC<-AUC(model=carnar3)
carnar4CV<-cv.glm(data=plant.pa,glmfit=carnar4,K=10)
carnar4AUC<-AUC(model=carnar4)
carnarCV$delta
carnar2CV$delta
carnar3CV$delta
carnar4CV$delta
carnarAUC$AUC
carnar2AUC$AUC
carnar3AUC$AUC
carnar4AUC$AUC
# Carex phaeocephala
# Same four-model comparison workflow as for Carex nardina above.
carpha<-glm(carpha ~ Dptotal, family = binomial, data = plant.pa)
carpha
carpha2<-glm(carpha ~ Dptotal + carperA + senfreA, family = binomial, data = plant.pa)
carpha2
carpha3<-glm(carpha ~ Dptotal + acidGP1, family = binomial, data = plant.pa)
carpha3
carpha4<-glm(carpha ~ Dptotal + carperA + senfreA + acidGP1, family = binomial, data = plant.pa)
carpha4
# Model selection and fit statistics
AICc(carpha)
AICc(carpha2)
AICc(carpha3)
AICc(carpha4)
logisticPseudoR2s(carpha)
logisticPseudoR2s(carpha2)
logisticPseudoR2s(carpha3)
logisticPseudoR2s(carpha4)
Dsquared(model = carpha, adjust = TRUE)
Dsquared(model = carpha2, adjust = TRUE)
Dsquared(model = carpha3, adjust = TRUE)
Dsquared(model = carpha4, adjust = TRUE)
# Diagnostics on the full model
dwt(carpha4)
vif(carpha4)
1/vif(carpha4)
mean(vif(carpha4))
# Hard-coded AICc values from the output above
carphaaics<-c(51.5697,49.2826,49.9701,48.5399)
akaike.weights(carphaaics)
# 10-fold cross-validation error and AUC
carphaCV<-cv.glm(data=plant.pa,glmfit=carpha,K=10)
carphaAUC<-AUC(model=carpha)
carpha2CV<-cv.glm(data=plant.pa,glmfit=carpha2,K=10)
carpha2AUC<-AUC(model=carpha2)
carpha3CV<-cv.glm(data=plant.pa,glmfit=carpha3,K=10)
carpha3AUC<-AUC(model=carpha3)
carpha4CV<-cv.glm(data=plant.pa,glmfit=carpha4,K=10)
carpha4AUC<-AUC(model=carpha4)
carphaCV$delta
carpha2CV$delta
carpha3CV$delta
carpha4CV$delta
carphaAUC$AUC
carpha2AUC$AUC
carpha3AUC$AUC
carpha4AUC$AUC
# Deschampsia caespitosa
# Same four-model comparison workflow as above.
descae<-glm(descae ~ PH + SOIL_H2O + Dptotal, family = binomial, data = plant.pa)
descae
descae2<-glm(descae ~ PH + SOIL_H2O + carnarA + senfreA, family = binomial, data = plant.pa)
descae2
descae3<-glm(descae ~ PH + SOIL_H2O + Dptotal + acidGP1 + acidGP7, family = binomial, data = plant.pa)
descae3
descae4<-glm(descae ~ PH + SOIL_H2O + carnarA + senfreA + acidGP7 + acidGP1, family = binomial, data = plant.pa)
descae4
# Model selection and fit statistics
AICc(descae)
AICc(descae2)
AICc(descae3)
AICc(descae4)
logisticPseudoR2s(descae)
logisticPseudoR2s(descae2)
logisticPseudoR2s(descae3)
logisticPseudoR2s(descae4)
Dsquared(model = descae, adjust = TRUE)
Dsquared(model = descae2, adjust = TRUE)
Dsquared(model = descae3, adjust = TRUE)
Dsquared(model = descae4, adjust = TRUE)
# Diagnostics on the full model
dwt(descae4)
vif(descae4)
1/vif(descae4)
mean(vif(descae4))
# Hard-coded AICc values from the output above
descaeaics<-c(80.8759,74.6818,77.3984,69.0363)
akaike.weights(descaeaics)
# 10-fold cross-validation error and AUC
descaeCV<-cv.glm(data=plant.pa,glmfit=descae,K=10)
descaeAUC<-AUC(model=descae)
descae2CV<-cv.glm(data=plant.pa,glmfit=descae2,K=10)
descae2AUC<-AUC(model=descae2)
descae3CV<-cv.glm(data=plant.pa,glmfit=descae3,K=10)
descae3AUC<-AUC(model=descae3)
descae4CV<-cv.glm(data=plant.pa,glmfit=descae4,K=10)
descae4AUC<-AUC(model=descae4)
descaeCV$delta
descae2CV$delta
descae3CV$delta
descae4CV$delta
descaeAUC$AUC
descae2AUC$AUC
descae3AUC$AUC
descae4AUC$AUC
# Elymus scriberneri
# Same four-model comparison workflow as above.
elyscr<-glm(elyscr ~ MeanSnow, family = binomial, data = plant.pa)
elyscr
elyscr2<-glm(elyscr ~ MeanSnow + anggraA, family = binomial, data = plant.pa)
elyscr2
elyscr3<-glm(elyscr ~ MeanSnow + delta, family = binomial, data = plant.pa)
elyscr3
elyscr4<-glm(elyscr ~ MeanSnow + anggraA + delta + acidGP1, family = binomial, data = plant.pa)
elyscr4
# Model selection and fit statistics
AICc(elyscr)
AICc(elyscr2)
AICc(elyscr3)
AICc(elyscr4)
logisticPseudoR2s(elyscr)
logisticPseudoR2s(elyscr2)
logisticPseudoR2s(elyscr3)
logisticPseudoR2s(elyscr4)
Dsquared(model = elyscr, adjust = TRUE)
Dsquared(model = elyscr2, adjust = TRUE)
Dsquared(model = elyscr3, adjust = TRUE)
Dsquared(model = elyscr4, adjust = TRUE)
# Diagnostics on the full model
dwt(elyscr4)
vif(elyscr4)
1/vif(elyscr4)
mean(vif(elyscr4))
# Hard-coded AICc values from the output above
elyscraics<-c(46.7872, 46.3103, 45.2885, 44.7362)
akaike.weights(elyscraics)
# 10-fold cross-validation error and AUC
elyscrCV<-cv.glm(data=plant.pa,glmfit=elyscr,K=10)
elyscrAUC<-AUC(model=elyscr)
elyscr2CV<-cv.glm(data=plant.pa,glmfit=elyscr2,K=10)
elyscr2AUC<-AUC(model=elyscr2)
elyscr3CV<-cv.glm(data=plant.pa,glmfit=elyscr3,K=10)
elyscr3AUC<-AUC(model=elyscr3)
elyscr4CV<-cv.glm(data=plant.pa,glmfit=elyscr4,K=10)
elyscr4AUC<-AUC(model=elyscr4)
elyscrCV$delta
elyscr2CV$delta
elyscr3CV$delta
elyscr4CV$delta
elyscrAUC$AUC
elyscr2AUC$AUC
elyscr3AUC$AUC
elyscr4AUC$AUC
# Festuca rubra
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
## (Note the argument order differs here: data before family.)
fesrub<-glm(fesrub ~ ALT + TDN, data = plant.pa, family = binomial)
fesrub
fesrub2<-glm(fesrub ~ ALT + TDN + trispiA + carphaA + geurosA + kobmyoA + elyscrA, family = binomial, data = plant.pa)
fesrub2
fesrub3<-glm(fesrub ~ ALT + TDN + oxalo, family = binomial, data = plant.pa)
fesrub3
fesrub4<-glm(fesrub ~ ALT + TDN + trispiA + carphaA + geurosA + kobmyoA + elyscrA + acidGP3, family = binomial, data = plant.pa)
fesrub4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(fesrub)
AICc(fesrub2)
AICc(fesrub3)
AICc(fesrub4)
logisticPseudoR2s(fesrub)
logisticPseudoR2s(fesrub2)
logisticPseudoR2s(fesrub3)
logisticPseudoR2s(fesrub4)
Dsquared(model = fesrub, adjust = TRUE)
Dsquared(model = fesrub2, adjust = TRUE)
Dsquared(model = fesrub3, adjust = TRUE)
Dsquared(model = fesrub4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(fesrub4)
vif(fesrub4)
1/vif(fesrub4)
mean(vif(fesrub4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
fesrubaics<-c(80.2030, 74.9179, 80.9339, 76.4520)
akaike.weights(fesrubaics)
## 10-fold cross-validated prediction error and AUC for each model.
fesrubCV<-cv.glm(data=plant.pa,glmfit=fesrub,K=10)
fesrubAUC<-AUC(model=fesrub)
fesrub2CV<-cv.glm(data=plant.pa,glmfit=fesrub2,K=10)
fesrub2AUC<-AUC(model=fesrub2)
fesrub3CV<-cv.glm(data=plant.pa,glmfit=fesrub3,K=10)
fesrub3AUC<-AUC(model=fesrub3)
fesrub4CV<-cv.glm(data=plant.pa,glmfit=fesrub4,K=10)
fesrub4AUC<-AUC(model=fesrub4)
## Print CV error estimates (delta) and AUCs.
fesrubCV$delta
fesrub2CV$delta
fesrub3CV$delta
fesrub4CV$delta
fesrubAUC$AUC
fesrub2AUC$AUC
fesrub3AUC$AUC
fesrub4AUC$AUC
# Kobresia myosuroides
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
kobmyo<-glm(kobmyo ~ ALT, family = binomial, data = plant.pa)
kobmyo
kobmyo2<-glm(kobmyo ~ ALT + fesrubA, family = binomial, data = plant.pa)
kobmyo2
kobmyo3<-glm(kobmyo ~ ALT + acidGP3, family = binomial, data = plant.pa)
kobmyo3
kobmyo4<-glm(kobmyo ~ ALT + fesrubA + acidGP3 + sphingo, family = binomial, data = plant.pa)
kobmyo4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(kobmyo)
AICc(kobmyo2)
AICc(kobmyo3)
AICc(kobmyo4)
logisticPseudoR2s(kobmyo)
logisticPseudoR2s(kobmyo2)
logisticPseudoR2s(kobmyo3)
logisticPseudoR2s(kobmyo4)
Dsquared(model = kobmyo, adjust = TRUE)
Dsquared(model = kobmyo2, adjust = TRUE)
Dsquared(model = kobmyo3, adjust = TRUE)
Dsquared(model = kobmyo4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(kobmyo4)
vif(kobmyo4)
1/vif(kobmyo4)
mean(vif(kobmyo4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
kobmyoaics<-c(71.6094, 71.7590, 69.3965, 69.5209)
akaike.weights(kobmyoaics)
## 10-fold cross-validated prediction error and AUC for each model.
kobmyoCV<-cv.glm(data=plant.pa,glmfit=kobmyo,K=10)
kobmyoAUC<-AUC(model=kobmyo)
kobmyo2CV<-cv.glm(data=plant.pa,glmfit=kobmyo2,K=10)
kobmyo2AUC<-AUC(model=kobmyo2)
kobmyo3CV<-cv.glm(data=plant.pa,glmfit=kobmyo3,K=10)
kobmyo3AUC<-AUC(model=kobmyo3)
kobmyo4CV<-cv.glm(data=plant.pa,glmfit=kobmyo4,K=10)
kobmyo4AUC<-AUC(model=kobmyo4)
## Print CV error estimates (delta) and AUCs.
kobmyoCV$delta
kobmyo2CV$delta
kobmyo3CV$delta
kobmyo4CV$delta
kobmyoAUC$AUC
kobmyo2AUC$AUC
kobmyo3AUC$AUC
kobmyo4AUC$AUC
# Trisetum spicatum
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
trispi<-glm(trispi ~ MeanSnow + SAND, family = binomial, data = plant.pa)
## NOTE: unlike the other sections, AICc is printed here instead of the
## model summary (and AICc(trispi) is repeated again below).
AICc(trispi)
trispi2<-glm(trispi ~ MeanSnow + SAND + antalpA + anggraA, family = binomial, data = plant.pa)
trispi2
trispi3<-glm(trispi ~ MeanSnow + SAND + pseudo + tm7, family = binomial, data = plant.pa)
trispi3
trispi4<-glm(trispi ~ MeanSnow + SAND + antalpA + anggraA + tm7 + pseudo, family = binomial, data = plant.pa)
trispi4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(trispi)
AICc(trispi2)
AICc(trispi3)
AICc(trispi4)
logisticPseudoR2s(trispi)
logisticPseudoR2s(trispi2)
logisticPseudoR2s(trispi3)
logisticPseudoR2s(trispi4)
Dsquared(model = trispi, adjust = TRUE)
Dsquared(model = trispi2, adjust = TRUE)
Dsquared(model = trispi3, adjust = TRUE)
Dsquared(model = trispi4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(trispi4)
vif(trispi4)
1/vif(trispi4)
mean(vif(trispi4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
trispiaics<-c(82.2695,79.7835,79.8116,78.3555)
akaike.weights(trispiaics)
## 10-fold cross-validated prediction error and AUC for each model.
trispiCV<-cv.glm(data=plant.pa,glmfit=trispi,K=10)
trispiAUC<-AUC(model=trispi)
trispi2CV<-cv.glm(data=plant.pa,glmfit=trispi2,K=10)
trispi2AUC<-AUC(model=trispi2)
trispi3CV<-cv.glm(data=plant.pa,glmfit=trispi3,K=10)
trispi3AUC<-AUC(model=trispi3)
trispi4CV<-cv.glm(data=plant.pa,glmfit=trispi4,K=10)
trispi4AUC<-AUC(model=trispi4)
## Print CV error estimates (delta) and AUCs.
trispiCV$delta
trispi2CV$delta
trispi3CV$delta
trispi4CV$delta
trispiAUC$AUC
trispi2AUC$AUC
trispi3AUC$AUC
trispi4AUC$AUC
# Cirsium scopulorum
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
cirsco<-glm(cirsco ~ MeanSnow + ALT + SOIL_H2O, family = binomial, data = plant.pa)
cirsco
cirsco2<-glm(cirsco ~ MeanSnow + ALT + SOIL_H2O + carphaA + phlsibA + silacaA, family = binomial, data = plant.pa)
cirsco2
cirsco3<-glm(cirsco ~ MeanSnow + ALT + SOIL_H2O + delta + tm7, family = binomial, data = plant.pa)
cirsco3
cirsco4<-glm(cirsco ~ MeanSnow + ALT + SOIL_H2O + carphaA + phlsibA + silacaA + delta, family = binomial, data = plant.pa)
cirsco4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(cirsco)
AICc(cirsco2)
AICc(cirsco3)
AICc(cirsco4)
logisticPseudoR2s(cirsco)
logisticPseudoR2s(cirsco2)
logisticPseudoR2s(cirsco3)
logisticPseudoR2s(cirsco4)
Dsquared(model = cirsco, adjust = TRUE)
Dsquared(model = cirsco2, adjust = TRUE)
Dsquared(model = cirsco3, adjust = TRUE)
Dsquared(model = cirsco4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(cirsco4)
vif(cirsco4)
1/vif(cirsco4)
mean(vif(cirsco4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
cirscoaics<-c(52.4776, 48.6046, 49.9694,43.3514)
akaike.weights(cirscoaics)
## 10-fold cross-validated prediction error and AUC for each model.
cirscoCV<-cv.glm(data=plant.pa,glmfit=cirsco,K=10)
cirscoAUC<-AUC(model=cirsco)
cirsco2CV<-cv.glm(data=plant.pa,glmfit=cirsco2,K=10)
cirsco2AUC<-AUC(model=cirsco2)
cirsco3CV<-cv.glm(data=plant.pa,glmfit=cirsco3,K=10)
cirsco3AUC<-AUC(model=cirsco3)
cirsco4CV<-cv.glm(data=plant.pa,glmfit=cirsco4,K=10)
cirsco4AUC<-AUC(model=cirsco4)
## Print CV error estimates (delta) and AUCs.
cirscoCV$delta
cirsco2CV$delta
cirsco3CV$delta
cirsco4CV$delta
cirscoAUC$AUC
cirsco2AUC$AUC
cirsco3AUC$AUC
cirsco4AUC$AUC
# Geum rossii
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
geuros<-glm(geuros ~ DOC + Dpinorg + Dptotal, family = binomial, data = plant.pa)
geuros
geuros2<-glm(geuros ~ DOC + Dpinorg + Dptotal + carphaA + hymgraA, family = binomial, data = plant.pa)
geuros2
geuros3<-glm(geuros ~ DOC + Dpinorg + Dptotal + cyano, family = binomial, data = plant.pa)
geuros3
geuros4<-glm(geuros ~ DOC + Dpinorg + Dptotal + carphaA + hymgraA + cyano + delta, family = binomial, data = plant.pa)
geuros4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(geuros)
AICc(geuros2)
AICc(geuros3)
AICc(geuros4)
logisticPseudoR2s(geuros)
logisticPseudoR2s(geuros2)
logisticPseudoR2s(geuros3)
logisticPseudoR2s(geuros4)
Dsquared(model = geuros, adjust = TRUE)
Dsquared(model = geuros2, adjust = TRUE)
Dsquared(model = geuros3, adjust = TRUE)
Dsquared(model = geuros4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(geuros4)
vif(geuros4)
1/vif(geuros4)
mean(vif(geuros4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
geurosaics<-c(58.3605, 51.1933, 58.2956, 50.1202)
akaike.weights(geurosaics)
## 10-fold cross-validated prediction error and AUC for each model.
geurosCV<-cv.glm(data=plant.pa,glmfit=geuros,K=10)
geurosAUC<-AUC(model=geuros)
geuros2CV<-cv.glm(data=plant.pa,glmfit=geuros2,K=10)
geuros2AUC<-AUC(model=geuros2)
geuros3CV<-cv.glm(data=plant.pa,glmfit=geuros3,K=10)
geuros3AUC<-AUC(model=geuros3)
geuros4CV<-cv.glm(data=plant.pa,glmfit=geuros4,K=10)
geuros4AUC<-AUC(model=geuros4)
## Print CV error estimates (delta) and AUCs.
geurosCV$delta
geuros2CV$delta
geuros3CV$delta
geuros4CV$delta
geurosAUC$AUC
geuros2AUC$AUC
geuros3AUC$AUC
geuros4AUC$AUC
# Oxyria digyna
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
oxydig<-glm(oxydig ~ MeanSnow + SOIL_H2O + PH + Dpinorg, family = binomial, data = plant.pa)
oxydig
oxydig2<-glm(oxydig ~ MeanSnow + SOIL_H2O + PH + Dpinorg + carphaA + fesrubA, family = binomial, data = plant.pa)
oxydig2
oxydig3<-glm(oxydig ~ MeanSnow + SOIL_H2O + PH + Dpinorg + acidGP7 + delta + pseudo, family = binomial, data = plant.pa)
oxydig3
oxydig4<-glm(oxydig ~ MeanSnow + SOIL_H2O + PH + Dpinorg + carphaA + fesrubA + pseudo + delta, family = binomial, data = plant.pa)
oxydig4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(oxydig)
AICc(oxydig2)
AICc(oxydig3)
AICc(oxydig4)
logisticPseudoR2s(oxydig)
logisticPseudoR2s(oxydig2)
logisticPseudoR2s(oxydig3)
logisticPseudoR2s(oxydig4)
Dsquared(model = oxydig, adjust = TRUE)
Dsquared(model = oxydig2, adjust = TRUE)
Dsquared(model = oxydig3, adjust = TRUE)
Dsquared(model = oxydig4, adjust = TRUE)
## NOTE: diagnostics below are run on model 3, not model 4 -- every other
## section uses the full model here. TODO(review): confirm this is intended.
dwt(oxydig3)
vif(oxydig3)
1/vif(oxydig3)
mean(vif(oxydig3))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
oxydigaics<-c(47.0146, 44.2358, 43.3568, 41.9309)
akaike.weights(oxydigaics)
## 10-fold cross-validated prediction error and AUC for each model.
oxydigCV<-cv.glm(data=plant.pa,glmfit=oxydig,K=10)
oxydigAUC<-AUC(model=oxydig)
oxydig2CV<-cv.glm(data=plant.pa,glmfit=oxydig2,K=10)
oxydig2AUC<-AUC(model=oxydig2)
oxydig3CV<-cv.glm(data=plant.pa,glmfit=oxydig3,K=10)
oxydig3AUC<-AUC(model=oxydig3)
oxydig4CV<-cv.glm(data=plant.pa,glmfit=oxydig4,K=10)
oxydig4AUC<-AUC(model=oxydig4)
## Print CV error estimates (delta) and AUCs.
oxydigCV$delta
oxydig2CV$delta
oxydig3CV$delta
oxydig4CV$delta
oxydigAUC$AUC
oxydig2AUC$AUC
oxydig3AUC$AUC
oxydig4AUC$AUC
# Senecio fremontii
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
senfre<-glm(senfre ~ PH, family = binomial, data = plant.pa)
senfre
senfre2<-glm(senfre ~ PH + mossA, family = binomial, data = plant.pa)
senfre2
senfre3<-glm(senfre ~ PH + oxalo + burk + ktedo, family = binomial, data = plant.pa)
senfre3
senfre4<-glm(senfre ~ PH + mossA + oxalo + burk + ktedo, family = binomial, data = plant.pa)
senfre4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(senfre)
AICc(senfre2)
AICc(senfre3)
AICc(senfre4)
logisticPseudoR2s(senfre)
logisticPseudoR2s(senfre2)
logisticPseudoR2s(senfre3)
logisticPseudoR2s(senfre4)
Dsquared(model = senfre, adjust = TRUE)
Dsquared(model = senfre2, adjust = TRUE)
Dsquared(model = senfre3, adjust = TRUE)
Dsquared(model = senfre4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(senfre4)
vif(senfre4)
1/vif(senfre4)
mean(vif(senfre4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
senfreaics<-c(75.9411, 74.4281, 66.1767, 68.0320)
akaike.weights(senfreaics)
## 10-fold cross-validated prediction error and AUC for each model.
senfreCV<-cv.glm(data=plant.pa,glmfit=senfre,K=10)
senfreAUC<-AUC(model=senfre)
senfre2CV<-cv.glm(data=plant.pa,glmfit=senfre2,K=10)
senfre2AUC<-AUC(model=senfre2)
senfre3CV<-cv.glm(data=plant.pa,glmfit=senfre3,K=10)
senfre3AUC<-AUC(model=senfre3)
senfre4CV<-cv.glm(data=plant.pa,glmfit=senfre4,K=10)
senfre4AUC<-AUC(model=senfre4)
## Print CV error estimates (delta) and AUCs.
senfreCV$delta
senfre2CV$delta
senfre3CV$delta
senfre4CV$delta
senfreAUC$AUC
senfre2AUC$AUC
senfre3AUC$AUC
senfre4AUC$AUC
# Silene acaulis
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
silaca<-glm(silaca ~ DOC + MeanSnow + ALT, family = binomial, data = plant.pa)
silaca
silaca2<-glm(silaca ~ DOC + MeanSnow + ALT + geurosA + anggraA + trispiA + fesrubA, family = binomial, data = plant.pa)
silaca2
silaca3<-glm(silaca ~ DOC + MeanSnow + ALT + acidGP1 + actinomyc, family = binomial, data = plant.pa)
silaca3
silaca4<-glm(silaca ~ DOC + MeanSnow + ALT + geurosA + anggraA + trispiA + fesrubA + burk, family = binomial, data = plant.pa)
silaca4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(silaca)
AICc(silaca2)
AICc(silaca3)
AICc(silaca4)
logisticPseudoR2s(silaca)
logisticPseudoR2s(silaca2)
logisticPseudoR2s(silaca3)
logisticPseudoR2s(silaca4)
Dsquared(model = silaca, adjust = TRUE)
Dsquared(model = silaca2, adjust = TRUE)
Dsquared(model = silaca3, adjust = TRUE)
Dsquared(model = silaca4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(silaca4)
vif(silaca4)
1/vif(silaca4)
mean(vif(silaca4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
silacaaics<-c(52.8054, 41.5426, 45.6006, 36.5904)
akaike.weights(silacaaics)
## 10-fold cross-validated prediction error and AUC for each model.
silacaCV<-cv.glm(data=plant.pa,glmfit=silaca,K=10)
silacaAUC<-AUC(model=silaca)
silaca2CV<-cv.glm(data=plant.pa,glmfit=silaca2,K=10)
silaca2AUC<-AUC(model=silaca2)
silaca3CV<-cv.glm(data=plant.pa,glmfit=silaca3,K=10)
silaca3AUC<-AUC(model=silaca3)
silaca4CV<-cv.glm(data=plant.pa,glmfit=silaca4,K=10)
silaca4AUC<-AUC(model=silaca4)
## Print CV error estimates (delta) and AUCs.
silacaCV$delta
silaca2CV$delta
silaca3CV$delta
silaca4CV$delta
silacaAUC$AUC
silaca2AUC$AUC
silaca3AUC$AUC
silaca4AUC$AUC
# Moss
## Candidate occurrence GLMs (binomial): 1 = abiotic only, 2 = + plant
## terms (*A), 3 = + bacterial groups, 4 = combined/full.
moss<-glm(moss ~ MeanSnow + ALT, family = binomial, data = plant.pa)
moss
moss2<-glm(moss ~ MeanSnow + ALT + carperA + kobmyoA, family = binomial, data = plant.pa)
moss2
moss3<-glm(moss ~ MeanSnow + ALT + rhodo + oxalo, family = binomial, data = plant.pa)
moss3
moss4<-glm(moss ~ MeanSnow + ALT + carperA + kobmyoA + rhodo, family = binomial, data = plant.pa)
moss4
## Model selection (small-sample AIC) and fit: pseudo-R2s and adjusted D2.
AICc(moss)
AICc(moss2)
AICc(moss3)
AICc(moss4)
logisticPseudoR2s(moss)
logisticPseudoR2s(moss2)
logisticPseudoR2s(moss3)
logisticPseudoR2s(moss4)
Dsquared(model = moss, adjust = TRUE)
Dsquared(model = moss2, adjust = TRUE)
Dsquared(model = moss3, adjust = TRUE)
Dsquared(model = moss4, adjust = TRUE)
## Diagnostics on the full model: Durbin-Watson and variance inflation.
dwt(moss4)
vif(moss4)
1/vif(moss4)
mean(vif(moss4))
## AICc values transcribed by hand from the output above -- TODO(review):
## re-check against the printed AICc before trusting the Akaike weights.
mossaics<-c(85.0860, 82.4269, 82.6510, 81.5441)
akaike.weights(mossaics)
## 10-fold cross-validated prediction error and AUC for each model.
mossCV<-cv.glm(data=plant.pa,glmfit=moss,K=10)
mossAUC<-AUC(model=moss)
moss2CV<-cv.glm(data=plant.pa,glmfit=moss2,K=10)
moss2AUC<-AUC(model=moss2)
moss3CV<-cv.glm(data=plant.pa,glmfit=moss3,K=10)
moss3AUC<-AUC(model=moss3)
moss4CV<-cv.glm(data=plant.pa,glmfit=moss4,K=10)
moss4AUC<-AUC(model=moss4)
## Print CV error estimates (delta) and AUCs.
mossCV$delta
moss2CV$delta
moss3CV$delta
moss4CV$delta
mossAUC$AUC
moss2AUC$AUC
moss3AUC$AUC
moss4AUC$AUC
###################################### Predicted Probabilities ############################
## Per-species data frames: selected columns of plant.pa plus PP, the fitted
## occurrence probabilities from each species' model 3 (bacteria-augmented).
## NOTE: columns are selected by numeric position, which is fragile if
## plant.pa's layout changes -- TODO(review): confirm indices match the
## intended covariates. Also, fitted() drops rows the glm excluded (e.g.
## NAs), so PP assumes model 3 used every row of plant.pa -- verify.
## The shapiro.test calls check normality of the fitted probabilities.
carnardata<-subset(plant.pa,select=c(31,2,3,1,11,9,72))
carnardata$PP<-fitted(carnar3)
shapiro.test(carnardata$PP)
carphadata<-subset(plant.pa,select=c(33,10,81))
carphadata$PP<-fitted(carpha3)
shapiro.test(carphadata$PP)
descaedata<-subset(plant.pa,select=c(29,1,7,10,81))
descaedata$PP<-fitted(descae3)
shapiro.test(descaedata$PP)
elyscrdata<-subset(plant.pa,select=c(26,3,9,10,76))
elyscrdata$PP<-fitted(elyscr3)
shapiro.test(elyscrdata$PP)
fesrubdata<-subset(plant.pa,select=c(28,1,8,84))
fesrubdata$PP<-fitted(fesrub3)
shapiro.test(fesrubdata$PP)
kobmyodata<-subset(plant.pa,select=c(30,1,7,72))
kobmyodata$PP<-fitted(kobmyo3)
shapiro.test(kobmyodata$PP)
trispidata<-subset(plant.pa,select=c(27,3,4,75))
trispidata$PP<-fitted(trispi3)
shapiro.test(trispidata$PP)
cirscodata<-subset(plant.pa,select=c(12,3,1,2,76))
cirscodata$PP<-fitted(cirsco3)
shapiro.test(cirscodata$PP)
geurosdata<-subset(plant.pa,select=c(25,9,3,2,80))
geurosdata$PP<-fitted(geuros3)
shapiro.test(geurosdata$PP)
oxydigdata<-subset(plant.pa,select=c(20,7,2,3,11,9,74))
oxydigdata$PP<-fitted(oxydig3)
shapiro.test(oxydigdata$PP)
senfredata<-subset(plant.pa,select=c(17,7,84))
senfredata$PP<-fitted(senfre3)
shapiro.test(senfredata$PP)
silacadata<-subset(plant.pa,select=c(13,3,9,81))
silacadata$PP<-fitted(silaca3)
shapiro.test(silacadata$PP)
# Correlations between predicted prob. and bacterial abundances
## Kendall correlations between each species' model-3 fitted probabilities
## (PP) and the bacterial group retained in that model.
cor.test(carnardata$acidGP3, carnardata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(carphadata$acidGP1, carphadata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(descaedata$acidGP1, descaedata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(elyscrdata$delta, elyscrdata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
## BUG FIX: this test previously correlated fesrubdata$oxalo against
## trispidata$PP (copy-paste error); it now uses fesrub's own fitted values.
cor.test(fesrubdata$oxalo, fesrubdata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(kobmyodata$acidGP3, kobmyodata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(trispidata$pseudo, trispidata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(cirscodata$delta, cirscodata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(geurosdata$cyano, geurosdata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(oxydigdata$acidGP7, oxydigdata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(senfredata$oxalo, senfredata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
cor.test(silacadata$acidGP1, silacadata$PP, alternative = "two.sided", method = "kendall", conf.level = 0.95)
#8/12 significant, 1 marginally significant
## NOTE(review): the tally above predates the fesrub fix -- recount after rerunning.
## Simple linear fits of PP on each bacterial covariate; used only to draw
## the purple trend lines in Figure 3 below. (line12 is defined out of
## numeric order, between line6 and line7.)
line1<-lm(PP ~ acidGP3, data = carnardata)
line2<-lm(PP ~ acidGP1, data = carphadata)
line3<-lm(PP ~ acidGP1, data = descaedata)
line4<-lm(PP ~ delta, data = elyscrdata)
line5<-lm(PP ~ oxalo, data = fesrubdata)
line6<-lm(PP ~ acidGP3, data = kobmyodata)
line12<-lm(PP ~ pseudo, data = trispidata)
line7<-lm(PP ~ delta, data = cirscodata)
line8<-lm(PP ~ cyano, data = geurosdata)
line9<-lm(PP ~ acidGP7, data = oxydigdata)
line10<-lm(PP ~ oxalo, data = senfredata)
line11<-lm(PP ~ acidGP1, data = silacadata)
# Figure 3
## 4x3 panel grid: bacterial relative abundance vs model-3 fitted occurrence
## probability per species. Purple lines are the lm fits (line1..line12) and
## the tau/p annotations are hard-coded from earlier cor.test output --
## TODO(review): re-check them if the models or data change. Also note the
## fesrub cor.test originally used trispidata$PP, so that panel's annotation
## may be stale. lines() connects points in data order; if the x values are
## unsorted the trend lines may zigzag -- confirm ordering before publishing.
par(mfrow=c(4,3), oma=c(3,3,1,1) +0.1,mar=c(2,0.75,2,0.75) +0.1,mgp=c(2,1,0),xpd=NA)
par(xaxs="i")
plot(carnardata$acidGP3, carnardata$PP,ylab="",xlab="Acidobacteria Gp3", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="C. nardina", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=TRUE); lines(carnardata$acidGP3, fitted(line1), col="purple"); text(x=0.11, y=0.35, "τ = 0.38, p<0.01", cex=0.75)
plot(carphadata$acidGP1, carphadata$PP,ylab="",xlab="Acidobacteria Gp1", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="C. phaeocephala", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); text(x=0.11, y=0.35, "τ = 0.09, p>0.05", cex=0.75)
plot(descaedata$acidGP1, descaedata$PP,ylab="", xlab="Acidobacteria Gp1", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="D. cespitosa", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); text (x=0.11, y=0.35, "τ = 0.13, p>0.05", cex=0.75)
plot(elyscrdata$delta, elyscrdata$PP,ylab="",xlab="Deltaproteobacteria", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="E. scriberneri", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=TRUE); lines(elyscrdata$delta, fitted(line4), col="purple"); text(x=0.11, y=0.35, "τ = -0.50, p<0.01", cex=0.75)
plot(fesrubdata$oxalo, fesrubdata$PP,ylab="", xlab="Oxalobacteraceae", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="F. rubra", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); text (x=0.11, y=0.35, "τ = -0.12, p>0.05", cex=0.75)
plot(kobmyodata$acidGP3, kobmyodata$PP,ylab="", xlab="Acidobacteria Gp3", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="K. myosuroides", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); lines(kobmyodata$acidGP3, fitted(line6), col="purple"); text (x=0.11, y=0.35, "τ = 0.53, p<0.01", cex=0.75)
plot(trispidata$pseudo, trispidata$PP, ylab="", xlab="Pseudonocardiaceae", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="T. spicatum", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=TRUE); lines(trispidata$pseudo, fitted(line12), col="purple"); text(x=0.11, y=0.35, "τ = 0.58, p<0.01", cex=0.75)
plot(cirscodata$delta, cirscodata$PP,ylab="",xlab="Deltaproteobacteria", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="C. scopulorum", font.main=3, adj=0.99, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); text(x=0.11, y=0.75, "τ = 0.07, p>0.05", cex=0.75)
plot(geurosdata$cyano, geurosdata$PP,ylab="", xlab="Cyanobacteria", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="G. rossii", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); lines(geurosdata$cyano, fitted(line8), col="purple"); text (x=0.11, y=0.35, "τ = -0.38, p<0.01", cex=0.75)
plot(oxydigdata$acidGP7, oxydigdata$PP,ylab="", xlab="Acidobacteria Gp7", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="O. digyna", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=TRUE); lines(oxydigdata$acidGP7, fitted(line9), col="purple"); text (x=0.11, y=0.35, "τ = 0.28, p<0.01", cex=0.75)
plot(senfredata$oxalo, senfredata$PP,ylab="", xlab="Oxalobacteraceae", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="S. fremontii", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); lines(senfredata$oxalo, fitted(line10), col="purple"); text (x=0.11, y=0.35, "τ = 0.55, p<0.01", cex=0.75)
plot(silacadata$acidGP1, silacadata$PP,ylab="", xlab="Acidobacteria Gp1", cex.main=1, ylim=c(0,1),xlim=c(0,0.14),yaxt='n');title(main="S. acaulis", font.main=3, adj=0.85, cex.main=0.90, line=-0.69); axis(side=2, at=c(0,0.2,0.4,0.6,0.8,1), labels=FALSE); lines(silacadata$acidGP1, fitted(line11), col="purple"); text (x=0.11, y=0.35, "τ = -0.24, p<0.01", cex=0.75)
## Shared axis labels in the outer margin.
mtext("Predicted Probability of Occurrence",side=2,outer=TRUE,cex=0.8,line=1.3)
mtext("Bacteria Relative Abundance",side=1,outer=TRUE,cex=0.8,line=1.25)
################################# Deviance Explained ######################################
# D2 Values for Abiotic, Abiotic + Plant, and Full models
## Each vector is (abiotic, abiotic + plant, full), transcribed by hand from
## the Dsquared output above -- TODO(review): re-check before trusting Fig 2.
## WARNING: these assignments overwrite the fitted glm objects of the same
## names created earlier; run anything needing the models before this point.
carnar<-c(0.171,0.401,0.425)
carpha<-c(0.143,0.241,0.287)
descae<-c(0.099,0.192,0.304)
elyscr<-c(0.177,0.217,0.316)
fesrub<-c(0.165,0.307,0.310)
kobmyo<-c(0.063,0.077,0.147)
trispi<-c(0.135,0.190,0.240)
cirsco<-c(0.433,0.555,0.656)
geuros<-c(0.124,0.312,0.405)
oxydig<-c(0.437,0.543,0.654)
senfre<-c(0.158,0.189,0.316)
silaca<-c(0.293,0.606,0.732)
# Figure 2
## 4x3 grid of barplots of the adjusted D-squared vectors above
## (white = abiotic, grey70 = abiotic + plant, grey50 = full). The asterisk
## annotations are hard-coded positions/labels -- TODO(review): confirm what
## they denote (presumably model significance) and that they stay current.
par(mfrow=c(4,3), oma=c(3,3,1,1) +0.1,mar=c(0.75,0.75,0.75,0.75) +0.1,mgp=c(2,1,0),xpd=NA)
par(xaxs="i")
barplot(carnar, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3));title(main="C. nardina", font.main=3,adj=0.05,cex.main=0.90,line=-1); text(x=0.155,y=carnar[2], "*", pos=3, offset=0.1, cex=1.5)
barplot(carpha, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="C. phaeocephala", font.main=3, adj=0.05, cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.255,y=carpha[3], "*", pos=3, offset=0.1, cex=1.5)
barplot(descae, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="D. cespitosa", font.main=3,adj=0.05, cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.255,y=descae[3], "**", pos=3, offset=0.1, cex=1.5)
barplot(elyscr, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3));title(main="E. scriberneri", font.main=3,adj=0.05, cex.main=0.90,line=-1); text(x=0.255,y=elyscr[3], "*", pos=3, offset=0.1, cex=1.5)
barplot(fesrub, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="F. rubra", font.main=3,adj=0.05, cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.155,y=fesrub[2], "**", pos=3, offset=0.1, cex=1.5)
barplot(kobmyo, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="K. myosuroides", font.main=3,adj=0.05,cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.255,y=kobmyo[3], "**", pos=3, offset=0.1, cex=1.5)
barplot(trispi, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, cex.names=0.75, ylim=c(0,0.8),xlim=c(0,0.3));title(main="T. spicatum", font.main=3,adj=0.05,cex.main=0.90,line=-1); text(x=0.255,y=trispi[3], "**", pos=3, offset=0.1, cex=1.5)
barplot(cirsco, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="C. scopulorum", font.main=3,adj=0.05, cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.255,y=cirsco[3], "**", pos=3, offset=0.1, cex=1.5)
barplot(geuros, space=0.01, width=0.1, col=c("white","gray70","gray50"), cex.main=1, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="G. rossii", font.main=3,adj=0.05, cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.255,y=geuros[3], "*", pos=3, offset=0.1, cex=1.5)
barplot(oxydig, space=0.01, width=0.1,names.arg=c("A","A+P","FULL"), col=c("white","gray70","gray50"), cex.main=1, cex.names=0.75, ylim=c(0,0.8),xlim=c(0,0.3));title(main="O. digyna", font.main=3,adj=0.05, cex.main=0.90,line=-1); text(x=0.255,y=oxydig[3], "**", pos=3, offset=0.1, cex=1.5)
barplot(senfre, space=0.01, width=0.1, names.arg=c("A","A+P","FULL"), col=c("white","gray70","gray50"), cex.main=1,cex.names=0.75, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="S. fremontii", font.main=3,adj=0.05, cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.255,y=senfre[3], "**", pos=3, offset=0.1, cex=1.5)
barplot(silaca, space=0.01, width=0.1, names.arg=c("A","A+P","FULL"), col=c("white","gray70","gray50"), cex.main=1, cex.names=0.75, ylim=c(0,0.8),xlim=c(0,0.3),yaxt='n');title(main="S. acaulis", font.main=3,adj=0.05,cex.main=0.90,line=-1); axis(side=2, at=c(0,0.2,0.4,0.6,0.8), labels=FALSE); text(x=0.255,y=silaca[3], "**", pos=3, offset=0.1, cex=1.5)
## Shared axis labels in the outer margin.
mtext("Model",side=1,outer=TRUE,cex=0.8,line=1.6)
mtext("Adj. D Squared Value",side=2,outer=TRUE,cex=0.8,line=1.2)
|
2816d8e7559b3e18714569bd70c698cf73d3897c | a047f577562585eb32b386d247b5637c1613cb3e | /code/rpart.R | 2987f81ca0a73cf69d21b0d3e55a3116683baf02 | [] | no_license | jshen226/STAT-628-2 | 636293fa00ee548aedcff9032a1676ba8ed30d3f | deabe20d2ca94283b725363b0deebac9115dfe4c | refs/heads/master | 2021-10-25T06:11:54.085872 | 2019-04-02T02:04:12 | 2019-04-02T02:04:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 537 | r | rpart.R | require(rpart)
library(rpart.plot)  # was require(): library() errors loudly if the package is missing
## Read the attribute table; na.strings=c() means no string is treated as NA.
df <- read.csv('clean_attributes.csv', header = TRUE, na.strings=c())
df <- df[, -1]  # drop the leading id column
#The missing rate of all the attributes is around 0.5
sum(is.na(df)) / (nrow(df) * ncol(df))  # was hard-coded 5693*68
## Keep only columns whose missing rate is at most 0.5. The original
## `df[, -which(rate > 0.5)]` silently drops EVERY column when no rate
## exceeds 0.5, because negative indexing with integer(0) selects nothing.
miss.rate <- apply(df, 2, function(x) sum(is.na(x)) / nrow(df))
df1 <- df[, miss.rate <= 0.5]
#After deleting columns with missing rate > 0.5, the total missing rate reduces to 0.165
sum(is.na(df1)) / (nrow(df1) * ncol(df1))  # was hard-coded 5693*37
## Shallow regression tree on the star rating; cp = 0 disables cost-complexity pruning.
tree <- rpart(american_traditional_star ~ ., method = 'anova', data = df1,
              control = list(maxdepth = 3, cp = 0))
rpart.plot(tree) |
f45c23b6470d7c1e72a5c46e5cf40c4f7c0a06ca | 17f2a5bda68e2df016bfc0833e29b4ff7841d517 | /man/fnDetermineScoreColumns.Rd | b47ffb4ca510ee78abc2d7a90b4d9252afc6f99a | [] | no_license | PSMD-Psychometrics/-old-psychometricsPSMD-old- | 3a5b6b51896c41154f547b7251a9d0edef1b28fc | 3a5c817fd6e0ddc357590e94c5ca2b7f803ad3c4 | refs/heads/master | 2021-07-03T09:22:30.848231 | 2017-09-22T09:03:09 | 2017-09-22T09:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,987 | rd | fnDetermineScoreColumns.Rd | \name{fnDetermineScoreColumns.}
\alias{fnDetermineScoreColumns.}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{A function to determine which column or columns in a data frame contain (numeric) scores.
%% ~~function to do ... ~~
}
\description{
fnDetermineScoreColumns is designed to determine which column or columns in a data frame contain (numeric) scores. It operates based on a series of conditionals which evaluate aspects such as whether the variable is numeric, whether the values have a range typical of scoring schemes, or have ranges narrower or broader than scoring schemes used in PSMD.
}
\usage{
fnDetermineScoreColumns.(Dataframe)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Dataframe}{
Dataframe is a data frame of which you want to determine the possible score column(s).
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Dr Daniel Zahra, e:psychometrics@plymouth.ac.uk
}
\note{
DZ250417: This is currently an experimental function and will be updated to handle real data more accurately.
DZ250417: Build in a probabilistic element to provide the single best-guess if length(Possible.Score.Columns)>1.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
fnDetermineScoreColumns.(dataExample) will return a list of column names for the columns the function thinks could be scores.
In the case of dataExample, there are multiple possibilities. In reality, there are likely to be fewer.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
4465865b5c32d5e4e07e784e4a98615ce7cf33d3 | e517b80116242ca6a62db130fb8bb8293a0db403 | /man/h_ttp.Rd | 849fce314a9cb1c4122516c6dc5636d91ff011c6 | [] | no_license | cran/RWsearch | f4f609181ad7b22e47af29753875e19fd8a1ef15 | b1a27cc2815019cee453379fd29c54039977f613 | refs/heads/master | 2023-03-15T17:47:14.146695 | 2022-02-22T13:00:06 | 2022-02-22T13:00:06 | 236,888,181 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 713 | rd | h_ttp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h_ttp.R
\name{h_ttp}
\alias{h_ttp}
\title{Open a Web Page in the Browser}
\usage{
h_ttp(..., char = NULL, https = TRUE, www = FALSE)
}
\arguments{
\item{...}{any format recognized by \code{\link{cnsc}}, except list.
A regular web address.}
\item{char}{(name to) a character vector. Use this argument if
\code{...} fails or if you call the function from another function.}
\item{https}{logical. Use https or http.}
\item{www}{logical. Add www. to the address.}
}
\description{
\code{h_ttp} opens the page corresponding to the mentioned address in the default browser.
}
\examples{
if (interactive()) {
h_ttp("www.r-project.org")
}
}
|
51f7cd07393357ff416de9c7b35a3b618b4edde0 | 93adca55b253a7fb6ef72b7d451b4a39fea76be1 | /Web_app/Source/LSA.R | c2e218f97209273e4f0b30679fd0abfb03d09a54 | [] | no_license | Option10/Text-Mining-with-R | 4879447599fbe94ddfc7bb16cbad8e81f04edd5b | 0b1d0a5d6b6d2b91e8fccc2427b319e5f1d86dce | refs/heads/master | 2020-04-04T23:20:47.718750 | 2018-12-19T14:42:28 | 2018-12-19T14:42:28 | 156,355,390 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,368 | r | LSA.R | LSA <- function(df,nv){
library(irlba)
## analyzing Tokens:
#-------------------
# Our function for calculating relative term frequency (TF)
term.frequency <- function(row) {
row / rowSums(row)
}
# Our function for calculating inverse document frequency (IDF)
inverse.doc.freq <- function(col) {
corpus.size <- length(col[,1])
doc.count <- colSums(col > 0)
log10(corpus.size / doc.count)
}
# Our function for calculating TF-IDF.
tf.idf <- function(tf, idf) {
tf * idf
}
print("tf-idf")
# First step, normalize all documents via TF.
tokens.tf <- term.frequency(tokens.matrix)
# Second step, calculate the IDF vector that we will use - both
tokens.idf <- inverse.doc.freq(tokens.matrix)
# Lastly, calculate TF-IDF for our training corpus.
tokens.tfidf <- tf.idf(tokens.tf,tokens.idf)
## Perform SVD. Specifically, reduce dimensionality down to 'nv' columns
#-----------------------------------------------------------------------
# for our latent semantic analysis (LSA).
print("SVD")
irlba <- irlba(tokens.tfidf, nv = nv, maxit = 1000)
# line names
rownames(irlba$v) <- colnames(tokens.matrix)
rownames(irlba$u) <- row.names(tokens.matrix)
saveRDS(irlba, file = "irlba", ascii = FALSE, version = NULL,
compress = TRUE, refhook = NULL)
return(irlba)
} |
3f06225f2c6101ae90a3df51ee80745bdeec034f | 0ba8eb05e0f759d20f1a4fa7f1eff12ab69aa576 | /R/exampledataprep.R | deb3f6b7b44c50abecfa22f8c2628e9807362868 | [
"MIT"
] | permissive | tdbennett/robvis | 8cd273f4d5b88dda98405a04fe26a22e272386a7 | 1a19550f5b9d218efede7434b1a68a0f9f9d1df6 | refs/heads/master | 2022-07-29T13:00:26.908727 | 2020-05-15T22:18:24 | 2020-05-15T22:18:24 | 261,832,081 | 1 | 0 | NOASSERTION | 2020-05-06T17:30:01 | 2020-05-06T17:30:00 | null | UTF-8 | R | false | false | 609 | r | exampledataprep.R | # Load datasets
# data_rob1 <- read.csv("data_raw/data_rob1.csv", header = TRUE,
# fileEncoding = "latin1")
# data_rob2 <- read.csv("data_raw/data_rob2.csv", header = TRUE,
# fileEncoding = "latin1")
# data_robins <- read.csv("data_raw/data_robins.csv", header = TRUE,
# fileEncoding = "latin1")
# data_quadas <- read.csv("data_raw/data_quadas.csv", header = TRUE,
# fileEncoding = "latin1")
# usethis::use_data(data_rob1, overwrite = TRUE)
# usethis::use_data(data_rob2, overwrite = TRUE)
# usethis::use_data(data_quadas, overwrite = TRUE)
# usethis::use_data(data_robins, overwrite = TRUE)
|
79391dc1acaf8f9946f8a75d9e9a3dd37987020d | 5fe207e5b903cae727b8b006b9063599d70bc9cd | /man/BIOdry-package.Rd | eed69e50c3bdba9dde45bc35227a1de01deed9de | [] | no_license | cran/BIOdry | dc36fa1158684e5fc79c19ec83f82252ad5690b5 | a8c849bb7b577debcabe177afde5d9ed9232f8a4 | refs/heads/master | 2022-05-11T09:58:57.665427 | 2022-05-02T18:52:02 | 2022-05-02T18:52:02 | 48,850,280 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 951 | rd | BIOdry-package.Rd | \name{BIOdry-package}
\alias{BIOdry-package}
\alias{BIOdry}
\docType{package}
\title{Multilevel Modeling of Dendroclimatical Fluctuations}
\description{Multilevel ecological data series (MEDS) are sequences of observations ordered according to temporal/spatial hierarchies that are defined by sample designs, with sample variability confined to ecological factors. Dendroclimatic MEDS of tree rings and climate are modeled into normalized fluctuations of tree growth and aridity. Modeled fluctuations (model frames) are compared with Mantel correlograms on multiple levels defined by sample design. Package implementation can be understood by running examples in modelFrame(), and muleMan() functions.}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{BIOdry}
\packageIndices{BIOdry}
Maintainer: \packageMaintainer{BIOdry}
}
\author{Wilson Lara <wilarhen@gmail.com>, Felipe Bravo <fbravo@pvs.uva.es>}
\keyword{ package }
|
4e5cfb1dcfcba7b3dab6f581cf33fef61934a51e | 1514af7a019aefbfb3bb73c34293c29b80eae65d | /compound.R | 50cc55f4b6ab29363dada45fd1e0362f4f05ed8e | [] | no_license | jhurtado13/chem160module13 | 52b6579a16207757c7f68c94607b9dc32123e836 | 2b7cb131b38db997581bb5a1cce7c436ac8308fb | refs/heads/main | 2023-01-07T16:16:16.056441 | 2020-11-02T07:08:03 | 2020-11-02T07:08:03 | 309,285,618 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 167 | r | compound.R | for (n in 1:30) {
if (n>20 && n%%2==0) {
cat(n,' is an even amd greater than 20\n')}
if (n%%10==0 || n<10) {
cat(n," is a multiple of 10 or less than 10\n")
}
} |
612c7bc50d3df2434d70249993fda152977c9c3a | b98c4a73461821e550e4e6bb932dd7f382156225 | /aula6/exercicio.r | 08c1f8fe78f62e71f5ccddbd5c112553cc48d560 | [] | no_license | kinderferraz/pfuel | d00542ebf49ca9e606cc3b404180ceb294de23a6 | 2fdc35c34f04dac3a13286eee1e81d43f07de780 | refs/heads/master | 2023-03-10T16:33:14.824766 | 2021-02-23T12:52:05 | 2021-02-23T12:52:05 | 341,554,196 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,348 | r | exercicio.r | ## Alkindar Rodrigues -- SP3029956
## Aula 6
## exercicio 1 A função Filtro atua aplicando um predicado (uma função
## que retorna verdadeiro ou falso) a cada elemento de vetor e
## selecionando aqueles que tem resultado Verdadeiro para uma cópia do
## vetor. Assim, apenas os valores que satisfazem o predicado são retidos.
## exercicio 2
## Exercise 2: predicate -- TRUE when n is odd (works elementwise on vectors).
impar <- function(n){
  (n %% 2) != 0
}
## exercicio 3
# Odd values from 1..10, using the impar() predicate defined above.
impares <- Filter(impar, c(1:10))
## exercicio 4
## a
## Exercise 4a: predicate -- TRUE when n exceeds the threshold `par`.
maior <- function(n, par){
  par < n
}
## b
## Exercise 4b: predicate -- TRUE when n is an exact multiple of `par`.
multiplo <- function(n, par){
  resto <- n %% par
  resto == 0
}
## c
## Exercise 4c: predicate -- TRUE when n is below the threshold `par`.
menor <- function(n, par){
  par > n
}
## d -- fiz este exercicio usando closure, pois achei mais elegante
## nao reparei que mais abaixo há um exercicio para usar closure.
## Exercise 4d: closure combining the three predicates. Returns a function
## of a scalar n that checks n > maiores, n divisible by multiplos, and
## n < menores, in that order (the helper calls are inlined; behaviour is
## unchanged).
predCombinados <- function(maiores, menores, multiplos){
  function (n){
    (n > maiores) && (n %% multiplos == 0) && (n < menores)
  }
}
## Keeps only the elements of `arr` that satisfy the combined predicate
## produced by predCombinados() for the given bounds.
multiplosFiltros <- function(arr, maiores, menores, multiplos) {
  Filter(predCombinados(maiores, menores, multiplos), arr)
}
multiplosFiltros(c(1:100), 24, 76, 5)
## exercicio 5
## Filters `arr` down to the values that are greater than `maiores`, smaller
## than `menores`, and exact multiples of `multiplos`.
##
## Bug fix: the original ignored its `arr` argument and always filtered the
## hard-coded vector c(1:100); it now filters the vector it is given.  The
## three comparisons are inlined so the function is self-contained.
filtroComplexo <- function(arr, maiores, menores, multiplos) {
  Filter(function(n) {
    n > maiores && n < menores && n %% multiplos == 0
  }, arr)
}
filtroComplexo(c(1:100), 24, 76, 5)
## exercicio 6 -> 4)d
## exercicio 7
## A função map aplica uma função a uma sequencia de elementos de um
## ou mais vetores e retorna outro vetor com o resultado das aplicações,
## em ordem. Quando mais de um vetor está presente, o primeiro
## argumento da função é proveniente do primeiro vetor, o segundo
## argumento do segundo vetor e assim sucessivamente. A documentação
## da linguagem R explica que os vetores são reciclados, isto é, se um
## for menor que os demais, ele volta para o início. Outras linguagens
## não implementam este comportamento.
## exercico 8
## como a função é um predicado, isto é, seu retorno é true ou false,
## estes valores sao guardados no vetor.
mapa <- Map(predCombinados(maiores=24, menores=26, multiplos=5),
c(1:100))
## exercicio 8
## Doubles its argument; works elementwise on vectors.
duplica <- function(n) n * 2
## exercicio 9
duplicados <- unlist(Map(duplica, c(1:10)))
## exercicio 10
## A função reduce atua aplicando uma função a cada elemento de uma
## sequencia e acumulando o resultado em um valor. Para tanto, a
## função deve aceitar dois parametros:
## - um valor da sequencia
## - um acumulador
## este segundo parametro recebe o resultado da aplicação anterior da
## função. Segundo a implementação, ele pode receber tanto um valor
## inicial definido pelo programador quanto um segundo elemento da
## sequencia.
## exercicio 11
## Hand-rolled Reduce(): folds `f` over `seq` left to right, seeding the
## accumulator with the first element.  `f` receives (element, accumulator),
## matching the convention described in exercise 10 above.
##
## Bug fixes vs. the original:
##  * the loop body assigned to a typo'd variable `aac`, so the accumulator
##    was never updated and the first element was always returned;
##  * seq(2, length(seq)) counts *down* (2, 1) when `seq` has one element,
##    re-processing element 1; the loop is now skipped for length <= 1.
red <- function(f, seq) {
  acc <- seq[1]
  if (length(seq) > 1) {
    for (idx in 2:length(seq)) {
      acc <- f(seq[idx], acc)
    }
  }
  acc
}
## exercicio 12
soma <- Reduce(sum, c(1:10))
## exercicio 13
library(sqldf)
cliente <- sample(c(1:10), 100, replace=TRUE)
compras <- sample(c(0:1000), 100, replace=TRUE)
df <- data.frame(cliente, compras)
dados <- sqldf("
select
cliente,
sum(compras) as total, -- reduce
case
when sum(compras) > 6000 then 0.2
when sum(compras) > 4000 then 0.1
end as desconto -- map
from
df
where cliente > 5 -- filter
group by
cliente
")
## exercicio 14
## A programação imperativa é baseada numa sequencia de
## instruções passadas ao computador. Nesta forma de progamação, nao há
## estruturas complexas (funções, objetos, laços) . O código
## fica, assim, reduzido às instruções que a máquina deve executar, e
## cada instrução altera o estado do programa, alterando os valores
## armazenados fisicamente.
## exercicio 15
## A programação procedural é aquela baseada na modularização do
## código em funções ou procedures. Existem também outras estruturas
## de controle do código, como condicionais e laços de
## repetição. Neste paradigma, existe o conceito de escopo de uma
## variável, que restringe quais partes do código tem acesso de
## leitura e escrita sobre uma variável, aumentando a segurança do
## código.
## exercicio 16
## A programação declarativa, ao contrário das anteriores, define
## apenas a lógica a ser usada para produzir o resultado, sem definir
## as estruturas de controle usadas para isso. Este paradigma, que
## inclui linguagens como SQL, html, e alguns frameworks em
## JavaScript, define a forma do resultado final e delega ao
## compilador ou interpretador que defina as sequencias de controle
## necessárias para chegar ao resultado.
## exercicio 17
## Uma função pura é aquela que independe do estado do programa para
## executar: ela nao o altera nem o lê, e seu resultado depende apenas
## do valor que lhe é passado como entrada.
## exercicio 18
## Uma função de alta ordem é aquela que pode receber outra função
## como parametro de entrada, e aplicá-la conforme achar necessário,
## ou mesmo modificá-la. Map, Reduce e Filter são funções de alta
## ordem clássicas.
## exercicio 19
## Closure é uma forma de definição de funções em tempo de execução, e
## é usada quando uma função depende de parametros ainda desconhecidos
## para ser definida. Para definir uma closure, uma função deve
## retornar outra, e com isso esta função tem acesso ao espaço de
## variáveis da função mãe.
|
b74ee8dc6abebb2c7df8ef243f17c3d070c98e69 | 76773e57ec1081a29afb1c06baee47628a147c03 | /MGSC_410_Paralellization.R | f972d17163599aabb0d9da8e18e0e8882c23fa29 | [] | no_license | jonhersh/MGSC_410 | 793eb2df2cd657b999ff59015d5092510995987f | a19f1e15e392fe64ab1cb70db8857ddfdcdfffff | refs/heads/master | 2021-02-05T18:33:49.731687 | 2020-02-28T17:15:36 | 2020-02-28T17:15:36 | 243,815,901 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,412 | r | MGSC_410_Paralellization.R | # install.packages("doParallel")
## doParallel package
library(doParallel)
library('microbenchmark')
numCores <- detectCores()
numCores
# register your cores to doParallel
# note on windows machines can only set = 1 :(
registerDoParallel(cores = numCores)
# regular for loop
for(i in 1:3){
print(sqrt(i))
}
# paralellized for loop!
foreach(i=1:3) %dopar% sqrt(i)
# a real example with bootstrapping
## Sieve of Eratosthenes: returns all primes <= n as an integer vector.
##
## Fixes vs. the original:
##  * n < 2 no longer errors; it returns integer(0);
##  * 2 <= n <= 3 no longer hits the descending 2:1 loop, which made
##    seq.int() fail with "wrong sign in 'by' argument";
##  * marking starts at p^2 (every smaller composite was already struck by a
##    smaller prime factor) and composite bases are skipped entirely.
getPrimeNumbers <- function(n) {
  n <- as.integer(n)
  if (is.na(n) || n < 2L) {
    return(integer(0))
  }
  primes <- rep(TRUE, n)
  primes[1L] <- FALSE
  limit <- floor(sqrt(n))
  if (limit >= 2) {
    for (p in 2:limit) {
      if (primes[p]) {
        primes[seq.int(p * p, n, p)] <- FALSE
      }
    }
  }
  which(primes)
}
# use the prime number
getPrimeNumbers(100)
index <- 10:10000
library(tictoc)
tic()
results <- c()
for(i in index){
results[[i]] <- getPrimeNumbers(i)
}
toc()
# or if you prefer the microbenchmark package
# devtools::install_github("joshuaulrich/microbenchmark")
microbenchmark::microbenchmark(
for(i in index){ results[[i]] <- getPrimeNumbers(i) }
)
# let's try now with doParallel
library(doParallel)
numCores <- detectCores()
registerDoParallel(cores = numCores)
tic()
results <- foreach(i = 10:10000) %dopar% getPrimeNumbers(i)
toc()
# what if we want to store the results from each foreach loop?
results <- foreach(1:100, .combine = data.frame) %dopar% {
# do something
}
# generate 1000 random draws of length 1000
tic()
results <- foreach(i=1:1000, .combine = data.frame) %dopar% {
data.frame(rands = rnorm(1000))
}
toc()
head(results)
# caret package
# install.packages('caret')
library(caret)
# let's get some real data
library(useful)
library(caret)
install.packages('Ecdat')
data(Griliches, package = "Ecdat")
wages <- Griliches
wageFormula <- lw80 ~ age80 + school80 + expr80 + iq + rns80 + mrt80 +
smsa80 + tenure80 + med + kww
# example trainControl function
ctrl1 <- trainControl(method = "repeatedcv", repeats = 5,
allowParallel = TRUE)
# look up model for rf
modelLookup('rf')
modelLookup('xgbTree')
# grid of mtry values to try
rfGrid <- expand.grid(mtry = seq(1, 10, 1))
rfGrid
#example train function
rfTrain <- train(wageFormula,
data = wages,
method = "rf",
trControl = ctrl1,
tuneGrid = rfGrid,
nthread = 4)
# confusion matrix
confusionMatrix(data = test_set$pred,
reference = test_set$obs)
# lift curves
trellis.par.set(caretTheme())
lift_obj <- lift(Class ~ FDA +
LDA + C5.0,
data = lift_results)
plot(lift_obj, values = 60,
auto.key = list(columns = 3,
lines = TRUE,
points = FALSE))
# calibration curves
trellis.par.set(caretTheme())
cal_obj <- calibration(Class ~ FDA + LDA + C5.0,
data = lift_results,
cuts = 13)
plot(cal_obj, type = "l", auto.key = list(columns = 3,
lines = TRUE,
points = FALSE))
# example problem
data(Cracker, package = "Ecdat")
brand <- Cracker
modelLookup("xgbTree")
crackerFormula <- paste("choice ~ ", paste(names(brand)[-c(1, 14)],
collapse = " + "))
ctrl <- trainControl(method = "repeatedcv", repeats = 2, number = 5,
summaryFunction = multiClassSummary,
classProbs = TRUE, allowParallel = FALSE)
crackerGrid <- expand.grid(nrounds = 100,
max_depth = c(2, 4, 8, 10),
eta = c(0.01, 0.1, 0.2),
gamma = 0.5, colsample_bytree = 1,
min_child_weight = 1,
subsample = 0.7)
boostCracker <- train(choice ~ disp.sunshine + disp.kleebler + disp.nabisco +
disp.private + feat.sunshine + feat.kleebler + feat.nabisco +
feat.private + price.sunshine + price.kleebler + price.nabisco +
price.private,
data = brand,
method = "xgbTree",
metric = "Accuracy",
trControl = ctrl,
tuneGrid = crackerGrid,
nthread = 4)
plot(boostCracker)
|
5b52e9b394eec84e1b03a5531020db72350c42cc | b249e9e065611db14c3f7c60998c887ddfdfbec2 | /Validity analysis_CFA.R | b93cb6e5f53801ef5891158a4b0507eaf4c74f7b | [] | no_license | maelliott1010/CFA-ADHD-Park_et_al. | df227c4387aca3f066c2a5eba849799b609c336a | 46f201570348d1726d7df7c50779b763accfb035 | refs/heads/master | 2020-03-10T05:08:12.071897 | 2018-05-15T13:01:51 | 2018-05-15T13:01:51 | 129,210,230 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,191 | r | Validity analysis_CFA.R | Modsa3V<-
'INATT=~ NA*CuSS_S1 + CuSS_S3 + CuSS_S5 + CuSS_S7 + CuSS_S9 + CuSS_S11 + CuSS_S13 + CuSS_S15 + CuSS_S17
IMP=~ NA*CuSS_S14 + CuSS_S16 + CuSS_S18
HYP=~ NA*CuSS_S2 + CuSS_S4 + CuSS_S6 + CuSS_S8 + CuSS_S10 + CuSS_S12
INATT~~1*INATT
IMP~~1*IMP
HYP~~1*HYP
INATT~~HYP
IMP~~INATT
HYP~~IMP
Education ~ INATT+IMP+HYP
BSI_Hostility ~ INATT+IMP+HYP
BSI_Depression ~ INATT+IMP+HYP
APQ_PPComp ~ INATT+IMP+HYP
SELF_APQ_ID ~ INATT+IMP+HYP
'
Runsa3V <- cfa(Modsa3V, dat=BifactorDataset_APQcomposite, missing="FIML", estimator="MLR")
summary(Runsa3V,standardized=TRUE,fit.measures=TRUE, rsquare=TRUE)
Modsa4V <- '
ADHD =~ CuSS_S1+CuSS_S2+CuSS_S3+CuSS_S4+CuSS_S5+CuSS_S6+CuSS_S7+CuSS_S8+CuSS_S9+CuSS_S10+CuSS_S11+CuSS_S12+CuSS_S13+CuSS_S14+CuSS_S15+CuSS_S16+CuSS_S17+CuSS_S18
INATT=~CuSS_S3 + CuSS_S1 + CuSS_S5 + CuSS_S7 + CuSS_S9 + CuSS_S11 + CuSS_S13 + CuSS_S15 + CuSS_S17
HYP=~CuSS_S2 + CuSS_S4 + CuSS_S6 + CuSS_S8 + CuSS_S10 + CuSS_S12 + CuSS_S14+CuSS_S16+CuSS_S18
ADHD~~0*INATT
ADHD~~0*HYP
INATT~~0*HYP
Education ~ ADHD+INATT+HYP
BSI_Hostility ~ ADHD+INATT+HYP
BSI_Depression ~ ADHD+INATT+HYP
APQ_PPComp ~ ADHD+INATT+HYP
SELF_APQ_ID ~ ADHD+INATT+HYP
'
Runsa4V <- cfa(Modsa4V, dat=BifactorDataset_APQcomposite,estimator="MLR", missing="FIML",std.lv=TRUE)
summary(Runsa4V,standardized=TRUE,fit.measures=TRUE, rsquare=TRUE)
Modsa5V <- '
ADHD =~ CuSS_S1+CuSS_S2+CuSS_S3+CuSS_S4+CuSS_S5+CuSS_S6+CuSS_S7+CuSS_S8+CuSS_S9+CuSS_S10+CuSS_S11+CuSS_S12+CuSS_S13+CuSS_S14+CuSS_S15+CuSS_S16+CuSS_S17+CuSS_S18
INATT=~CuSS_S3 + CuSS_S1 + CuSS_S5 + CuSS_S7 + CuSS_S9 + CuSS_S11 + CuSS_S13 + CuSS_S15 + CuSS_S17
HYP=~CuSS_S2 + CuSS_S4 + CuSS_S6 + CuSS_S8 + CuSS_S10 + CuSS_S12
IMP=~CuSS_S14+CuSS_S16+CuSS_S18
ADHD~~0*INATT
ADHD~~0*HYP
ADHD~~0*IMP
INATT~~0*HYP
INATT~~0*IMP
HYP~~0*IMP
Education ~ INATT+IMP+HYP+ADHD
BSI_Hostility ~ INATT+IMP+HYP+ADHD
BSI_Depression ~ INATT+IMP+HYP+ADHD
APQ_PPComp ~ INATT+IMP+HYP+ADHD
SELF_APQ_ID ~ INATT+IMP+HYP+ADHD
'
Runsa5V <- cfa(Modsa5V, dat=BifactorDataset_APQcomposite,estimator="MLR", missing="FIML",std.lv=TRUE)
summary(Runsa5V,standardized=TRUE,fit.measures=TRUE, rsquare=TRUE) |
3df25eed515f3cb37aaad645a7091fe64a0d1e75 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052239-test.R | 93ebb0fc574adc2c4af5bb6207074773f42991e4 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,533 | r | 1610052239-test.R | testlist <- list(rates = 3.60253365100228e-306, thresholds = c(3.90913259233617e+304, 3.65784678046935e-306, 1.01955734498014e-314, 2.12199579096527e-314, -5.48612406879369e+303, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -1.26836459351501e-30, 4.00930495879766e-306, -1.26826378018642e-30, 3.94108708470682e-312, 2.92300471201586e+48, NaN, -2.52435489670724e-29, 1.53948714210656e-314, -6.87272040335292e-310, -7.3933019237596e-287, -8.81443095004589e-280, 5.67154552225164e-310, -2.08936022487344e-289, 7.7456526310928e-304, -1.3906499427506e-309, -9.11306812174856e-306, -5.66292573813001e+303, -1.26836459270255e-30, 3.66148293307183e-314, -1.2341419504043e-30, 3.66343140688495e-305, -1.28184610231322e-30, 9.37339630957792e-312, 1.70257006040729e-313, -3.9759940224262e-34, NaN), x = c(-3.22066789448518e-33, -1.26836459270829e-30, 2.1788294981599e-321, 3.6634314009933e-305, -5.46354689882308e-108, 7.21427931199735e-304, -3.13227758653431e-294, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, 4.46078843844753e-99, 7.19919233962066e-310, 2.12131683540626e-309, 4.36707825033567e-306, -6.36390391980068e+305, 3.65207087186091e-306, NaN, NaN, 1.39067049844331e-309, 0, 0, 0, 9.70418614810987e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 
NaN, NaN, -5.46382419373016e-108, -5.46354690059085e-108, -5.46354690059085e-108, -1.46889178304203e-110, -6.36390391980061e+305, 3.65207087186091e-306, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059072e-108, -3.32706936773014e-111, NaN, 3.78573811262366e-270, 7.29112072938316e-304, 3.32652954498878e-111, NaN, NaN, NaN, -7.55234771438901e-287, 2.56647495436053e-301, -3.01363922187472e-135, -2.30331110816476e-156, -2.30331110816477e-156, -2.29449120144681e-156, 3.65207087186085e-306, -5.46354690059072e-108, 1.25817419546951e-60, -5.66365833706557e+303, 5.43230922486616e-312, -3.9759940224262e-34, -1.26823100659151e-30, -1.26836459270829e-30, 2.39422219319154e-301, -2.87284834993229e-188, -2.35322190464904e-185, -5.77729739522216e-275, 7.31960296499841e-304, NaN, 6.98147805439749e-310, -2.67120456398365e+305, 1.39066109167247e-309, 0, 0, 0, 9.23492568228087e-311, 5.79909433230408e-316, 1.44804023176556e+190, -1.75590191507535e+306 ))
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
cb8900b35b9e0861c04b1d519fda085d97b09a33 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hydroApps/examples/ARPIEM2012.sim.Lmoments.Rd.R | ab583c57a02fd522b3bd1ced3ab108de3383165f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 367 | r | ARPIEM2012.sim.Lmoments.Rd.R | library(hydroApps)
### Name: ARPIEM2012.sim.Lmoments
### Title: Generazione MonteCarlo degli L-momenti
### Aliases: ARPIEM2012.sim.Lmoments
### ** Examples
## Not run:
##D require(nsRFA)
##D
##D ARPIEM2012.sim.Lmoments(Qind.type="C", LCV_LCA.type="C_C", Qind=10, sdQind=2,
##D LCV=0.25, sdLCV=0.08, LCA=0.4, sdLCA=0.21, n=1000)
##D
## End(Not run)
|
206f8456c1cb70410d58b6ac8e7bd363e7939dad | a9c2a33f984e7972692be4b44929d10c610077ac | /man/integrand_mtot.Rd | 70a625498643b4230fff7584b013606f82189bc9 | [] | no_license | dalerxli/planar | 8d7d2260e3b4cba09477ddea5de3ee9ba741332a | 7fd09b664a2aa53f3665be9b0f63acd73f0d747e | refs/heads/master | 2021-05-31T03:05:24.953902 | 2016-02-29T01:29:53 | 2016-02-29T01:29:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 821 | rd | integrand_mtot.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dipole_integrand.r
\name{integrand_mtot}
\alias{integrand_mtot}
\title{integrand_mtot}
\usage{
integrand_mtot(d = 10, q, wavelength, epsilon = list(incident = 1.5^2, 1^2),
thickness = c(0, 0))
}
\arguments{
\item{d}{distance in nm}
\item{q}{normalised in-plane wavevector in [0, infty)}
\item{wavelength}{wavelength in nm}
\item{epsilon}{list of dielectric functions}
\item{thickness}{list of layer thicknesses}
}
\description{
Total decay rate of a dipole near a multilayer interface
}
\details{
Integrand without transformation of variables
}
\author{
baptiste Auguie
}
\seealso{
Other integrands dipole: \code{\link{integrand_nr1}};
\code{\link{integrand_nr2}}; \code{\link{integrand_nr3}};
\code{\link{integrand_rad}}
}
|
cda98743411e20573818b6fab8488b749f2ff415 | c88e833de8a3fb8a5b3665d3390dc83a0f14aad2 | /abra_lin_regiok.R | 0653aa85bbd14a2e3314e53703102b7107466c31 | [] | no_license | mengjiacsam/szakdolgozat-eltecon | 89b460c5a76c59cfc6ff5d147ef99621d9a163df | 4074b9fbabf85c6bcc490f1fa464536350ab9005 | refs/heads/master | 2023-03-28T22:01:17.549326 | 2021-04-11T12:53:56 | 2021-04-11T12:53:56 | 356,861,193 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,675 | r | abra_lin_regiok.R | # első lépések
library(data.table)
library(ggplot2)
library(RColorBrewer)
library(interplot)
require(ggthemes)
data <- fread("C:/Users/Csaa/Documents/szakdoga_új/R-hez táblázít/mindennel.csv")
# Lineáris regressziós ábrák
# Megtakarítás és türelem
ggplot(data, aes(patience,Saving, colour= Region))+
geom_point()+
xlab("Türelem")+
ylab("Megtakarítás")+
labs(title = "Megtakarítás és türelem régiók szerint", color="Jelmagyarázat")+
theme(axis.title.x = element_text(size = 10),
axis.title.y = element_text(size = 10),
plot.title = element_text(size = 12, face = "bold"),
legend.title = element_text(size = 7),
legend.text = element_text(size = 7))+
stat_smooth(method="lm", se=FALSE, color='black')+
scale_color_manual(values = c("#999999", "magenta", "navy", "deeppink","lawngreen", "yellow","turquoise1"),
labels=c("East Asia & Pacific"="Kelet-Ázsia és Csendes-óceáni térség",
"Europe & Central Asia"="Európa és Eurázsia",
"Latin America & Caribbean"="Dél-Amerika",
"Middle East & North Africa"="Közel-Kelet és Észak-Afrika",
"North America"="Észak-Amerika",
"South Asia"="Dél-Ázsia",
"Sub-Saharan Africa"="Szubszaharai Afrika"))
# Megtakarítás és pénzügyi beilleszkedés
ggplot(data, aes(Accounts,Saving, colour= Region)) +
geom_point() +
xlab('Pénzügyi beilleszkés') +
ylab("Megtakarítás") +
labs(title = "Megtakarítás és pénzügyi beilleszkedés régiók szerint", color="Jelmagyarázat")+
theme(axis.title.x = element_text(size = 10),
axis.title.y = element_text(size = 10),
plot.title = element_text(size = 12, face = "bold"),
legend.title = element_text(size = 7),
legend.text = element_text(size = 7))+
stat_smooth(method="lm", se=FALSE, color='black')+
scale_color_manual(values = c("#999999", "magenta", "navy", "deeppink","lawngreen", "yellow","turquoise1"),
labels=c("East Asia & Pacific"="Kelet-Ázsia és Csendes-óceáni térség",
"Europe & Central Asia"="Európa és Eurázsia",
"Latin America & Caribbean"="Dél-Amerika",
"Middle East & North Africa"="Közel-Kelet és Észak-Afrika",
"North America"="Észak-Amerika",
"South Asia"="Dél-Ázsia",
"Sub-Saharan Africa"="Szubszaharai Afrika"))
|
9615bedcb947f8685bf0c5a47abc240986a5fbf7 | a069ac633de89874a17f89a5059dedaba91724b5 | /plot2.R | 4bfe7b693573b18f82daf160c2f534eb0ee1b927 | [] | no_license | Anticlue/ExData_Plotting1 | 262bb3f3d561fd3e5496880dc0f0b24bf70625bb | 2ceaad662fc40ea6c5d9162e72bf2c10286f8076 | refs/heads/master | 2021-08-08T06:09:56.972265 | 2017-11-09T18:16:45 | 2017-11-09T18:16:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,064 | r | plot2.R | ## plot2.R
## The purpose of this code is to created the second plot for the
## Coursera Exploratory Data Analysis first project assignment.
## Initial Setup
setwd("C:/Users/antic/ExData_Plotting1")
## Downloading and unzipping the dataset
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if(!file.exists("./data")){
dir.create ("./data")}
zipfileName <- "./data/powerdataset.zip"
download.file(fileURL, zipfileName)
unzip(zipfileName, exdir="./data")
powerConsumptionData <- read.csv("./data/household_power_consumption.txt", header=T, sep = ';', na.strings="?")
nov1and2 <- subset(powerConsumptionData, Date %in% c("1/2/2007","2/2/2007"))
nov1and2$Date <- as.Date(nov1and2$Date, format="%d/%m/%Y")
nov1and2 <- transform(nov1and2, timeStamp=as.POSIXct( paste(Date,Time)), "%d/%m/%Y %H:%M:%S")
plot(nov1and2$timeStamp, nov1and2$Global_active_power, type="l", ylab="Global Active Power(kilowatts)", xlab="")
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
|
aebf2ad6ae514a636c13fe579560ef9a30d1897f | 5832cb002cb8771d94c4d40a405dfa22c054698d | /cachematrix.R | 3d38d3b9d00e84da39a0e2e1002a25642fcafd99 | [] | no_license | pmcray/ProgrammingAssignment2 | 7b519208b2ff02b9b84a7d7825789931aee6610b | 2187145e289482d2a0efe57f9790c1f6e6d91852 | refs/heads/master | 2020-12-11T04:14:02.799188 | 2015-01-26T00:19:41 | 2015-01-26T00:19:41 | 29,628,265 | 0 | 0 | null | 2015-01-21T23:35:03 | 2015-01-21T23:35:03 | null | UTF-8 | R | false | false | 4,302 | r | cachematrix.R | ## makeCacheMatrix and cachesolve are a pair of functions that have the support the
## creation of and access to the cache of the inverse of a matrix. Inverting a matrix is
## a common computational task. Matrices can be very large and calculating the inverse
## is a potentially computationally intensive process. It can therefore makes sense to
## cache the inverse of a (typically large) matrix if it is used repeatedly in a set of
## calculations.
## The makeCacheMatrix function takes an ordinary numeric matrix and returns an special
## matrix object in the form of a list, the inverse of which is calculated, cached and
## accessed by the cacheSolve function.
makeCacheMatrix <- function(x = matrix()) {
  ## Wraps a matrix in a list of accessor closures so its inverse can be
  ## cached alongside it.  The returned object exposes four functions:
  ##   set(y)           -- replace the stored matrix and drop any cached inverse
  ##   get()            -- return the stored matrix
  ##   setinverse(inv)  -- store a computed inverse in the cache
  ##   getinverse()     -- return the cached inverse, or NULL if not yet set
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # old inverse no longer matches the new matrix
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## The cacheSolve function takes a CacheMatrix object produced by the makeCacheMatrix
## function as input. It returns the inverse of the matrix wrapped by the cacheMatrix
## object. If the CacheMatrix object has not been passed to the function before, it
## calculates the inverse, stores it in the cache and returns it and also stores the
## CacheMatrix object. If the CacheMatrix object has been passed to the function before, it
## checks to see if the CacheMatrix object has changed. If it has changed, it calculates
## the inverse, returns it and stores it in the cache, and stores the CacheMatrix object.
## If the CacheMatrix object has not changed, the function retrieves the inverse from the
## cache and returns it.
## Returns the inverse of the matrix wrapped by a makeCacheMatrix() object.
## On the first call the inverse is computed with solve(), stored in the
## object's cache, and returned; subsequent calls return the cached value.
##
## Improvement vs. the original: the global `last_x` bookkeeping is gone.
## The cache lives inside each CacheMatrix object and set() already clears
## it whenever the matrix changes, so comparing against a globally stored
## copy was redundant -- and the `<<-` write polluted the caller's
## environment (and wrongly invalidated caches when alternating between
## two different CacheMatrix objects).
##
## @param x   object created by makeCacheMatrix()
## @param ... further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
7d2160127b6540074ce0f8eb9d58c52b59e2aeb6 | 551d790fecd6637414f472af389827e00f75bc22 | /man/xmuOldPlotIP.Rd | d2b3b5274b424a65c920466593814602a0bf1e93 | [] | no_license | MATA62N/umx | 667a3f227b7153e0664da6a7cb6a15cb3a7d66e5 | 5bcdffb28c699390efa344519cedfdda3d7f9e58 | refs/heads/master | 2023-07-27T00:36:00.321493 | 2021-09-08T14:58:49 | 2021-09-08T14:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,901 | rd | xmuOldPlotIP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmuOldPlotIP.r
\name{xmuOldPlotIP}
\alias{xmuOldPlotIP}
\title{Draw a graphical figure for a Independent Pathway model}
\usage{
xmuOldPlotIP(
x = NA,
file = "name",
digits = 2,
means = FALSE,
std = TRUE,
format = c("current", "graphviz", "DiagrammeR"),
SEstyle = FALSE,
strip_zero = TRUE,
...
)
}
\arguments{
\item{x}{The \code{\link[=umxIP]{umxIP()}} model to plot}
\item{file}{The name of the dot file to write: NA = none; "name" = use the name of the model}
\item{digits}{How many decimals to include in path loadings (defaults to 2)}
\item{means}{Whether to show means paths (defaults to FALSE)}
\item{std}{Whether to standardize the model (defaults to TRUE)}
\item{format}{= c("current", "graphviz", "DiagrammeR")}
\item{SEstyle}{Report "b (se)" instead of "b [lower, upper]" (Default)}
\item{strip_zero}{Whether to strip the leading "0" and decimal point from parameter estimates (default = TRUE)}
\item{...}{Optional additional parameters}
}
\value{
\itemize{
\item optionally return the dot code
}
}
\description{
Options include digits (rounding), showing means or not, standardization, and which output format is desired.
}
\examples{
\dontrun{
require(umx)
data(GFF)
mzData = subset(GFF, zyg_2grp == "MZ")
dzData = subset(GFF, zyg_2grp == "DZ")
selDVs = c("gff","fc","qol","hap","sat","AD") # These will be expanded into "gff_T1" "gff_T2" etc.
m1 = umxIP(selDVs = selDVs, sep = "_T", dzData = dzData, mzData = mzData)
xmuOldPlotIP(m1, file = NA)
}
}
\references{
\itemize{
\item \url{https://tbates.github.io}
}
}
\seealso{
\itemize{
\item \code{\link[=plot]{plot()}}, \code{\link[=umxSummary]{umxSummary()}} work for IP, CP, GxE, SAT, and ACE models.
}
\itemize{
\item \code{\link[=umxIP]{umxIP()}}
}
Other umx deprecated:
\code{\link{umx-deprecated}}
}
\concept{umx deprecated}
|
10ed6293f29720cbd0eee1b62d108b41068d9aca | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/dave/examples/vrsit.Rd.R | 470f9176a79ecbeed4740770ed58808b415ae7e7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 180 | r | vrsit.Rd.R | library(dave)
### Name: vrsit
### Title: Vraconnaz time series data, site factors and dates
### Aliases: vrsit
### Keywords: datasets
### ** Examples
data(vrsit)
str(vrsit)
|
80ff5b92c8fddbde7d7775868a8e2647c1ba1317 | 591200b038b46e9639196f06eeeea4d34439a826 | /arima.R | 20a5525e255abc0cb51911432467c07c5514f5ee | [] | no_license | riship2009/Machine_Learning_models_R.programming | b8cbc9c615afd057565e8ce86995f6a9cbc41f51 | 62db5344755cfd059ca8b8f8c31d225301a1a3cc | refs/heads/main | 2023-02-05T16:05:36.375246 | 2020-12-19T13:31:01 | 2020-12-19T13:31:01 | 322,853,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 991 | r | arima.R | temperature = nottem
## Quick inspection of the data (`temperature` is assigned from the built-in
## nottem series just above: monthly air temperatures, Nottingham 1920-1939).
head(temperature,12)
tail(temperature,12)
str(temperature)
class(temperature)
# generating time-series with 12 observations per unit of time (monthly data)
time_series <- ts(temperature, start=c(1920,1), end=c(1939,12), frequency = 12)
time_series
plot(time_series)
# decomposition of time-series into level, trend, seasonality, and noise components
temp_decom <- decompose(time_series)
# seasonally adjusted series: observed minus the estimated seasonal component
temp_decom_seasonal <- time_series - temp_decom$seasonal
plot(temp_decom)
plot(temp_decom_seasonal)
# Durbin-Watson test for autocorrelation in the residuals of a regression
# of the series on its seasonal component
install.packages("lmtest")
library(lmtest)
model <- lm(time_series ~ temp_decom$seasonal)
summary(model)
dwtest(model)
# for ARIMA modelling
install.packages("forecast")
library(forecast)
# generating an ARIMA model
auto.arima(time_series) # to determine the order of differencing
# seasonal ARIMA with order (1,0,2) and seasonal order (1,1,2)
reg <- arima(time_series,order=c(1,0,2),seasonal=c(1,1,2))
reg
temp_forecast <- forecast(time_series, model=reg, h=8) # forecasting next 8 months
temp_forecast
|
d0fb96ff31eac4d7856e89e16761c2d463509977 | e56da52eb0eaccad038b8027c0a753d9eb2ff19e | /man/KeptPaths.Rd | 940009a1da41aa1c8ec26ab7f371bc1dbd0ade5c | [] | no_license | ms609/TreeTools | fb1b656968aba57ab975ba1b88a3ddf465155235 | 3a2dfdef2e01d98bf1b58c8ee057350238a02b06 | refs/heads/master | 2023-08-31T10:02:01.031912 | 2023-08-18T12:21:10 | 2023-08-18T12:21:10 | 215,972,277 | 16 | 5 | null | 2023-08-16T16:04:19 | 2019-10-18T08:02:40 | R | UTF-8 | R | false | true | 2,087 | rd | KeptPaths.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KeptPaths.R
\name{KeptPaths}
\alias{KeptPaths}
\alias{KeptPaths.data.frame}
\alias{KeptPaths.matrix}
\title{Paths present in reduced tree}
\usage{
KeptPaths(paths, keptVerts, all = TRUE)
\method{KeptPaths}{data.frame}(paths, keptVerts, all = TRUE)
\method{KeptPaths}{matrix}(paths, keptVerts, all = TRUE)
}
\arguments{
\item{paths}{\code{data.frame} of paths in master tree, perhaps generated using
\code{\link[=PathLengths]{PathLengths()}}.}
\item{keptVerts}{Logical specifying whether each entry is retained in the
reduced tree, perhaps generated using \code{\link[=KeptVerts]{KeptVerts()}}.}
\item{all}{Logical: if \code{TRUE}, return all paths that occur in the reduced
tree; if \code{FALSE}, return only those paths that correspond to a single edge.
that correspond to edges in the reduced tree.
Ignored if \code{paths} is a matrix.}
}
\value{
\code{KeptPaths()} returns a logical vector specifying whether each path
in \code{paths} occurs when \code{keptVerts} vertices are retained.
}
\description{
Lists which paths present in a master tree are present when leaves are
dropped.
}
\examples{
master <- BalancedTree(9)
paths <- PathLengths(master)
keptTips <- c(1, 5, 7, 9)
keptVerts <- KeptVerts(master, keptTips)
KeptPaths(paths, keptVerts)
paths[KeptPaths(paths, keptVerts, all = FALSE), ]
}
\seealso{
Other tree manipulation:
\code{\link{AddTip}()},
\code{\link{CollapseNode}()},
\code{\link{ConsensusWithout}()},
\code{\link{DropTip}()},
\code{\link{EnforceOutgroup}()},
\code{\link{ImposeConstraint}()},
\code{\link{KeptVerts}()},
\code{\link{LeafLabelInterchange}()},
\code{\link{MakeTreeBinary}()},
\code{\link{RenumberTips}()},
\code{\link{RenumberTree}()},
\code{\link{Renumber}()},
\code{\link{RootTree}()},
\code{\link{SortTree}()},
\code{\link{Subtree}()},
\code{\link{TipTimedTree}()},
\code{\link{TrivialTree}}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{tree manipulation}
|
452c9f100638bdf45716a7ac76b667c85ab916ca | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ProjectTemplate/examples/arff.reader.Rd.R | dc9d1df4e7e8aa0c5e8580657ab396581e8c8423 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 205 | r | arff.reader.Rd.R | library(ProjectTemplate)
### Name: arff.reader
### Title: Read the Weka file format.
### Aliases: arff.reader
### ** Examples
## Not run: arff.reader('example.arff', 'data/example.arff', 'example')
|
008d00a2df90e8db70dd61dfeb386be8d1738d11 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /ProFound/man/profoundSkyEst.Rd | 683c784644afda0f78a95532ab7f52b2fefc2d1d | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,450 | rd | profoundSkyEst.Rd | \name{profoundSkyEst}
\alias{profoundSkyEst}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Old Sky Estimator (Somewhat Defunct)
}
\description{
A high level utility to estimate the sky properties of a supplied \option{image}. This is closely related to the equivalent routines available in the LAMBDAR R package.
}
\usage{
profoundSkyEst(image = NULL, objects = NULL, mask = NULL, cutlo = cuthi/2,
cuthi = sqrt(sum((dim(image)/2)^2)), skycut = 'auto', clipiters = 5, radweight = 0,
plot = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{image}{
Numeric matrix; required, the image we want to analyse. The galaxy should be approximately central within this image since annuli weighting is done to avoid brighter central regions dominated by galaxy flux.
}
\item{objects}{
Boolean matrix; optional, object mask where 1 is object and 0 is sky. If provided, this matrix *must* be the same dimensions as \option{image}.
}
\item{mask}{
Boolean matrix; optional, non galaxy parts of the image to mask out, where 1 means mask out and 0 means use for analysis. If provided, this matrix *must* be the same dimensions as \option{image}.
}
\item{cutlo}{
Numeric scalar; radius where the code will start to calculate the sky annuli around the central object. Should be large enough to avoid significant object flux, i.e. a few times the flux 90 radius. Default is half of \option{cuthi}.
}
\item{cuthi}{
Numeric scalar; radius where the code will stop calculating the sky annuli around the central object. Default is the corner edge of the \option{image}.
}
\item{skycut}{
Numeric scalar; clipping threshold to make on the \option{image} in units of the skyRMS. The default scales the clipping to the number of pixels in the \option{image}, and will usually work reasonably.
}
\item{clipiters}{
Numeric scalar; How many iterative clips of the sky will be made.
}
\item{radweight}{
Numeric scalar; what radius power-law weighting should be used to bias the sky towards sky annuli nearer to the central object. \option{radweight}>0 weight the sky value more towards larger radii and \option{radweight}<0 weight the sky values towards the \option{image} centre. The default of 0 means there is no radial weightings. This becomes clear when plotting the \option{radrun} output (see Examples). Note this behaves differently to the similarly named option in LAMBDAR's sky.estimate.
}
\item{plot}{
Logical; should a diagnostic plot be generated?
}
\item{\dots}{
Further arguments to be passed to \code{\link{magplot}}. Only relevant is \option{plot}=TRUE.
}
}
\details{
This function is closely modelled on the sky.estimate function in the LAMBDAR package (the basic elements of which were written by ASGR). The defaults work well for data where the main object (usually a galaxy) is centrally located in the \option{image} since the \option{cutlo} default will usually ignore contaminated central pixels. On top of this it does pretty aggressive object pixel rejection using the \option{skycut} and \option{clipiters} options.
The defaults should work reasonably well on modern survey data (see Examples), but should the solution not be ideal try modifying these parameters (in order of impact priority): \option{skycut}, \option{cutlo}, \option{radweight}, \option{clipiters}.
It is interesting to note that a better estimate of the sky RMS can be made by using the output of \code{\link{profoundImDiff}} (see Examples).
}
\value{
Returns a list with 5 elements:
\item{sky}{The value of the estimated sky.}
\item{skyerr}{The estimated uncertainty in the sky level.}
\item{skyRMS}{The RMS of the sky pixels.}
\item{Nnearsky}{The number of sky annuli that have error bars encompassing the final sky.}
\item{radrun}{The output of \code{\link{magrun}} for radius versus sky pixels values.}
}
\author{
Aaron Robotham
}
\seealso{
\code{\link{profoundMakeSegim}}, \code{\link{profoundMakeSegimExpand}}
}
\examples{
\dontrun{
image = readFITS(system.file("extdata", 'KiDS/G266035fitim.fits',
package="ProFit"))$imDat
sky1 = profoundSkyEst(image, plot=TRUE)
image_sky = image-sky1$sky
sky2 = profoundSkyEst(profoundImDiff(image_sky), plot=TRUE)
#You can check whether you are contaminated by the central objects by plotting the radrun
#object in the list (it should be flat for a well behaved sky):
sky = profoundSkyEst(image, cutlo=0, plot=TRUE)
magplot(sky$radrun)
abline(h=sky$sky)
#The above shows heavy contamination by the central object when no mask is used. We can either mask
#this out using the output of profoundSegImWatershed, set cutlo to be larger or weight
#the sky towards outer annuli.
profound=profoundProFound(image)
sky = profoundSkyEst(image, mask=profound$objects, cutlo=0, plot=TRUE)
magplot(sky$radrun)
abline(h=sky$sky)
#The above is better, but not great. A more aggressive mask helps:
sky = profoundSkyEst(image, mask=profound$objects_redo, cutlo=0, plot=TRUE)
magplot(sky$radrun)
abline(h=sky$sky)
#Or weighting the sky to outer radii
sky = profoundSkyEst(image, mask=profound$objects, cutlo=0, radweight=1, plot=TRUE)
magplot(sky$radrun)
abline(h=sky$sky)
#Finally we can leave the central cutlo mask turned on:
sky = profoundSkyEst(image, mask=profound$objects, plot=TRUE)
magplot(sky$radrun)
abline(h=sky$sky)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\concept{ sky }% use one of RShowDoc("KEYWORDS")
|
a3aa7b2908bdabdab81d36ad0249b983f0fe4d24 | 24bd76f6ee58fb77f822db0c904e8b75fae6a670 | /CA-3.R | b400cd50a4ac61ba86d37b731cbfe495da5517b3 | [] | no_license | amul-upadhyay/CA-3 | 8d159f9c534750b786df69cae4f74f97a566b58c | f955af8c0941fbe163186efff627b939fd56649c | refs/heads/master | 2020-05-24T23:19:47.408852 | 2019-05-19T18:24:13 | 2019-05-19T18:24:13 | 187,218,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,525 | r | CA-3.R |
# Locading The needed Librabries
library(MASS)
library(plyr)
library(ggplot2)
library(qplot)
#Reading Propert price.csv file and unemployment.csv file
ppr_df <- read.csv("Property Prices.csv", header = TRUE)
unemployment_df <- read.csv("data.csv", header = , skip = 4)
# Printing the top 5 entries of both dataframes
head(ppr_df)
head(unemployment_df)
#Assigning Column names to Unemployemnt dataframe
names(unemployment_df)[1] <- "Year"
names(unemployment_df)[2] <- "Unemployment_Rate"
#Assigning Column names to ppr_df dataframe
names(ppr_df)[1] <- "Year"
names(ppr_df)[2] <- "Address"
names(ppr_df)[3] <- "Postal_Code"
names(ppr_df)[4] <- "County"
names(ppr_df)[5] <- "Price"
names(ppr_df)[6] <- "Not_Full_Market_Price"
names(ppr_df)[7] <- "Vat_Exclusive"
names(ppr_df)[8] <- "Is_Property_New"
names(ppr_df)[9] <- "Poperty_Size_Description"
# Dropping Unneccessary columns
drops <- c("Address", "Postal_Code","Not_Full_Market_Price", "Vat_Exclusive", "Poperty_Size_Description")
ppr_df <- ppr_df[ , !(names(ppr_df) %in% drops)]
# Checking Final column names
colnames(ppr_df)
# Removing month and dat from the date column and keeping only year
ppr_df$Year <- format(as.Date(ppr_df$Year, format="%d/%m/%Y"),"%Y")
# Removing Currency symbol from the Price Column
ppr_df$Price <- as.numeric(gsub('[€ ,]', '', ppr_df$Price))
#Checking the Structure of Dataframe
str(ppr_df)
# Taking Average of Property Prices and Grouping them by year
ppr_df <- aggregate(ppr_df[, 3], list(ppr_df$Year), mean)
head(ppr_df)
colnames(ppr_df)
# Assigning New Column names
names(ppr_df)[1] <- "Year"
names(ppr_df)[2] <- "Price"
# Merging Unemployenet dataframe to the ppr_df dataframe
ppr_df <- merge(ppr_df, unemployment_df, by = "Year")
# Checking head
head(ppr_df)
# Adding a Categorical column where Unemployment rate greater than 7 will be treated as Yes
# and less will be treated as No
ppr_df$Is_Unenployment_Greater_Than_7 <- ifelse(ppr_df$Unemployment_Rate >=7, "Yes", "No")
#Checking Structure
str(ppr_df)
# Testing differences in means
#One of the most common statistical tasks is to compare an outcome between two groups.
#The example here looks at comparing Property Prices When Unemployment rate is grater than 7 and when it is less than 7.
# Create boxplot showing how Property Prices varies between
# Umemployment status
qplot(x = as.factor(Is_Unenployment_Greater_Than_7), y = Price,
geom = "boxplot", data = ppr_df,
xlab = "Is Unemployment greater than 7",
ylab = "Price in Euros",
fill = I("lightblue"))
install.packages("car")
library(car)
leveneTest(ppr_df$Price~ppr_df$Is_Unenployment_Greater_Than_7)
# The Plot clearly suggests that Unemployment rate plays a role in Property Prices
# How can we assess whether this difference is statistically significant?
# Let’s compute a summary table
ddply(ppr_df, ~ Is_Unenployment_Greater_Than_7, summarize,
mean.price = mean(Price),
sd.price = sd(Price)
)
#The standard deviation is good to have, but to assess statistical significance
# we really want to have the standard error (which the standard deviation adjusted by the group size).
ddply(ppr_df, ~ Is_Unenployment_Greater_Than_7, summarize,
group.size = length(Price),
mean.price = mean(Price),
sd.price = sd(Price),
se.mean.price = sd.price / sqrt(group.size)
)
# This difference is looking quite significant.
#To run a two-sample t-test, we can simple use the t.test() function.
ppr_df.t.test <- t.test(Price ~ Is_Unenployment_Greater_Than_7, data = ppr_df)
ppr_df.t.test
names(ppr_df.t.test)
ppr_df.t.test$p.value
ppr_df.t.test$estimate # group means
ppr_df.t.test$conf.int # confidence interval for difference
attr(ppr_df.t.test$conf.int, "conf.level") # confidence level
#The ability to pull specific information from the output of the hypothesis test allows
# you to report your results using inline code chunks. That is,
# you don’t have to hardcode estimates, p-values, confidence intervals, etc.
# Calculate difference in means between smoking and nonsmoking groups
ppr_df.t.test$estimate
pprdf.unemployment.diff <- round(ppr_df.t.test$estimate[1] - ppr_df.t.test$estimate[2], 1)
pprdf.unemployment.diff
# Confidence level as a %
conf.level <- attr(ppr_df.t.test$conf.int, "conf.level") * 100
conf.level
# Our study finds that when Unemployment rate is less than 7
# Prpperty Prices are on average 61598.9 higher.
# What is statistical significance testing doing?
|
4804edf954c6926365a639e248c86c3606ec7b5d | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/chillR/R/make_hourly_temps.R | 3e2bb47f84a563022988f056d43c252a0227bd41 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,598 | r | make_hourly_temps.R | #' Make hourly temperature record from daily data
#'
#' This function generates hourly temperature records for a particular location
#' from daily minimum and maximum temperatures and latitude.
#'
#' Temperature estimates are based on an idealized daily temperature curve that
#' uses a sine curve for daytime warming and a logarithmic decay function for
#' nighttime cooling. The input data frame can have more columns, which are
#' preserved, but ignored in the processing. References to papers outlining the
#' procedures are given below.
#'
#' Note that this function should be able to generate hourly temperatures for
#' all latitudes, but it uses an algorithm designed for locations with regular
#' day/night behavior. It may therefore be that the curves aren't very realistic
#' for very short or very long days, or especially for polar days and nights.
#'
#' @param latitude the geographic latitude (in decimal degrees) of the location
#' of interest
#' @param year_file a data frame containing data on daily minimum temperature
#' (called Tmin), daily maximum temperature (called Tmax), and date
#' information. Dates can either be specified by two columns called Year and
#' JDay, which contain the Year and Julian date (day of the year), or as three
#' columns called Year, Month and Day. year_file cannot have any missing
#' values, so it may be a good idea to process the relevant columns with
#' make_all_day_table and interpolate_gaps before.
#' @param keep_sunrise_sunset boolean variable indicating whether information
#' on sunrise, sunset and daylength, which is calculated for producing hourly
#' temperature records, should be preserved in the output. Defaults to FALSE.
#' @return data frame containing all the columns of year_file, plus 24 columns
#' for hourly temperatures (called Hour_0 ... Hour_23).
#' @author Eike Luedeling
#' @references Luedeling E, Kunz A and Blanke M, 2013. Identification of
#' chilling and heat requirements of cherry trees - a statistical approach.
#' International Journal of Biometeorology 57,679-689.
#'
#' Luedeling E, Girvetz EH, Semenov MA and Brown PH, 2011. Climate change
#' affects winter chill for temperate fruit and nut trees. PLoS ONE 6(5),
#' e20155.
#'
#' The temperature interpolation is described in
#'
#' Linvill DE, 1990. Calculating chilling hours and chill units from daily
#' maximum and minimum temperature observations. HortScience 25(1), 14-16.
#'
#' Calculation of sunrise, sunset and daylength was done according to
#'
#' Spencer JW, 1971. Fourier series representation of the position of the Sun.
#' Search 2(5), 172.
#'
#' Almorox J, Hontoria C and Benito M, 2005. Statistical validation of
#' daylength definitions for estimation of global solar radiation in Toledo,
#' Spain. Energy Conversion and Management 46(9-10), 1465-1471)
#' @keywords utility
#' @examples
#'
#' weather<-fix_weather(KA_weather)
#'
#' THourly<-make_hourly_temps(50.4,weather$weather)
#'
#' #in most cases, you're probably better served by stack_hour_temperatures
#'
#' @export make_hourly_temps
make_hourly_temps <-
function (latitude,year_file,keep_sunrise_sunset=FALSE)
{
  # --- Input validation ---
  if(missing(latitude)) stop("'latitude' not specified")
  if(length(latitude)>1) stop("'latitude' has more than one element")
  if(!is.numeric(latitude)) stop("'latitude' is not numeric")
  if(latitude>90|latitude<(-90)) warning("'latitude' is usually between -90 and 90")
  # Drop days with missing Tmin or Tmax; the interpolation needs both values
  year_file<-year_file[which(!is.na(year_file$Tmin)&!is.na(year_file$Tmax)),]
  # Derive the Julian day from the Year/Month/Day columns when JDay is absent
  if(!"JDay" %in% colnames(year_file))
    year_file[,"JDay"]<-strptime(paste(year_file$Month,"/",year_file$Day,"/",year_file$Year,sep=""),"%m/%d/%Y")$yday+1
  # Remember the input columns so the helper columns added below can be
  # stripped from the output again
  preserve_columns<-colnames(year_file)
  # Sunrise/sunset/daylength for every day, padded with one extra day on each
  # side so the previous/next day's values exist for the first and last rows
  Day_times<-daylength(latitude=latitude,
                       JDay=c(year_file$JDay[1]-1,
                              year_file$JDay,
                              year_file$JDay[nrow(year_file)]+1))
  # Replace the +/-99 values coming out of daylength() (presumably its
  # no-sunrise/no-sunset codes for polar day/night -- see the note in the
  # roxygen header) with usable clock times
  Day_times$Sunrise[which(Day_times$Sunrise==99)]<-0
  Day_times$Sunrise[which(Day_times$Sunrise==-99)]<-12
  Day_times$Sunset[which(Day_times$Sunset==99)]<-24
  Day_times$Sunset[which(Day_times$Sunset==-99)]<-12
  # Helper columns: today's sun times plus the neighbouring days' values
  # (indices offset by one because of the padding above)
  year_file$Sunrise<-Day_times$Sunrise[2:(length(Day_times$Sunrise)-1)]
  year_file$Sunset<-Day_times$Sunset[2:(length(Day_times$Sunset)-1)]
  year_file$Daylength<-Day_times$Daylength[2:(length(Day_times$Daylength)-1)]
  year_file$prev_Sunset<-Day_times$Sunset[1:(length(Day_times$Sunset)-2)]
  year_file$next_Sunrise<-Day_times$Sunrise[3:length(Day_times$Sunrise)]
  # Lagged/leading temperatures of the neighbouring days (NA at the edges)
  year_file$prev_max<-year_file$Tmax[c(NA,1:(nrow(year_file)-1))]
  year_file$next_min<-year_file$Tmin[c(2:nrow(year_file),NA)]
  year_file$prev_min<-year_file$Tmin[c(NA,1:(nrow(year_file)-1))]
  # Temperature at today's and yesterday's sunset, taken from the daytime
  # sine curve (Linvill 1990)
  year_file$Tsunset<-year_file$Tmin+(year_file$Tmax-year_file$Tmin)*
    sin((pi*(year_file$Sunset-year_file$Sunrise)/(year_file$Daylength+4)))
  year_file$prev_Tsunset<-year_file$prev_min+(year_file$prev_max-year_file$prev_min)*
    sin((pi*(year_file$Daylength)/(year_file$Daylength+4)))
  # The 24 hourly temperature columns are appended starting at column 'colnum'
  colnum<-ncol(year_file)+1
  # NOTE(review): 'hourcol' and 'hourcount' are computed but never used below
  hourcol<-c(colnum:(colnum+23))
  for (hour in 0:23)
  {
    hourcount<-hour+1
    #if(length(which(year_file$Daylength==-99))>0)
    #{
    # Days without a normal sunrise/sunset (daylength 0, 24 or -99):
    # fall back to the plain daily mean temperature
    no_riseset<-which(year_file$Daylength %in% c(0,24,-99))
    year_file[no_riseset,colnum+hour]<-((year_file$Tmax+year_file$Tmin)/2)[no_riseset]
    #}
    # Classify each day for this hour: before sunrise (c_morn), daytime
    # (c_day) or after sunset (c_eve)
    c_morn<-which(hour<=year_file$Sunrise)
    if(1 %in% c_morn)
      if(!length(c_morn)==1)
        c_morn<-c_morn[2:length(c_morn)]
      else c_morn<-c() #can't compute temperatures before sunrise for day 1
    c_day<-which(hour>year_file$Sunrise&hour<=year_file$Sunset)
    c_eve<-which(hour>=year_file$Sunset)
    if(nrow(year_file) %in% c_eve) c_eve<-c_eve[1:(length(c_eve)-1)] #can't compute temperatures after sunset for last day
    # Before sunrise: logarithmic cooling from yesterday's sunset temperature
    # towards today's Tmin (night-time decay, Linvill 1990)
    year_file[c_morn,colnum+hour]<-
      year_file$prev_Tsunset[c_morn]- #prev temp at sunset
      ((year_file$prev_Tsunset[c_morn]-year_file$Tmin[c_morn])/
         log(max(1,24-(year_file$prev_Sunset[c_morn]-year_file$Sunrise[c_morn])))*
         log(hour+24-year_file$prev_Sunset[c_morn]+1))
    # Daytime: sine-shaped warming between sunrise and sunset
    year_file[c_day,colnum+hour]<-
      year_file$Tmin[c_day]+
      (year_file$Tmax[c_day]-year_file$Tmin[c_day])*
      sin((pi*(hour-year_file$Sunrise[c_day])/
             (year_file$Daylength[c_day]+4)))
    # After sunset: logarithmic cooling towards the next day's Tmin
    year_file[c_eve,colnum+hour]<-
      year_file$Tsunset[c_eve]- #temp at sunset
      ((year_file$Tsunset[c_eve]-year_file$next_min[c_eve])/
         log(24-(year_file$Sunset[c_eve]-year_file$next_Sunrise[c_eve])+1)*
         log(hour-year_file$Sunset[c_eve]+1))
  }
  # Name the new columns Hour_0 ... Hour_23
  colnames(year_file)[(ncol(year_file)-23):(ncol(year_file))]<-c(paste("Hour_",0:23,sep=""))
  # Strip the helper columns, unless the caller asked to keep the sun times
  if (!keep_sunrise_sunset)
    year_file<-year_file[,c(preserve_columns,paste("Hour_",0:23,sep=""))]
  if (keep_sunrise_sunset)
    year_file<-year_file[,c(preserve_columns,"Sunrise","Sunset","Daylength",paste("Hour_",0:23,sep=""))]
  # The first and last day can contain NA hours (no previous/next day data);
  # fall back to that day's Tmin for those hours
  year_file[1,(ncol(year_file)-23):(ncol(year_file))][which(is.na(year_file[1,(ncol(year_file)-23):(ncol(year_file))]))]<-year_file[1,"Tmin"]
  year_file[nrow(year_file),(ncol(year_file)-23):(ncol(year_file))][which(is.na(year_file[nrow(year_file),(ncol(year_file)-23):(ncol(year_file))]))]<-year_file[nrow(year_file),"Tmin"]
  return(year_file)
}
|
f4a99241a430f1c3b7bfadef6882147cc10ba00e | cfd3f502feb926f7754d9913a543c7c4378decd9 | /Portfolio.R | d03bf676f87682a3b88305ccc61dec61c6c562f0 | [] | no_license | maxclchen/Pairs_Trading-1 | 0a0f26215116104461dcdea07b615c7562d99f66 | 028c6214216151e622ba5f088d3683b55871699e | refs/heads/master | 2022-04-17T13:05:26.205310 | 2018-03-08T23:24:19 | 2018-03-08T23:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,194 | r | Portfolio.R | # Portfolio class
# A Portfolio object consists of a list of CurrencyPair objects and some generic methods that can be invoked on a Portfolio
Portfolio <- setClass(
  # Set the name for the class
  "Portfolio",
  # Define the slots: 'currencyPairList' holds the CurrencyPair objects that make up this portfolio
  slots=c(currencyPairList = "list"),
  # Define prototype
  prototype=list( currencyPairList=list() ), # Create Portfolio object with empty CurrencyPair list
  validity=function(object)
  {
    # Object and data validation checks can be performed here.
    # For simplicity, no checks are implemented here; validity of the data is ensured in the main program.
    return(TRUE);
  }
)
setGeneric(name="getCurrencyPairList",def=function(theObject){ standardGeneric("getCurrencyPairList")})
# Accessor for the 'currencyPairList' slot: returns the CurrencyPair objects
# that make up this Portfolio.
setMethod(f="getCurrencyPairList",signature="Portfolio",definition=function(theObject)
{
  slot(theObject, "currencyPairList")
})
setGeneric(name="setCurrencyPairList",def=function(theObject,cList){ standardGeneric("setCurrencyPairList")})
# Mutator for the 'currencyPairList' slot: stores 'cList' in the Portfolio,
# re-validates the object and returns the updated copy.
setMethod(f="setCurrencyPairList",signature=c("Portfolio","list"),definition=function(theObject,cList)
{
  slot(theObject, "currencyPairList") <- cList
  validObject(theObject)
  theObject
})
setGeneric(name="findMinimumTimeStamp",def=function(theObject){ standardGeneric("findMinimumTimeStamp") })
# Returns the earliest timestamp for which there is data for at least one of
# the CurrencyPair objects in the Portfolio.
# Fix: the original looped over 2:length(cList), which evaluates to c(2, 1)
# for a portfolio holding a single pair and therefore indexed out of bounds.
setMethod(f="findMinimumTimeStamp",signature=c("Portfolio"),definition=function(theObject)
{
  cList <- getCurrencyPairList(theObject)
  # Start with the first timestamp of the first pair
  globalMinimum <- getTimeSeriesData(cList[[1]])$Time[1]
  # Compare against the first timestamp of every remaining pair
  # (seq_along(cList)[-1] is empty when there is only one pair)
  for(i in seq_along(cList)[-1])
  {
    minimum <- getTimeSeriesData(cList[[i]])$Time[1]
    if(minimum < globalMinimum)
      globalMinimum <- minimum
  }
  return(globalMinimum)
})
setGeneric(name="findMaximumTimeStamp",def=function(theObject){ standardGeneric("findMaximumTimeStamp") })
# Returns the latest timestamp for which there is data for at least one of the
# CurrencyPair objects in the Portfolio.
# Fix: the original looped over 2:length(cList), which evaluates to c(2, 1)
# for a portfolio holding a single pair and therefore indexed out of bounds.
setMethod(f="findMaximumTimeStamp",signature=c("Portfolio"),definition=function(theObject)
{
  cList <- getCurrencyPairList(theObject)
  # Last timestamp of the first CurrencyPair object of the Portfolio
  timeStamps <- getTimeSeriesData(cList[[1]])$Time
  globalMaximum <- timeStamps[length(timeStamps)]
  # Compare against the last timestamp of every remaining pair
  # (seq_along(cList)[-1] is empty when there is only one pair)
  for(i in seq_along(cList)[-1])
  {
    timeStamps <- getTimeSeriesData(cList[[i]])$Time
    maximum <- timeStamps[length(timeStamps)]
    if(maximum > globalMaximum)
      globalMaximum <- maximum
  }
  return(globalMaximum)
})
setGeneric(name="getAvailablePairsInTimeWindow", def=function(theObject,begin,end){ standardGeneric("getAvailablePairsInTimeWindow")})
# Returns a new sub-Portfolio consisting of the CurrencyPair objects of the
# original Portfolio whose data fully covers the [begin, end] interval.
# Note: the [begin, end] data interval is NOT filtered in the resulting
# sub-portfolio; the full data of the selected pairs is returned.
# Fix: the original grew the result via 'resultList[n+1] <- cList[[i]]' --
# single-bracket assignment of an S4 object into a list (fragile coercion)
# combined with an O(n^2) growing-list pattern; Filter() selects the matching
# pairs directly.
setMethod(f="getAvailablePairsInTimeWindow", signature=c("Portfolio","POSIXlt","POSIXlt"), definition=function(theObject,begin,end)
{
  cList <- getCurrencyPairList(theObject)
  # Keep exactly the pairs that contain data for the whole interval
  resultList <- Filter(function(pair) containsDataInterval(pair,begin,end), cList)
  # Return a new sub-Portfolio holding the selected CurrencyPair objects
  return(Portfolio(currencyPairList=resultList))
})
setGeneric(name="copySubIntervalPortfolio", def=function(theObject,begin,end,fullIntervalRequired=TRUE){ standardGeneric("copySubIntervalPortfolio") })
# Returns a new sub-Portfolio whose CurrencyPair objects only retain data inside
# the [begin, end] interval. When fullIntervalRequired is TRUE, pairs that do
# not have complete data in [begin, end] are excluded beforehand.
setMethod(f="copySubIntervalPortfolio", signature=c("Portfolio","POSIXlt","POSIXlt"), definition=function(theObject,begin,end,fullIntervalRequired=TRUE)
{
  # Choose the pairs to copy: either only those whose data fully covers the
  # interval, or every pair of the portfolio
  if(fullIntervalRequired)
    sourcePairs <- getCurrencyPairList(getAvailablePairsInTimeWindow(theObject,begin,end))
  else
    sourcePairs <- getCurrencyPairList(theObject)
  # Restrict every selected pair to the requested interval
  trimmedPairs <- lapply(sourcePairs, function(pair) copySubInterval(pair,begin,end))
  # Return the resulting Portfolio object
  resultPortfolio <- setCurrencyPairList(theObject,trimmedPairs)
  validObject(resultPortfolio)
  return(resultPortfolio)
})
setGeneric(name="cleanPortfolio", def=function(theObject,begin,end){ standardGeneric("cleanPortfolio") })
# Removes every data row inside the [begin, end] interval for which at least
# one CurrencyPair reports Volume == 0; rows outside the interval are kept.
# Important note: this method assumes that the time series of the individual
# CurrencyPairs in the portfolio are aligned with each other (the portfolios
# are structured according to this assumption in the project's control flow).
# Fixes relative to the original implementation:
#  - the interval test compared only Time[1] against 'begin' (a scalar that
#    was recycled over the whole row filter); the full Time vector is used now
#  - the volume loop iterated over seq(2:length(cList)) == 1:(n-1), so the
#    last pair's volume was never checked (and pair 1 was checked twice)
setMethod(f="cleanPortfolio",signature=c("Portfolio","POSIXlt","POSIXlt"),definition=function(theObject,begin,end)
{
  cList <- getCurrencyPairList(theObject)
  # Rows outside [begin, end] are kept unconditionally
  timeStamps <- getTimeSeriesData(cList[[1]])$Time
  outsideInterval <- (timeStamps < begin | timeStamps > end)
  # Rows inside the interval are kept only when EVERY pair has Volume > 0
  allVolumesPositive <- (getTimeSeriesData(cList[[1]])$Volume > 0)
  for(i in seq_along(cList)[-1])
    allVolumesPositive <- (allVolumesPositive & getTimeSeriesData(cList[[i]])$Volume > 0)
  keepRow <- (outsideInterval | allVolumesPositive)
  # Apply the row filter to every pair of the portfolio
  resultList <- vector("list",length(cList))
  for(i in seq_along(cList))
    resultList[[i]] <- setTimeSeriesData(cList[[i]],getTimeSeriesData(cList[[i]])[keepRow,])
  # Return a portfolio that contains the cleaned CurrencyPair objects
  resultPortfolio <- setCurrencyPairList(theObject,resultList)
  validObject(resultPortfolio)
  return(resultPortfolio)
})
setGeneric(name="getAllPrices", def=function(theObject,begin,end){ standardGeneric("getAllPrices") })
# Collects the bid prices of all CurrencyPairs inside the [begin, end] interval
# into a data frame with one column per pair, named after the pair identifier.
# Fix: the original looped over 2:length(cList), which is c(2, 1) for a
# portfolio holding a single pair and therefore indexed out of bounds.
setMethod(f="getAllPrices",signature=c("Portfolio","POSIXlt","POSIXlt"),definition=function(theObject,begin,end){
  cList <- getCurrencyPairList(theObject)
  # Start with the bid prices of the first pair inside [begin, end]
  logicalVector <- (getTimeSeriesData(cList[[1]])$Time >= begin & getTimeSeriesData(cList[[1]])$Time <= end)
  prices <- data.frame(getTimeSeriesData(cList[[1]])$Bid[logicalVector])
  colnames(prices)[1] <- getIdentifier(cList[[1]])
  # Append the bid prices of every remaining pair
  # (seq_along(cList)[-1] is empty when there is only one pair)
  for(i in seq_along(cList)[-1])
  {
    logicalVector <- (getTimeSeriesData(cList[[i]])$Time >= begin & getTimeSeriesData(cList[[i]])$Time <= end)
    prices <- cbind(prices,getTimeSeriesData(cList[[i]])$Bid[logicalVector])
    colnames(prices)[i] <- getIdentifier(cList[[i]])
  }
  return(prices)
})
setGeneric(name="getAllTransactionCosts", def=function(theObject,begin,end){ standardGeneric("getAllTransactionCosts") })
# Collects the transaction costs of all CurrencyPairs inside the [begin, end]
# interval into a data frame with one column per pair, named after the pair
# identifier.
# Fix: the original looped over 2:length(cList), which is c(2, 1) for a
# portfolio holding a single pair and therefore indexed out of bounds.
setMethod(f="getAllTransactionCosts",signature=c("Portfolio","POSIXlt","POSIXlt"),definition=function(theObject,begin,end){
  cList <- getCurrencyPairList(theObject)
  # Start with the transaction costs of the first pair inside [begin, end]
  logicalVector <- (getTimeSeriesData(cList[[1]])$Time >= begin & getTimeSeriesData(cList[[1]])$Time <= end)
  transactionCosts <- data.frame(getTimeSeriesData(cList[[1]])$TransactionCost[logicalVector])
  colnames(transactionCosts)[1] <- getIdentifier(cList[[1]])
  # Append the transaction costs of every remaining pair
  # (seq_along(cList)[-1] is empty when there is only one pair)
  for(i in seq_along(cList)[-1])
  {
    logicalVector <- (getTimeSeriesData(cList[[i]])$Time >= begin & getTimeSeriesData(cList[[i]])$Time <= end)
    transactionCosts <- cbind(transactionCosts,getTimeSeriesData(cList[[i]])$TransactionCost[logicalVector])
    colnames(transactionCosts)[i] <- getIdentifier(cList[[i]])
  }
  return(transactionCosts)
})
setGeneric(name="plotCurrencyPairsSeparately", def=function(theObject,begin,end){ standardGeneric("plotCurrencyPairsSeparately") })
# Plots the prices of the individual CurrencyPairs in separate subplots,
# arranged on a near-square grid.
setMethod(f="plotCurrencyPairsSeparately",signature=c("Portfolio","POSIXlt","POSIXlt"),definition=function(theObject,begin,end){
  pairs <- getCurrencyPairList(theObject)
  nrPairs <- length(pairs)
  # Grid layout: columns = ceiling(sqrt(n)); drop the last row when it would
  # remain completely empty
  nrColumns <- ceiling(sqrt(nrPairs))
  nrRows <- if((nrColumns-1)*nrColumns >= nrPairs) nrColumns-1 else nrColumns
  par(mfrow=c(nrRows,nrColumns))
  # One panel per currency pair
  for(pair in pairs)
    plotCurrencyPair(pair,begin,end)
  # Restore the default single-plot layout
  par(mfrow=c(1,1))
})
setGeneric(name="plotCurrencyPairsTogether", def=function(theObject,begin,end){ standardGeneric("plotCurrencyPairsTogether") })
# Overlays the bid prices of all CurrencyPairs inside the [begin, end] interval
# in a single plot, one colour per pair. The time filter is derived from the
# first pair's timestamps (the series are assumed to be aligned, as in
# cleanPortfolio).
# Fixes relative to the original implementation:
#  - the interval test compared only Time[1] against 'begin', recycling a
#    scalar over the whole filter; the full Time vector is used now
#  - 2:sizeCollection crashed for a portfolio holding a single pair
setMethod(f="plotCurrencyPairsTogether",signature=c("Portfolio","POSIXlt","POSIXlt"),definition=function(theObject,begin,end){
  cList <- getCurrencyPairList(theObject)
  timeStamps <- getTimeSeriesData(cList[[1]])$Time
  inInterval <- (timeStamps >= begin & timeStamps <= end)
  # One column of bid prices per pair; do.call(cbind, ...) yields a matrix
  # even when the portfolio holds a single pair
  res <- do.call(cbind, lapply(cList, function(pair) getTimeSeriesData(pair)$Bid[inInterval]))
  timeInterval <- timeStamps[inInterval]
  par(mfrow=c(1,1))
  plot(timeInterval,res[,1],ylim=range(res),type='l',xlab="Time",ylab="Prices",col=1)
  for(i in seq_along(cList)[-1])
  {
    par(new=TRUE) # draw the next series on top of the existing plot
    plot(timeInterval,res[,i],ylim=range(res),axes=FALSE,xlab="",ylab ="",type='l',col=i)
  }
})
# This function takes a collection of CointegrationTestPortfolio objects as
# input and plots their spreads on a near-square grid of subplots.
plotCointegratedSpreads <- function(cointegrationResultCollection)
{
  nrResults <- length(cointegrationResultCollection)
  # Grid layout: columns = ceiling(sqrt(n)); drop the last row when it would
  # remain completely empty
  nrColumns <- ceiling(sqrt(nrResults))
  nrRows <- if((nrColumns-1)*nrColumns >= nrResults) nrColumns-1 else nrColumns
  par(mfrow=c(nrRows,nrColumns))
  # Draw the spread of every cointegration result in its own panel
  for(result in cointegrationResultCollection)
    plotSpread(result)
}
# CointegrationTestPortfolio inherits from the Portfolio base class.
# Objects of this class represent Portfolios on which specific cointegration
# related functions and operations can be performed.
CointegrationTestPortfolio <- setClass(
  # Set the name for the class
  "CointegrationTestPortfolio",
  # cointegrationResults slot of class ca.jo: holds the Johansen cointegration
  # test output (filled in by performJohansenProcedureForCointegration)
  slots=c(cointegrationResults = "ca.jo"),
  # Default validity function: no checks beyond those of the Portfolio base class
  validity=function(object){ return(TRUE); },
  # Inherit from Portfolio base class
  contains="Portfolio"
)
setGeneric(name="cointegrationTestPortfolioCopyConstructor",def=function(theObject,portfolio){ standardGeneric("cointegrationTestPortfolioCopyConstructor")})
# This function copies and transforms a basic Portfolio object into a CointegrationTestPortfolio.
# Note: function is not used in project control flow.
# NOTE(review): 'begin' and 'end' are not parameters of this method; they are
# free variables resolved from the calling/global environment at run time. The
# call fails (or silently picks up stale values) unless objects named 'begin'
# and 'end' happen to exist there -- confirm before reusing this function.
setMethod(f="cointegrationTestPortfolioCopyConstructor",signature=c("CointegrationTestPortfolio","Portfolio"),definition=function(theObject,portfolio)
{
  # Copy the currency pair list
  cointegrationTestPortfolio <- setCurrencyPairList(theObject,getCurrencyPairList(portfolio))
  # Check if individual pairs in the portfolio are I(1)
  if(individualPairsI1(cointegrationTestPortfolio,begin,end))
  {
    # Perform cointegration test on portfolio. Set the cointegration testresults
    cointegrationTestPortfolio <- performJohansenProcedureForCointegration(cointegrationTestPortfolio,begin,end)
  }
  validObject(cointegrationTestPortfolio)
  return(cointegrationTestPortfolio)
})
setGeneric(name="getCointegrationResults",def=function(theObject){ standardGeneric("getCointegrationResults")})
# Accessor for the 'cointegrationResults' slot: returns the ca.jo object
# holding the cointegration test results for this Portfolio.
setMethod(f="getCointegrationResults",signature="CointegrationTestPortfolio",definition=function(theObject)
{
  slot(theObject, "cointegrationResults")
})
setGeneric(name="setCointegrationResults",def=function(theObject,results){ standardGeneric("setCointegrationResults")})
# Setter: store the cointegration test results (ca.jo object) on this Portfolio
# and hand back the updated object (S4 copy semantics -- caller must reassign)
setMethod(f="setCointegrationResults",signature=c("CointegrationTestPortfolio","ca.jo"),definition=function(theObject,results)
{
  theObject@cointegrationResults <- results
  # Run the class validity check before returning the modified object
  validObject(theObject)
  theObject
})
setGeneric(name="individualPairsI1", def=function(theObject,begin,end){ standardGeneric("individualPairsI1") })
# Returns TRUE when every CurrencyPair object in the Portfolio is integrated of
# order 1 (I(1)) in the [begin:end] interval, FALSE otherwise.
# The identifier of the first pair that fails the I(1) test is printed.
setMethod(f="individualPairsI1",signature=c("CointegrationTestPortfolio","POSIXlt","POSIXlt"), definition=function(theObject,begin,end)
{
  cList <- getCurrencyPairList(theObject)
  # seq_along() instead of seq(1, length(cList)): for an empty pair list,
  # seq(1, 0) yields c(1, 0) and would cause an out-of-bounds subscript,
  # whereas seq_along() correctly iterates zero times (vacuously TRUE).
  for(i in seq_along(cList))
  {
    if(!isI1(cList[[i]],begin,end))
    {
      # Report which pair broke the I(1) requirement before bailing out
      print(getIdentifier(cList[[i]]))
      return(FALSE)
    }
  }
  return(TRUE)
})
setGeneric(name="performJohansenProcedureForCointegration", def=function(theObject,begin,end){ standardGeneric("performJohansenProcedureForCointegration") })
# This method performs the Johansen procedure for cointegration on the [begin:end] interval of the Portfolio and fills in the cointegrationResults slot with the results
# Only the data interval for which the test was performed is retained for the resulting Portfolio
setMethod(f="performJohansenProcedureForCointegration",signature=c("CointegrationTestPortfolio","POSIXlt","POSIXlt"),definition=function(theObject,begin,end){
  # Bid prices for all pairs over the requested interval (one column per pair)
  prices <- getAllPrices(theObject,begin,end)
  # We first find optimal VAR solution lag length by minimizing an information criterion obtained for different lag length models
  # SC information criterion is used for consistency since we have big sample sizes --> Will deliver asymptotically correct results (but is inefficient).
  # Include intercept in model: Allow trend in data generating process for the levels of the prices.
  varest <- VAR(prices,p=1,type="const",lag.max=24, ic="SC")
  # In the Johansen procedure a lagged VAR (VECM) is used, hence subtract 1 from
  # the optimal VAR lag length. Floored at 2 -- presumably the minimum lag
  # accepted by ca.jo (TODO confirm against urca documentation).
  lagLength <- max(2,varest$p-1)
  # Perform Johansen procedure for cointegration
  # Allow intercepts in the cointegrating vector: data without zero mean
  # Use trace statistic (null hypothesis: number of cointegrating vectors <= r)
  res <- ca.jo(prices,type="trace",ecdet="const",K=lagLength,spec="longrun")
  # Create copy of the portfolio that only contains data for the [begin:end] interval for which the cointegration was tested
  cointegrationTestedPortfolio <- copySubIntervalPortfolio(theObject,begin,end,FALSE)
  # Set the cointegration test results
  cointegrationTestedPortfolio <- setCointegrationResults(cointegrationTestedPortfolio,res)
  # Return the resulting CointegrationTestPortfolio
  return(cointegrationTestedPortfolio)
})
setGeneric(name="isCointegrated",def=function(theObject){ standardGeneric("isCointegrated") })
# Returns TRUE when the Portfolio is cointegrated with at least 90% confidence,
# FALSE otherwise.
# Note: assumes the Johansen procedure was already executed on the Portfolio.
setMethod(f="isCointegrated",signature="CointegrationTestPortfolio",definition=function(theObject)
{
  res <- getCointegrationResults(theObject)
  # The trace statistic for H0: r <= 0 is the last entry of teststat; its 90%
  # critical value sits in the last row, first column of the cval matrix.
  traceStat <- res@teststat[length(res@teststat)]
  critVal <- res@cval[nrow(res@cval), 1]
  # Reject r <= 0 (i.e. at least one cointegrating vector exists) when the
  # statistic reaches the critical value.
  if (traceStat >= critVal) {
    return(TRUE)
  } else {
    return(FALSE)
  }
})
setGeneric(name="getOptimalEigenvector",def=function(theObject){ standardGeneric("getOptimalEigenvector")})
# Returns the cointegrating eigenvector (hedge ratios) belonging to the largest
# eigenvalue of the stored Johansen test results.
# Note: assumes the Johansen procedure was already executed on the Portfolio.
setMethod(f="getOptimalEigenvector",signature="CointegrationTestPortfolio",definition=function(theObject)
{
  res <- getCointegrationResults(theObject)
  # Only the first length(pair list) entries are hedge ratios; the trailing
  # entry is the constant coefficient, which is dropped here.
  nPairs <- length(getCurrencyPairList(theObject))
  res@V[1:nPairs, which.max(res@lambda)]
})
setGeneric(name="getPortfolioSpreadAndTransactionCosts",def=function(theObject){ standardGeneric("getPortfolioSpreadAndTransactionCosts")})
# This method calculates and returns the Portfolio spread and the Portfolio transaction costs that are incurred when buying/selling the portfolio at the corresponding timeStamp
# Calculation depends on the individual price series of the CurrencyPair objects and the optimal eigenvector associated to the results of the cointegration test
# A dataframe with Time stamps, the value of the spread and the transaction costs is returned
# Note: the function assumes that the Johansen procedure was executed on the Portfolio
setMethod(f="getPortfolioSpreadAndTransactionCosts",signature="CointegrationTestPortfolio",definition=function(theObject)
{
  # Fetch timestamps for first CurrencyPair (timestamps are aligned for the other CurrencyPair objects)
  timeStamps <- getTimeSeriesData(getCurrencyPairList(theObject)[[1]])$Time
  begin <- timeStamps[1]
  end <- timeStamps[length(timeStamps)]
  # Fetch dataframe containing bid prices of all the CurrencyPair objects in the Portfolio
  prices <- getAllPrices(theObject,begin,end)
  # Fetch optimal eigenvector
  evec <- getOptimalEigenvector(theObject)
  # Calculate the spread:
  # hedge ratios / eigenvector values are multiplied with their associated price
  # columns and summed row-wise (t(evec * t(prices)) scales each column by its ratio).
  # If the Portfolio is cointegrated then the resulting spread should be stationary.
  portfolioSpread <- rowSums(t(evec*t(prices)))
  # Fetch dataframe containing transaction costs of all the CurrencyPair objects in the Portfolio
  transactionCosts <- getAllTransactionCosts(theObject,begin,end)
  # Absolute value of the hedge ratios represents the amount of units of each individual CurrencyPair that we buy or sell
  # We multiply these amounts by the transaction costs per CurrencyPair unit and sum to obtain the total transaction cost at each point in time
  portfolioTransactionCosts <- rowSums(t(abs(evec)*t(transactionCosts)))
  dataframe <- data.frame(Time=timeStamps,Spread=portfolioSpread,TransactionCosts=portfolioTransactionCosts)
  # Convert Time to POSIXlt format
  # (presumably data.frame() coerced the timestamp class, so the original
  # vector is reassigned column-wise to preserve it -- TODO confirm)
  dataframe$Time <- timeStamps
  return(dataframe)
})
setGeneric(name="calculateHalfLife",def=function(theObject){ standardGeneric("calculateHalfLife")})
# Estimates the half-life of mean reversion of the portfolio spread from the
# discrete mean-reversion regression
#   dy(t) = (lambda*y(t-1) + mu)dt + dE
# and returns HalfLife = -log(2)/lambda.
# Note: assumes the Johansen procedure was already executed on the Portfolio.
setMethod(f="calculateHalfLife",signature="CointegrationTestPortfolio",definition=function(theObject)
{
  spread <- getPortfolioSpreadAndTransactionCosts(theObject)$Spread
  # One-period lag of the spread; the first entry becomes NA
  laggedPortfolioSpread <- vect_lag(spread,1)
  deltaSpread <- spread - laggedPortfolioSpread
  # Drop the NA entries introduced by lagging before fitting the regression
  laggedPortfolioSpread <- laggedPortfolioSpread[!is.na(laggedPortfolioSpread)]
  deltaSpread <- deltaSpread[!is.na(deltaSpread)]
  # Regress the change in spread on the lagged spread; the slope is lambda
  fit <- lm(deltaSpread ~ laggedPortfolioSpread)
  # Half-life of mean reversion
  -log(2)/fit$coefficients[2]
})
setGeneric(name="plotSpread",def=function(theObject,details=FALSE){ standardGeneric("plotSpread")})
# Plots the Portfolio spread over time and marks the mean and the +/- 1 and 2
# standard deviation bands.
# details=FALSE: plot title lists the currency pair identifiers
# details=TRUE:  plot title shows the full spread formula (hedge ratio * pair)
# Note: the function assumes that the Johansen procedure was executed on the Portfolio
setMethod(f="plotSpread",signature="CointegrationTestPortfolio",definition=function(theObject,details=FALSE)
{
  currencyPairs <- getCurrencyPairList(theObject)
  timeStamps <- getPortfolioSpreadAndTransactionCosts(theObject)$Time
  spread <- getPortfolioSpreadAndTransactionCosts(theObject)$Spread
  halfLife <- calculateHalfLife(theObject)
  halfLifeString <- paste(" (HalfLife is ",ceiling(halfLife),' days)',sep="")
  nrPairs <- length(currencyPairs)
  evec <- getOptimalEigenvector(theObject)
  # Build "(PAIR1,PAIR2,...)" and "w1*PAIR1+w2*PAIR2..." title strings.
  # NOTE(review): the first hedge ratio is printed unrounded while the remaining
  # ones are rounded to 2 decimals in the loop below -- confirm if intended.
  currencyString <- paste('(',getIdentifier(currencyPairs[[1]]),sep="")
  spreadString <- paste(evec[1], "*", getIdentifier(currencyPairs[[1]]),sep="")
  for(j in 2:nrPairs)
  {
    currencyString <- paste(currencyString,',',getIdentifier(currencyPairs[[j]]),sep="")
    # The sign of the hedge ratio decides the +/- separator in the formula
    sign = "-"
    if(evec[j] > 0)
      sign = "+"
    spreadString <- paste(spreadString,sign,round(abs(evec[j]),2),"*",getIdentifier(currencyPairs[[j]]),sep="")
  }
  currencyString <- paste(currencyString,')',sep="")
  if(details)
    plot(timeStamps,spread,xlab=paste("Time",halfLifeString),ylab="Spread",main=spreadString,type="l")
  else
    plot(timeStamps,spread,xlab=paste("Time",halfLifeString),ylab="Spread",main=currencyString,type="l")
  # Horizontal reference lines: mean (green), +/-1 sd (blue), +/-2 sd (red).
  # NOTE(review): the stray 'a=' name inside c() is harmless but looks like a typo.
  abline(h=c(mean(spread),mean(spread)+sd(spread),a=mean(spread)+2*sd(spread),mean(spread)-sd(spread),mean(spread)-2*sd(spread)),col=c("green","blue","red","blue","red"))
})
1e452c77523b7c5fff2abf7390a06308402ef53e | c5548b79560cec8ebf6b300074140cb22f2a88a3 | /Archive/Functions (Kopie)/Plotting/var_map_plot.R | 79014c92c2321b73e8208dc86d1881da3020a496 | [] | no_license | ginnyweasleyIUP/202002_PaperDraft | 6bb191087ac25044b55b1d7b3036e8b4257b3cab | e0e44e6adc14ae661f4fe144c8271bd06b15351c | refs/heads/master | 2020-12-29T20:02:42.360975 | 2020-05-13T23:29:06 | 2020-05-13T23:29:06 | 238,714,663 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,336 | r | var_map_plot.R | library(plyr)
library(dplyr)
library(rgdal)
library(latex2exp)
source("Functions/STACYmap_5.R")
source("Functions/projection_ptlyr.R")
# Plot a world map of log variance ratios (Var_Rec / Var_Sim) as colored points.
#
# Args:
#   Point_Lyr:  point layer / data frame consumed by projection_ptlyr()
#               (assumed to carry coordinates and a log-ratio value column
#               that becomes 'layer' after projection -- TODO confirm)
#   projection: PROJ string used to reproject the points
#               (default: Robinson projection on WGS84)
#   pt_size:    point size passed to geom_point()
#   txt_size:   font size for legend title and other text elements
#
# Returns: a ggplot object (STACYmap base map with the points overlaid)
var_map_plot <- function(Point_Lyr,
                         projection = as.character('+proj=robin +datum=WGS84'),
                         pt_size,
                         txt_size){
  # Reproject the point layer into the requested map projection
  Point_Lyr_p <- projection_ptlyr(Point_Lyr, projection)
  # NOTE: a symmetric data-driven color limit (max(abs(Point_Lyr$value))) was
  # previously computed here but never used; the scale below is fixed to the
  # [log(0.025), log(40)] range instead.
  plot <- STACYmap(coastline = TRUE) +
    geom_point(data = Point_Lyr_p, aes(x = long, y = lat, fill = layer), color = "black", shape = 21,
               size = (pt_size), alpha = 1.0, show.legend = c(fill =TRUE)) +
    # Diverging red-blue scale centered on 0 (= ratio of 1), log-spaced breaks
    scale_fill_gradientn(colors = rev(RColorBrewer::brewer.pal(11, 'RdBu')),
                         limits = c(log(0.025),log(40)),
                         breaks = c(log(0.025), log(0.05), log(0.25), log(0.5), 0, log(2), log(4), log(20), log(40)),
                         labels = c(0.025, "", 0.25, "", 1, "", 4, "", 40),
                         name = TeX("$Var_{Rec}/Var_{Sim}$")) +
    theme(legend.direction = "horizontal",
          panel.border = element_blank(),
          legend.background = element_blank(),
          axis.text = element_blank(),
          text = element_text(size = txt_size),
          legend.title = element_text(size = txt_size))
  return(plot)
}
|
6a943c98c0c525a23ecbccac321e30c9c823f8d5 | 10c9ef05b910c971359b8e91adbf7d6bf6fc4dc2 | /Extract/plotSignal.r | 8801c45b82bdd6fc48377c079fb627e3299f352f | [] | no_license | MaxwellSam/Modelisation_Cpp_Projet | 0e7e196cddfca7252ea403ae51239a1c46c0dc7d | 2303775aa66791e6f9a72758e2cba74af94576c2 | refs/heads/master | 2023-04-02T00:47:16.783227 | 2021-04-15T18:59:50 | 2021-04-15T18:59:50 | 353,645,147 | 0 | 0 | null | 2021-04-13T19:54:48 | 2021-04-01T09:26:57 | C++ | UTF-8 | R | false | false | 341 | r | plotSignal.r | # CPP Projet Modelisation
# file name : plotSignal.r
# authors : Belmahi Asmae, Maxwell Sam
# date : 14/04/2021
dev.new(width=7, height=4)
# Load the recorded channel data: first column = time, second column = signal
signal <- read.table("channel_2.txt", header = TRUE)
# Line plot of the action potential trace
plot(signal[[1]], signal[[2]], main = "Evolution of action potential on the first 8 seconds",
     type = "l",
     col = "#3182bd",
     xlab = "time [ms]",
     ylab = "Signal [mv]"
)
|
22ba8463eaee67197aa69cfe20479b149b591d4f | 400b384715f5f02ef43118f792d9eb73de314b2b | /tests/testthat/test-plot_nei.R | 9160c725edc8eae6977c2173477c789550da3516 | [] | no_license | yassato/rNeighborQTL | 8d250721fee767f96dd115324321a1c18ba67e1c | f3c54151f794fa0c9c213c9f0834b8e51aa4d6a4 | refs/heads/master | 2023-04-28T02:43:01.742030 | 2021-05-11T08:06:03 | 2021-05-11T08:06:03 | 253,141,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,089 | r | test-plot_nei.R | context("plot_nei")
# F2 intercross test case: build a small genotype-probability object from the
# R/qtl example data and check that plot_nei() rejects mismatched plot types.
set.seed(1234)
data("fake.f2",package="qtl")
# Keep only the first 2 individuals x 50 markers to keep the test fast
fake_f2 <- fake.f2[1:2,1:50]
# Random 2-D spatial coordinates for the individuals
smap_f2 <- cbind(runif(qtl::nind(fake_f2),1,100),runif(qtl::nind(fake_f2),1,100))
genoprobs_f2 <- qtl::calc.genoprob(fake_f2,step=4)
test_that(
  desc = "plot_error_catch",
  code = {
    # Plain neighbor genome scan (no interaction terms)
    f2_scan <- scan_neighbor(genoprobs=genoprobs_f2,
                             pheno=fake_f2$pheno[,1],
                             smap=smap_f2, scale=19.37,
                             addcovar=as.matrix(fake_f2$pheno$sex)
                             )
    # Scan including QTL-by-neighbor interaction terms
    f2_int <- int_neighbor(genoprobs=genoprobs_f2,
                           pheno=fake_f2$pheno[,1],
                           smap=smap_f2, scale=20,
                           addcovar=as.matrix(fake_f2$pheno$sex),
                           addQTL=c("c1_D1M318","c1_D1M212"), intQTL="c1_D1M212",
                           grouping=fake_f2$pheno$sex)
    # Each result object presumably supports only its own plot type, so
    # requesting the other kind should raise an error -- TODO confirm in plot_nei()
    expect_error(plot_nei(f2_scan,type="int"))
    expect_error(plot_nei(f2_int,type="self"))
    expect_error(plot_nei(f2_int,type="neighbor"))
  }
)
|
72197323e985f9d50b515c7050765f3b26e115ca | 098841409c03478ddae35c4cdf6367cfd65fa3bf | /simu/code/procimpute/scVI_latent.R | 201cdda18a7d5e37122ad0b35e9b74ace739cd46 | [] | no_license | wangdi2016/imputationBenchmark | 0281746b482788c347faf9d96e8288639ba388a6 | 0881121444975cd0a3ee2ce69aaec46c3acd7791 | refs/heads/master | 2023-07-29T10:16:14.004610 | 2021-09-09T20:00:43 | 2021-09-09T20:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 648 | r | scVI_latent.R | library(data.table)
# Post-process scVI latent-space imputation output: read each per-dataset CSV,
# transpose the matrix, and cache it as an .rds file in the procimpute folder.
indir <- '/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/simu/result/impute/scVI_latent'
outdir <- '/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/simu/result/procimpute/scVI_latent'
# Dataset names = CSV files in the input directory with the extension removed.
# Pattern is anchored so only a trailing ".csv" is stripped (a bare "." in a
# regex matches any character).
allf <- sub('\\.csv$','',list.files(indir))
# Optionally skip datasets that were already processed:
#getf <- sub('\\.rds$','',list.files(outdir))
#runf <- setdiff(allf, getf)
runf <- allf
# Plain loop instead of sapply: this is pure side effects (file I/O), and the
# previously captured result was never used.
for (f in runf) {
  print(f)
  # Read the CSV as a plain data frame and transpose it
  sexpr <- t(as.matrix(fread(paste0(indir,'/',f,'.csv'),data.table = FALSE)))
  saveRDS(sexpr,file=paste0(outdir,'/',f,'.rds'))
}
|
d84faab2a215010925cc682f4d1f6c6023b1cb69 | f6feae21f96d836ef878bf75206d62dffa60b93a | /src/R/hydra/dspNgs/R/read_dataset.R | e2f8dbf000a5e3dcf4e39a68337d2bb2695e4d62 | [] | no_license | whwanglab/PDAC | 77060a38d11f427246aa5754dcf542057cccdc2a | 661dbc0f92986729d43eb6ba1e4b4aeb91dc52e6 | refs/heads/main | 2023-04-12T23:31:22.222531 | 2022-04-27T04:14:12 | 2022-04-27T04:14:12 | 401,784,565 | 10 | 6 | null | null | null | null | UTF-8 | R | false | false | 23,588 | r | read_dataset.R | #' Reads in DSP dataset
#'
#' @export parallel_read
#' @export read_dataset
#' @export reformat_dataset
#' @export remove_gene_outliers
#' @export remove_LOQ
#' @export remove_blacklist_genes
#' @export change_LOQ
#' @export drop_expected_negs
#' @export drop_samples
#' @title Function that parallelizes spreadsheet read
#' @param file string value with file name
#' @param get_cols boolean indicating if column names should be preserved
#' @return list of data.frame objects containing data from /code{file} tabs
#' @examples
#' parallel_read(file="./testData/DA_data/Q3_normalization/Q3 norm.xlsx")
parallel_read <- function(file, get_cols=TRUE){
  # Detect available cores and use ~65% of them, but never fewer than 1.
  # detectCores() can return NA on some platforms, so guard against that.
  detected <- parallel::detectCores()
  if (is.na(detected)) {
    detected <- 1
  }
  numCores <- max(1, round(detected * .65))
  # get sheet names
  sheets <- readxl::excel_sheets(file)
  if(.Platform$OS.type == "windows") {
    # Fork-based mclapply is unavailable on Windows -> use a PSOCK cluster
    print("OS = Windows")
    cl <- parallel::makePSOCKcluster(numCores)
    # Guarantee the cluster is shut down even if a worker errors
    on.exit(parallel::stopCluster(cl), add = TRUE)
    tmp_dfs <-
      parallel::parLapplyLB(cl,
                  sheets,
                  function(sheet, file) {
                    readxl::read_excel(file, sheet=sheet, col_names=get_cols)
                  },
                  file)
  } else {
    # Fork-based parallelism; reuse the sheet names fetched above instead of
    # calling excel_sheets() a second time
    tmp_dfs <- parallel::mclapply(sheets,
                                  readxl::read_excel,
                                  path = file,
                                  col_names = get_cols,
                                  mc.cores=numCores)
  }
  # Label each data frame with its sheet name
  names(tmp_dfs) <- sheets
  return(tmp_dfs)
}
#' @title function to reformat DSP-DA data to be compatible with Hydra
#' @param curr_dfs list of current data frames resulting
#' from \code{\link{read_dataset}}
#' @param norm_type string identifying the type of normalization used
#' @return list of data frames containing reformatted /code{curr_dfs}
#' @examples
#' Q3_dfs <- read_dataset(xl_path="./testData/DA_data/Q3_normalization",
#' raw_xl="ProbeQC - Default.xlsx",
#' norm_xl="Q3 Norm.xlsx",
#' norm_type="Q3")
#' reformat_dataset(curr_dfs=Q3_dfs, norm_type="Q3")
reformat_dataset <- function(curr_dfs, norm_type) {
  # Remove appended sheet names for column names read in:
  # the first data row of every sheet carries the real column headers
  for (idx in names(curr_dfs)) {
    if (nrow(curr_dfs[[idx]]) < 1) {
      stop(paste0("Missing excel tab with ", idx, " data."))
    }
    # Associate column names with appropriate format
    colnames(curr_dfs[[idx]]) <- curr_dfs[[idx]][1, ]
    curr_dfs[[idx]] <- curr_dfs[[idx]][-1, ]
  }
  # dfs[[1]] Raw collapsed counts
  # Change counts to numeric and at least 1 (non-numeric cells become NA -> 1)
  curr_dfs[[1]][,2:ncol(curr_dfs[[1]])] <-
    sapply(curr_dfs[[1]][,2:ncol(curr_dfs[[1]])], as.numeric)
  curr_dfs[[1]][is.na(curr_dfs[[1]])] <- 1
  colnames(curr_dfs[[1]]) <-
    sapply(colnames(curr_dfs[[1]]), make_name_valid)
  rownames(curr_dfs[[1]]) <- curr_dfs[[1]][["TargetName"]]
  # dfs[[2]] Normalized counts
  # Change counts to numeric
  curr_dfs[[2]][,2:ncol(curr_dfs[[2]])] <-
    sapply(curr_dfs[[2]][,2:ncol(curr_dfs[[2]])], as.numeric)
  colnames(curr_dfs[[2]]) <-
    sapply(colnames(curr_dfs[[2]]), make_name_valid)
  rownames(curr_dfs[[2]]) <- curr_dfs[[2]][["TargetName"]]
  # dfs[[3]] Segment properties
  # Replace SegmentDisplayName column name with Sample_ID and reorder
  curr_dfs[[3]][["Sample_ID"]] <- curr_dfs[[3]][["SegmentDisplayName"]]
  curr_dfs[[3]] <-
    select(curr_dfs[[3]], Sample_ID, everything(), -SegmentDisplayName)
  # Ensure naming format compatible
  curr_dfs[[3]][["Sample_ID"]] <-
    sapply(curr_dfs[[3]][["Sample_ID"]], make_name_valid)
  # Identify numeric columns
  cols_num <- c("RawReads", "AlignedReads", "DeduplicatedReads", "TrimmedReads", "StitchedReads")
  curr_dfs[[3]][cols_num] <- sapply(curr_dfs[[3]][cols_num], as.numeric)
  # dfs[[4]] Experiment summary
  # No changes to dataset summary data frame (self-assignment is a no-op,
  # kept so every sheet is handled explicitly)
  curr_dfs[[4]] <-
    curr_dfs[[4]]
  # dfs[[5]] Target properties
  # Make sure target notes has a Pooling column of character type
  colnames(curr_dfs[[5]])[colnames(curr_dfs[[5]]) == "ProbePool"] <- "Pooling"
  curr_dfs[[5]] <-
    transform(curr_dfs[[5]], Pooling=as.character(Pooling))
  # Replace DSP-DA naming convention
  curr_dfs[[5]][["TargetGroup"]] <- gsub("All Targets", "All Probes", curr_dfs[[5]][["TargetGroup"]])
  curr_dfs[[5]][["TargetGroup"]] <- gsub(",", ";", curr_dfs[[5]][["TargetGroup"]])
  # Relabel segment property LOQ, negative geomean, and normalization factor columns
  pool_ids <- unique(curr_dfs[[5]][["Pooling"]])
  LOQ_key <- "Standard deviation amount for the LOQ"
  for (pool in pool_ids) {
    # Get LOD used in DSP-DA to override user input.
    # The !is.na() term drops rows where the key comparison itself is NA.
    DA_LOQ <-
      as.numeric(curr_dfs[[4]][curr_dfs[[4]][, 1] == LOQ_key &
                                 !is.na(curr_dfs[[4]][, 1] == LOQ_key), 2])
    # Side effect: overrides the global LOQ_level with the value DSP-DA recorded
    assign("LOQ_level", DA_LOQ, envir=.GlobalEnv)
    geo_col <- paste0("GeoLOQ", LOQ_level, "_", pool)
    curr_dfs[[3]][[geo_col]] <- as.numeric(curr_dfs[[3]][["LOQ"]])
    # Pull out single negative geometric mean from raw collapsed counts for DSP-DA v2.0
    neg_name <-
      curr_dfs[[5]][curr_dfs[[5]][["CodeClass"]] == "Negative" &
                      curr_dfs[[5]][["Pooling"]] == pool, "TargetName"]
    neg_geo <- curr_dfs[[1]][curr_dfs[[1]][["TargetName"]] == neg_name,
                             2:ncol(curr_dfs[[1]])]
    if(nrow(neg_geo) != 1) {
      stop("Negatives not mapping 1:1.
           Make sure negative target names unique for each pool and
           each negative exists in collapsed count data.")
    }
    neg_geo_col <- paste0("NegGeoMean_", pool)
    curr_dfs[[3]][[neg_geo_col]] <- unlist(neg_geo[, curr_dfs[[3]][["Sample_ID"]]])
    # Accomodates DSP-DA v2.0 with one norm factor for all pools
    # This feature only accurate for single panel results from DA v2.0
    if (norm_type == "Neg") {
      neg_col <- paste0("NormFactorNeg_", pool)
      curr_dfs[[3]][[neg_col]] <- as.numeric(curr_dfs[[3]][["NormalizationFactor"]])
    }
  }
  # Add column with correct column header for Q3 or HK normalization factors
  if (norm_type != "Neg") {
    norm_col <- paste0("NormFactor", norm_type)
    curr_dfs[[3]][[norm_col]] <- as.numeric(curr_dfs[[3]][["NormalizationFactor"]])
  }
  return(curr_dfs)
}
#' @title Reads in DSP-DA excel workbook for analysis and visualiation
#' @param xl_path string specifying path to dsp dataset excel workbooks
#' @param raw_xl string specifying name of raw collapsed count workbook
#' @param norm_xl string specifying name of normalized count workbook
#' @param norm_type string indicating type of normalization used in DSP-DA.
#' Accepted values: "Q3", "Neg", "HK"
#' @return list of dataframes, one for each tab
#' @examples
#' read_dataset(xl_path="./testData/DA_data/Q3_normalization",
#' raw_xl="ProbeQC - Default.xlsx",
#' norm_xl="Q3 Norm.xlsx",
#' norm_type="Q3")
read_dataset <- function(xl_path, raw_xl, norm_xl, norm_type){
  # Raw workbook: initial dataset export (probe-QC'd collapsed counts)
  raw_dfs <- parallel_read(file.path(xl_path, raw_xl), get_cols=FALSE)
  # Normalized workbook: scaled/normalized data export (five tabs)
  norm_dfs <- parallel_read(file.path(xl_path, norm_xl), get_cols=FALSE)
  # Pick the relevant sheet out of each workbook. Target properties come from
  # the raw (probe QC) export because scaling drops the negative probes.
  DA_dfs <- list(
    "RawCountMatrix"    = data.frame(raw_dfs["TargetCountMatrix"]),   # raw counts
    "TargetCountMatrix" = data.frame(norm_dfs["TargetCountMatrix"]),  # normalized counts
    "SegmentProperties" = data.frame(norm_dfs["SegmentProperties"]),  # ROI/AOI annotations
    "Dataset summary"   = data.frame(norm_dfs["Dataset summary"]),    # experiment summary
    "TargetProperties"  = data.frame(raw_dfs["TargetProperties"])     # target notes
  )
  # Reformat to match the txt dataframe layout; names are dropped so
  # downstream code can index the list positionally
  unname(reformat_dataset(curr_dfs=DA_dfs, norm_type=norm_type))
}
# Remove gene outliers from the normalized count matrix (dfs[[2]]):
#  1. genes whose total count exceeds outlier_cutoff x the average total count
#  2. genes whose expression correlates (Spearman > thresh) with the per-sample
#     median negative-probe geometric mean
# Removed genes are logged; the filtered matrix is returned with a leading
# "Gene" column holding the row names.
remove_gene_outliers <- function(df, outlier_cutoff, thresh){
  # Read in normalized counts and use TargetName as row names
  norm_counts <- df[[2]]
  rownames(norm_counts) <- norm_counts[, "TargetName"]
  norm_counts[["TargetName"]] <- NULL
  # Genes removed with total gene count larger than
  # given outlier_cutoff x average gene count
  # (note: w2kp here holds the indices of genes to DROP, despite the name)
  w2kp <- which(rowSums(norm_counts) > (mean(rowSums(norm_counts) * outlier_cutoff)))
  if (length(w2kp) != 0){
    flog.info("Genes removed with high gene count across all ROIs", name="DSP_NGS_log")
    flog.info(str_wrap(paste(row.names(norm_counts[w2kp,]), collapse = ", "), width = 120),
              name="DSP_NGS_log")
    print(paste0("Removed ", length(w2kp), " out of ", nrow(norm_counts), " genes with high gene count"))
    norm_counts <- norm_counts[-w2kp,]
  }
  # Per-sample negative-probe background: median of all *GeoMean* columns
  # found in the segment properties (dfs[[3]])
  samp_notes <- df[[3]]
  geomean <- grep(names(samp_notes), pattern = "GeoMean")
  geomean <- apply(samp_notes[, geomean, drop=FALSE], 1, median)
  # Genes whose expression tracks the background are likely noise-driven
  w2kp <- which(cor(y = geomean, x = t(norm_counts), method = "spearman") > thresh)
  if (length(w2kp) != 0){
    flog.info("Genes removed with high correlation to negative probes across all ROIs",
              name="DSP_NGS_log")
    flog.info(str_wrap(paste(row.names(norm_counts[w2kp,]), collapse = ", "), width = 120),
              name="DSP_NGS_log")
    print(paste0("Removed ", length(w2kp), " out of ", nrow(norm_counts), " genes with high correlation to negative probes"))
    norm_counts <- norm_counts[-w2kp,]
  }
  # Re-expose the gene names as an explicit first column
  norm_counts <- cbind(row.names(norm_counts), norm_counts)
  colnames(norm_counts)[1] <- "Gene"
  return(norm_counts)
}
# QC / filtering on the limit of quantitation (LOQ):
#  * computes, per AOI, how many genes are at or above their pool-specific LOQ
#  * writes several QC plots (bar, box, scatter, histogram) into qc_dir
#  * drops genes that fall below LOQ in at least LOQ_cutoff of the AOIs
# Returns the filtered normalized count data frame (dfs[[2]] minus dropped genes).
# NOTE(review): relies on several globals (qc_dir, fileType, colors, genes,
# fav_genes, wrap_num, LOQ_floor, sat_numcrit, cell_count), and the default
# values LOQ_level/LOQ_cutoff/grouping_var/control_var are themselves resolved
# from the calling environment.
remove_LOQ <- function(dfs, LOQ_level = LOQ_level, LOQ_cutoff = LOQ_cutoff,
                       grp_var = grouping_var, ctrl_var = control_var){
  # Raw counts with TargetName as row names, restricted to genes that survived
  # earlier filtering (present in the normalized counts)
  counts <- dfs[[1]][,-1]
  rownames(counts) <- dfs[[1]][,1]
  w2kp <- which(rownames(counts) %in% dfs[[2]]$Gene)
  counts <- counts[w2kp,]
  targets <- dfs[[5]]
  rownames(targets) <- dfs[[5]]$HUGOSymbol
  # Filter out targets with no target information including NegProbe
  counts <- counts[rownames(counts) %in% rownames(targets), ]
  AOIs <- dfs[[3]]
  rownames(AOIs) <- AOIs$Sample_ID
  # Align AOI annotation rows with count matrix columns if necessary
  if(any(rownames(AOIs) != colnames(counts))){
    counts <- counts[,match(rownames(AOIs),colnames(counts))]
    AOIs <- AOIs[match(colnames(counts), rownames(AOIs)),]
  }
  # same code as from run_seqQC:
  # per gene, test each AOI's count against that AOI's pool-specific LOQ
  LOQs <- lapply(rownames(counts), function(x) {
    # Get counts for target
    row <- counts[x,]
    # Get Pool Name and Column
    pool <- targets[x,'Pooling']
    poolcol <- paste0('GeoLOQ', LOQ_level, '_', pool)
    loqs <- AOIs[,poolcol]
    # Never let the LOQ drop below the global floor
    loqs[loqs < LOQ_floor] <- LOQ_floor
    loqtest <- row > t(loqs)
    return(loqtest)
  })
  # genes x AOIs logical matrix: TRUE where the gene is above LOQ in that AOI
  LOQs <- data.frame(matrix(unlist(LOQs), nrow=length(LOQs), byrow=T,
                            dimnames = list(rownames(counts), colnames(counts))))
  # Per-AOI counts and percentages of genes at/above LOQ
  percents <- apply(LOQs, 2, function(x){sum(x)})
  Percent <- as.data.frame((percents/nrow(LOQs))*100)
  names(Percent) <- "Percent"
  AOI <- data.frame(AOIs$Sample_ID, AOIs[[grp_var]], AOIs[[ctrl_var]])
  names(AOI) <- c("AOI", "grp", "ctrl")
  order <- match(AOI$AOI, rownames(Percent))
  percents <- percents[order]
  data <- data.frame(AOI, Percent[order,])
  colnames(data)[4] <- "Percent"
  # Suppress the y-axis AOI labels when there are too many to read
  if(nrow(data) > 75){
    labels <- theme(axis.text.y=element_blank())
  }else{
    labels <- NULL
  }
  # Barplot of percent of genes above LOQ in every AOI,
  # colored by AOI type (grp, ctrl variables);
  # AOI labels are printed if there are less than 76
  gp <- ggplot(data, aes(y=Percent, x=reorder(AOI, Percent), fill=paste(grp, ctrl), color=paste(grp, ctrl))) +
    geom_bar(stat = "identity", position = position_stack(reverse=TRUE)) +
    scale_y_discrete(limits = seq(0,100,10)) +
    scale_x_discrete(labels = function(x) str_wrap(x, width = 15)) +
    scale_fill_manual(values=colors$AOIs) +
    scale_color_manual(values=colors$AOIs) +
    ggtitle("Percent of Genes at or above LOQ in every AOI") +
    xlab("AOI") +
    coord_flip() +
    theme(axis.ticks.y=element_blank(),
          panel.grid.minor.y = element_blank(),
          panel.grid.major.x = element_line(colour = "darkgray"),
          legend.title = element_blank())+
    labels
  ggsave(plot = gp, filename = paste0(qc_dir, "/LOQ.", fileType), device = fileType, height = 8, width = 12, scale = 1.5)
  # Boxplots of percents of genes above LOQ in different AOI types (grp, ctrl variables)
  gp <- ggplot(data, aes(x= paste(grp, ctrl), y = Percent, fill = paste(grp, ctrl)))+
    geom_boxplot()+
    scale_fill_manual(name = 'AOI type', values = colors$AOIs)+
    labs(x = "", y = "Percent of Genes at or above LOQ", fill = "AOI Type", title = "Percent of Genes at or above LOQ in different AOI types")+
    theme(axis.text.x = element_blank(), axis.ticks.x=element_blank())
  ggsave(plot = gp, filename = paste0(qc_dir, "/LOQ_percent_boxplot.", fileType),
         device = fileType, height = 8, width = 8, scale = 1.5)
  data$count <- percents
  # Boxplots of gene counts above LOQ in different AOI types (grp, ctrl variables)
  gp <- ggplot(data, aes(x= paste(grp, ctrl), y = count, fill = paste(grp, ctrl)))+
    geom_boxplot()+
    scale_fill_manual(name = 'AOI type', values = colors$AOIs)+
    labs(x = "", y = "Number of Genes at or above LOQ", fill = "AOI Type", title = "Number of Genes at or above LOQ in different AOI types")+
    theme(axis.text.x = element_blank(), axis.ticks.x=element_blank())
  ggsave(plot = gp, filename = paste0(qc_dir, "/LOQ_counts_boxplot.", fileType),
         device = fileType, height = 8, width = 12, scale = 1.5)
  # Scatter plot of gene counts above LOQ vs sat_numcrit variable,
  # colored by AOI type (grp, ctrl variables)
  if(!is.null(sat_numcrit)){
    data[[sat_numcrit]] <- AOIs[[sat_numcrit]]
    gp <- ggplot(data, aes(x = data[[sat_numcrit]], y = count, col = paste(grp, ctrl)))+
      geom_point()+
      labs(x = sat_numcrit, y = "Gene count at or above LOQ", col = "AOI Type", title = paste0("Gene count at or above LOQ vs ", sat_numcrit))+
      scale_color_manual(name = 'AOI type', values = colors$AOIs)
    ggsave(plot = gp, filename = paste0(qc_dir, "/LOQ_counts_vs_", sat_numcrit, ".", fileType),
           device = fileType, height = 8, width = 12, scale = 1.5)
  }
  # Scatter plot of gene counts above LOQ vs cell counts,
  # colored by AOI type (grp, ctrl variables); skipped when it would duplicate
  # the sat_numcrit plot above
  if(!is.null(cell_count)){
    if(cell_count != sat_numcrit){
      data[[cell_count]] <- AOIs[[cell_count]]
      gp <- ggplot(data, aes(x = data[[cell_count]], y = count, col = paste(grp, ctrl)))+
        geom_point()+
        labs(x = cell_count, y = "Gene count at or above LOQ", col = "AOI Type", title = paste0("Gene count at or above LOQ vs ", cell_count))+
        scale_color_manual(name = 'AOI type', values = colors$AOIs)
      ggsave(plot = gp, filename = paste0(qc_dir, "/LOQ_counts_vs_", cell_count, ".", fileType),
             device = fileType, height = 8, width = 12, scale = 1.5)
    }
  }
  # Per-gene fraction of AOIs at/above LOQ
  above_LOQ <- apply(LOQs, 1, function(x){sum(x)})
  above_LOQ <- above_LOQ/ncol(LOQs)
  below_LOQ <- 1-above_LOQ
  # Which genes are below LOQ in a higher percentage of AOIs than LOQ_cutoff:
  # these genes are removed
  w2kp <- which(below_LOQ >= LOQ_cutoff)
  if (length(w2kp) != 0){
    flog.info(paste0("Genes removed below LOQ in ", LOQ_cutoff * 100,"% of AOIs", "(",
                     length(w2kp), "/", nrow(counts), ")"), name="DSP_NGS_log")
    if (length(w2kp) < 300){
      flog.info(str_wrap(paste(row.names(counts[w2kp,]), collapse = ", "), width = 120),
                name="DSP_NGS_log")
    }
    print(paste0("Removed ", length(w2kp), " out of ", nrow(counts), " genes"))
    norm_counts <- dfs[[2]][-w2kp,]
  }else{
    flog.info(paste0("Genes removed below LOQ in ", LOQ_cutoff * 100,"% of AOIs", "(0/",
                     nrow(counts), ")"), name="DSP_NGS_log")
    norm_counts <- dfs[[2]]
  }
  plt_df <- as.data.frame(above_LOQ)
  # Add plot colors for multiple genesets; genes in more than one set are
  # labeled "Multiple", favorite genes override everything
  plt_df$color <- 'Not Specified'
  if(tolower(names(genes[1])) != "all probes") {
    for (i in 1:length(genes)){
      w2kp <- which(rownames(plt_df) %in% unlist(genes[i]))
      w2kp2 <- which(plt_df$color[w2kp] != "Not Specified")
      plt_df$color[w2kp] <- str_wrap(names(genes[i]), wrap_num)
      plt_df$color[w2kp2] <- "Multiple"
    }
  }
  w2kp <- which(rownames(plt_df) %in% fav_genes)
  plt_df$color[w2kp] <- "Fav.Genes"
  plt_df$color[tolower(plt_df$color) == "all probes"] <- "Not Specified"
  plt_df$color <- factor(plt_df$color)
  #plt_df <- plt_df[which(plt_df$color != "Not Specified"),]
  plt_df$gene <- rownames(plt_df)
  # Histogram bins (2% wide) used both for the plot and for jittering the
  # overlaid gene points vertically within their bin's bar height
  hist <- hist(plt_df$above_LOQ, breaks=seq(0, 1, by = 0.02), plot =FALSE, right = TRUE)
  plt_df$y = 0
  for(i in 1:nrow(plt_df)){
    bin <- max(which(hist$breaks <= plt_df$above_LOQ[i]))
    bin <- ifelse(bin==1, bin, bin-1)
    plt_df$y[i] <- runif(1, min = 1, max = hist$counts[bin])
  }
  # Histogram of percent of AOIs genes are above LOQ in,
  # overlaid by scatterplot of individual genes in given gene sets or fav genes;
  # dropped genes are shaded on histogram and labeled in scatterplot
  gp <- ggplot(plt_df, aes(x = above_LOQ, label = gene))+
    geom_histogram(fill = "Green3", breaks = hist$breaks)+
    labs(x = "Percent of AOIs individual genes are at or above LOQ in", y = "Gene Count",
         title = "Distribution of genes above LOQ in AOIs")+
    geom_rect(aes(xmin = -Inf, ymin = -Inf, ymax = Inf, xmax = 1-LOQ_cutoff), alpha = 0.006, fill = "grey55")+
    annotate("text", x = (1-LOQ_cutoff)/2, y = max(hist$counts)/2,
             label = paste(length(which(above_LOQ < (1-LOQ_cutoff))), "genes\nremoved"))+
    geom_point(data = plt_df[which(plt_df$color != "Not Specified"),], aes(y = y, color = color, x = above_LOQ - 0.01))+
    scale_color_manual(name = 'GeneSet', values = colors$gene_groups)+
    geom_text_repel(data = plt_df[plt_df$above_LOQ < 1-LOQ_cutoff & plt_df$color != "Not Specified",],
                    aes(y = y), color = 'black', box.padding = .4, point.padding = .2,
                    fontface = 'bold', force = 4, nudge_y = 5, min.segment.length = 0.1)
  ggsave(plot = gp, filename = paste0(qc_dir, "/LOQ_genes_removed_histogram.", fileType),
         device = fileType, height = 8, width = 12, scale = 1.5)
  # Log the best- and worst-performing AOIs (within 10 percentage points of the
  # max/min, after rounding)
  data <- data[order(-data$Percent),]
  high <- subset(data, Percent >= signif(max(Percent)-10, digits = 1))
  low <- subset(data, Percent <= signif(min(Percent)+10, digits = 1))
  flog.info(paste("ROIs with more than", signif(max(Percent)-10, digits = 1),
                  "% of probes at or above LOQ, highest to lowest: ", high$AOI, high$grp, high$ctrl,
                  sep = " "), name="DSP_NGS_log")
  flog.info("\n\n", name="DSP_NGS_log")
  flog.info(paste("ROIs with less than", signif(min(Percent)+10, digits = 1),
                  "% of probes at or above LOQ, highest to lowest: ", low$AOI, low$grp, low$ctrl,
                  sep = " "), name="DSP_NGS_log")
  # relevel norm_counts after dropping genes
  # Payman
  str(norm_counts)
  norm_counts$Gene <- as.factor(norm_counts$Gene)
  norm_counts$Gene <- droplevels(norm_counts$Gene)
  return(norm_counts)
}
remove_blacklist_genes <- function(dfs, rm_genes) {
  # Drop user-blacklisted genes from the normalized count table (dfs[[2]]).
  # `rm_genes` is a vector of gene names to remove; genes not present are
  # silently ignored. Returns the (possibly reduced) data frame.
  norm_counts <- dfs[[2]]
  keep <- !(norm_counts$Gene %in% rm_genes)
  if (any(!keep)) {
    norm_counts <- norm_counts[keep, ]
  }
  norm_counts
}
change_LOQ <- function(dfs, LOQ_level = 2.5) {
  # Recompute the limit of quantitation (LOQ) for every probe pool as
  # NegGeoMean * NegGeoSD^LOQ_level and store it in the sample annotation
  # (dfs[[3]]) under the name "GeoLOQ<level>_<pool>". Pools are taken from
  # the Pooling column of dfs[[5]]. Returns the updated annotation object.
  annot <- dfs[[3]]
  for (pool in unique(dfs[[5]]$Pooling)) {
    loq_name <- paste0("GeoLOQ", LOQ_level, "_", pool)
    geo_mean <- annot[[paste0("NegGeoMean_", pool)]]
    geo_sd <- annot[[paste0("NegGeoSD_", pool)]]
    loq <- data.frame(geo_mean * geo_sd^LOQ_level)
    colnames(loq) <- loq_name
    annot[[loq_name]] <- loq
  }
  annot
}
drop_expected_negs <- function(df, neg_flag = 1){
  # Remove samples flagged as expected negatives (expected_neg == neg_flag)
  # from the raw counts (df[[1]]), normalized counts (df[[2]]) and sample
  # annotation (df[[3]]). Samples are matched by Sample_ID against column
  # names of the count tables. Returns the updated list.
  raw_counts <- df[[1]]
  norm_counts <- df[[2]]
  samp_notes <- df[[3]]
  negs <- samp_notes$Sample_ID[samp_notes$expected_neg == neg_flag]
  raw_drop <- which(colnames(raw_counts) %in% negs)
  norm_drop <- which(colnames(norm_counts) %in% negs)
  samp_drop <- which(samp_notes$Sample_ID %in% negs)
  # Guard against empty index vectors: `x[, -integer(0)]` selects ZERO
  # columns (dropping everything) rather than none, which was the original
  # bug when no sample matched. `drop = FALSE` keeps a table even when a
  # single column/row remains.
  if (length(raw_drop) > 0){
    df[[1]] <- raw_counts[, -raw_drop, drop = FALSE]
  }
  if (length(norm_drop) > 0){
    df[[2]] <- norm_counts[, -norm_drop, drop = FALSE]
  }
  if (length(samp_drop) > 0){
    df[[3]] <- samp_notes[-samp_drop, , drop = FALSE]
  }
  return(df)
}
#' @title Function that drops samples that meet given criteria
#' @param df list of data.frame objects
#' @param cols column(s) of the Segment Properties (df[[3]]) in which the criteria are evaluated
#' @param conditions criteria to match in the given columns
#' @param operators comparison operators ("==", "!=", ">", ">=", "<", "<=") pairing each column with its condition
#' @param keep should samples that match the conditions be kept (TRUE) or dropped (FALSE)
#' @param and if multiple conditions, should samples match all conditions (TRUE) or just one (FALSE)
#' @param custom custom filtering expression evaluated as R code; NULL to use cols/conditions/operators
#' @return list of data.frame objects with samples dropped
#' @examples
#' drop_samples(df, cols = "tissue", conditions = "CRC7", operators = "==",
#'              keep = TRUE, and = TRUE, custom = NULL)
drop_samples <- function(df, cols, conditions, operators, keep = FALSE, and = TRUE, custom){
  # Flag samples in the annotation table (df[[3]]) that match the given
  # criteria, then remove either the matching (keep = FALSE) or the
  # non-matching (keep = TRUE) samples via drop_expected_negs().
  if(is.null(custom)){
    drop <- NULL
    values <- 0   # number of criteria actually evaluated (invalid columns skipped)
    for(value in seq_along(cols)){
      if(! cols[value] %in% colnames(df[[3]])){
        print(paste(cols[value], "is not a valid column; skipping matching for this pairing"))
      }else{
        column <- df[[3]][[cols[value]]]
        op <- operators[value]
        # "!=" always compares as-is; any other operator applied to a
        # factor/character column falls back to equality (original behaviour).
        if(op == "!="){
          hits <- which(column != conditions[value])
        }else if(inherits(column, c("factor", "character")) || op == "=="){
          hits <- which(column == conditions[value])
        }else{
          threshold <- as.numeric(conditions[value])
          hits <- switch(op,
                         ">"  = which(column > threshold),
                         ">=" = which(column >= threshold),
                         "<"  = which(column < threshold),
                         "<=" = which(column <= threshold),  # fixed: previously compared with `>`
                         integer(0))
        }
        drop <- c(drop, hits)
        values <- values + 1
      }
    }
    if(values > 1){
      if(and){
        # intersection: keep only row indices matched by every evaluated criterion
        drop <- as.numeric(names(table(drop)[table(drop) == values]))
      }else{
        drop <- unique(drop)
      }
    }
  }else{
    # NOTE(review): evaluates arbitrary user-supplied code; only use with
    # trusted input.
    drop <- which(eval(parse(text = custom)))
  }
  # keep = TRUE inverts the flag: matched samples survive, everything else
  # is marked as an expected negative and removed below.
  if(keep){
    neg_flag <- 0
    norm <- 1
  }else{
    neg_flag <- 1
    norm <- 0
  }
  df[[3]]$expected_neg <- norm
  if(length(drop) > 0){df[[3]]$expected_neg[drop] <- neg_flag}
  num_drops <- length(which(df[[3]]$expected_neg == 1))
  df <- drop_expected_negs(df, neg_flag = 1)
  print(paste(num_drops, "samples were dropped;", nrow(df[[3]]), "samples kept"))
  flog.info(paste(num_drops, "samples were dropped by user;", nrow(df[[3]]), "samples kept"),
            name="DSP_NGS_log")
  #drop unused factor levels from segment notes
  df[[3]] <- droplevels(df[[3]])
  return(df)
}
|
c7616ceb2f23c1f0669e0f01b8189ae91597b14d | f75bc7cffc0d0d85fb05525462ebba5e4f4d7328 | /CIND820 Preliminary.R | cdc285944cd6d5191a850a8548ab598b9bbd8a14 | [] | no_license | bsun0202/CIND820 | d0d900d7a477b00398b80c0e65fc9a93cc0dd64d | 166847b29cba4d43b3d6e0dfe6700c572af49eec | refs/heads/master | 2023-01-14T09:44:59.763172 | 2020-11-17T00:43:42 | 2020-11-17T00:43:42 | 312,957,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,401 | r | CIND820 Preliminary.R | Clean_data <- read.csv('C:/Users/Bowen/Desktop/School/CIND820/Data/CleanedAggregatedData.csv',header = TRUE )
# ---------------------------------------------------------------------------
# CIND820 preliminary analysis: linear models of air-pollutant levels
# (NO2, O3, SO2, CO) on weather covariates, with train/test evaluation.
# Assumes `Clean_data` has been loaded above via read.csv().
# ---------------------------------------------------------------------------
# Inspect structure and summary statistics of the cleaned data set.
str(Clean_data)
summary(Clean_data)
head(Clean_data)
library(lubridate)
# Parse the date strings into Date objects.
Clean_data$LocalDate <-ymd(Clean_data$LocalDate)
library(GGally)
# Pairwise correlation/scatter matrix of the explanatory variables (cols 3-7).
ggpairs(Clean_data[,c(3:7)], size = 0.01)
#No Multi Collinearity in explanatory variables
##############Split Data
# Reproducible 75/25 train/test split.
set.seed(777)
t_ind <- sample(nrow(Clean_data),size = floor(0.75 * nrow(Clean_data)), replace = F)
training <- Clean_data[t_ind,]
testing <- Clean_data[-t_ind,]
plot(training[c(1:100) ,c(3:8)])
############## Models
# For each pollutant, model 1 includes the City factor; model 2 drops it to
# gauge how much of the variation is geography-based.
no2_model1 <- lm(NO2 ~ City + HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(no2_model1)
no2_model2 <- lm(NO2 ~ HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(no2_model2)
o3_model1 <- lm(O3 ~ City + HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(o3_model1)
o3_model2 <- lm(O3 ~ HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(o3_model2)
SO2_model1 <- lm(SO2 ~ City + HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(SO2_model1)
SO2_model2 <- lm(SO2 ~ HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(SO2_model2)
CO_model1 <- lm(CO ~ City + HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(CO_model1)
CO_model2 <- lm(CO ~ HumidityValue + PressureValue + TemperatureValue + WindDirectionValue + WindSpeedValue, training )
summary(CO_model2)
####Coefficient for Air pressure is not significant, could be due to as variations in air pressure being too small 10/1000, will try to transform value
####WindDirection is not significant as expected, will change to categorize to 8 quadrant wind and use as factor variable and try again
####Cities dummy variable add about 20% explanatory power to the data (about 20% of variation is geography based),
####Some cities seems to exhibit 'average' or mean behaviour for value of pollutants (Shows as non significant from mean)
####Most effective in predicting NO2 at about 50%
####
####Models without air pressure and wind direction
no2_model3 <- lm(NO2 ~ City + HumidityValue + TemperatureValue + WindSpeedValue, training )
summary(no2_model3)
o3_model3 <- lm(O3 ~ City + HumidityValue + TemperatureValue + WindSpeedValue, training )
summary(o3_model3)
SO2_model3 <- lm(SO2 ~ City + HumidityValue + TemperatureValue + WindSpeedValue, training )
summary(SO2_model3)
CO_model3 <- lm(CO ~ City + HumidityValue + TemperatureValue + WindSpeedValue, training )
summary(CO_model3)
####Predictions
# Predict on the held-out set with prediction intervals (fit/lwr/upr columns).
predictNO2 <- predict(no2_model3, newdata = testing, interval = 'prediction')
predictO3 <- predict(o3_model3, newdata = testing, interval = 'prediction')
predictSO2 <- predict(SO2_model3, newdata = testing, interval = 'prediction')
predictCO <- predict(CO_model3, newdata = testing, interval = 'prediction')
library("MLmetrics")
# Mean absolute percentage error per pollutant.
NO2MAPE <- MAPE(predictNO2, testing$NO2)
O3MAPE <- MAPE(predictO3, testing$O3)
SO2MAPE <- MAPE(predictSO2, testing$SO2)
COMAPE <- MAPE(predictCO, testing$CO)
pollutants <- c('NO2', 'O3', 'SO2', 'CO')
# NOTE(review): this assignment shadows MLmetrics::MAPE for the rest of the session.
MAPE <- c(NO2MAPE,O3MAPE,SO2MAPE,COMAPE)
MAPEDF <- data.frame(pollutants, MAPE)
MAPEDF
####MAPE value for O3 and SO2 are infinite due to values close to 0
# Residual-vs-predicted plots for each pollutant.
plot(predictNO2[,1],(predictNO2[,1] - testing$NO2), xlab = 'Predicted NO2 Level', ylab = 'Residual', main = 'NO2 Levels')
plot(predictO3[,1],(predictO3[,1] - testing$O3), xlab = 'Predicted O3 Level', ylab = 'Residual', main = 'O3 Levels')
plot(predictSO2[,1],(predictSO2[,1] - testing$SO2), xlab = 'Predicted SO2 Level', ylab = 'Residual', main = 'SO2 Levels', ylim = c(-5,5))
plot(predictCO[,1],(predictCO[,1] - testing$CO), xlab = 'Predicted CO Level', ylab = 'Residual', main = 'CO Levels', ylim = c(-0.5,0.5))
# Standard lm diagnostic plots, 2x2 panel per model.
par(mfrow = c(2,2))
plot(no2_model3)
plot(o3_model3)
plot(SO2_model3)
plot(CO_model3)
#### Heteroscedasticity EXIST
library(ggplot2)
# Time series of NO2 for one city to illustrate serial correlation.
ggplot(data = Clean_data[Clean_data$City == 'Boston',c(1,2,8)],aes(x = LocalDate, y = NO2, color = City))+
  geom_line()
#### Autocorrelation also exists
#### Fixing autocorrelation
|
26e84621c192a4ceaf312ee964d8e372d3776286 | a6fc5a4ed790147ef1e2bb2b771a48aad815286e | /man/fast_preprocess.Rd | 0883e2bb14edb4f02ad7f2acf39d7164e72a18a0 | [
"BSD-3-Clause"
] | permissive | tvpham/iq | 04b9965676077161ac15fb0fca310410c46684d8 | 263c8ed978a2d54bd4b84f223bc066c7ecb2d413 | refs/heads/master | 2023-05-10T20:02:19.799534 | 2023-04-30T15:59:06 | 2023-04-30T15:59:06 | 221,994,455 | 15 | 6 | NOASSERTION | 2022-07-26T08:18:02 | 2019-11-15T20:02:21 | C++ | UTF-8 | R | false | false | 1,599 | rd | fast_preprocess.Rd | \name{fast_preprocess}
\alias{fast_preprocess}
\title{
Data filtering and normalization
}
\description{
Filters out low intensities and performs median normalization.
}
\usage{
fast_preprocess(quant_table,
median_normalization = TRUE,
log2_intensity_cutoff = 0,
pdf_out = "qc-plots-fast.pdf",
pdf_width = 12,
pdf_height = 8,
show_boxplot = TRUE)
}
\arguments{
\item{quant_table}{The \code{quant_table} component as returned by \code{fast_read}.
}
\item{median_normalization}{A logical value. The default \code{TRUE} value is to perform median normalization.}
\item{log2_intensity_cutoff}{Entries lower than this value in log2 space are ignored. Plot a histogram of all intensities to set this parameter.}
\item{pdf_out}{A character string specifying the name of the PDF output. A \code{NULL} value will suppress the PDF output.}
\item{pdf_width}{Width of the pdf output in inches.}
\item{pdf_height}{Height of the pdf output in inches.}
\item{show_boxplot}{A logical value. The default \code{TRUE} value is to create boxplots of fragment intensities for each sample.}
}
\value{
A list is returned with the same components as input data in which low intensities are filtered out and median normalization is performed if requested.
}
\references{
Pham TV, Henneman AA, Jimenez CR. iq: an R package to estimate relative protein
abundances from ion quantification in DIA-MS-based proteomics. \emph{Bioinformatics} 2020 Apr 15;36(8):2611-2613.
}
\author{
Thang V. Pham
}
\seealso{
\code{\link{fast_read}}}
|
5dd4085bbcc011043d536c7d680f5ebcc411bd1b | 35caaa9c780137bcecd03097b54c1c0790a47ed4 | /rf.R | 22748a1da50e0159f60d6c1d1bc48fc1155a9035 | [] | no_license | niepeiyun/rrrf | 851c3589506cbbec555a34c8ed643ac69a1d67e4 | 15134a8c82013411df0c12caa0da4f49e9846dea | refs/heads/master | 2021-04-26T03:30:12.936978 | 2017-12-22T05:36:56 | 2017-12-22T05:36:56 | 107,871,880 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,331 | r | rf.R | rf=function(data.train,data.test,M){
  # --- Hand-rolled random forest: trains M rpart classification trees on
  # bootstrap samples, each using a random subset of explanatory variables,
  # then evaluates predictions on data.test. The class column is "y".
  # Returns list(metrics, runtime, variable importance).
  a=Sys.time()
  CLASS_NAME <- "y"
  CLASSES <- unique(data.train[[CLASS_NAME]])
  feature_num <- ncol(data.train)-1 # number of explanatory variables
  use_feature_num <- as.integer(feature_num/2) # number of explanatory variables excluded per tree
  # S4 class for one decision tree: the fitted model, its training data and
  # the explanatory-variable indices it used.
  setClass("decisionTree", representation( model = "list", data.train = "data.frame", feature = "vector"))
  imp=data.frame( row.names = paste('x',1:feature_num,sep=''),x = paste('x',1:feature_num,sep=''))
  trees <- list()
  for (i in 1:M){
    print(paste('tree:',i))
    # bootstrap-sample the training rows (with replacement)
    #index <- sample(nrow(data.train), nrow(data.train)*rate_of_usedata)
    index <- sample(nrow(data.train),nrow(data.train),replace = T)
    traindata_a_tree <- data.train[index,]
    # randomly pick explanatory variables to EXCLUDE for this tree
    dec <- sample(feature_num, use_feature_num)
    # NOTE(review): column count is taken from the global ALLDATA --
    # presumably identical to ncol(data.train); confirm.
    features <- c(1:ncol(ALLDATA))
    features <- features[-dec]
    # build this tree's training data from the kept variables
    tree <- new("decisionTree")
    tree@data.train <- traindata_a_tree[features]
    tree@feature <- features
    # fit a classification tree on the selected variables and sampled rows
    treeModel <- rpart(paste(CLASS_NAME, "~.", sep=""), data = tree@data.train, method = "class")
    tree@model <- list(treeModel) # rpart returns a list; wrap it to store in the S4 "model" slot
    # collect the decisionTree objects
    trees <- c(trees, list(tree))
    # accumulate per-tree variable importance (one column per tree, NA if unused)
    imp=merge(imp,data.frame(x=names(treeModel$variable.importance),importance=treeModel$variable.importance),by='x',all.x=T)
  }
  # run prediction over the ensemble
  rf.res <- rf_predict(trees, data.test,CLASSES);
  c=Sys.time()-a
  print(Sys.time()-a)
  # Crosstab
  # rf.evl = data.frame(rf.res)
  # for(i in 1:nrow(as.array(rf.res))){
  #  pred_class = rf.res[[i]][2];
  #  ins <- data.frame(Species=c(pred_class[[1]]))
  #  rf.evl <- rbind(rf.evl, ins)
  # }
  # print(table(rf.evl[,1],data.test[,5]))
  # per-variable importance averaged over trees (na_mean presumably ignores NAs -- confirm)
  importance=apply(imp[,-1],1,na_mean)
  index12=rep(0,4,1)
  # 2x2 confusion table: predictions vs true labels (last column of data.test)
  ss=table(rf.res,as.character(data.test[,feature_num+1]))
  index12[2] =ss[2,2]/(ss[1,2]+ss[2,2])
  index12[3]=ss[1,1]/(ss[1,1]+ss[2,1])
  index12[4]=(ss[1,1]+ss[2,2])/(ss[1,1]+ss[1,2]+ss[2,1]+ss[2,2])
  index12[1] =(index12[3]*index12[2])^0.5
  names(index12)=c("Gmeans","TPR","TNR","Overall Acurracy")
  print(index12)
  return(list(index12,c,importance))
}
|
4d3a4a382015cfa124ae0a5ae2b218e3acb3cc55 | 6c37b3af3e8379222b238cb67b877a563a6f3dd4 | /R/plot.feature_value_per_cluster.r | 1c7918b98bd6452980b9d8646e702d10d3c0e07a | [] | no_license | ChristopherBarrington/seuratvis | e809fefabb9f6125d649558b2b860a0c6fe55772 | 413ddca360790eb4c277d0cdc2b14ec2791f1c04 | refs/heads/master | 2021-09-08T09:44:05.645790 | 2021-09-01T07:10:17 | 2021-09-01T07:10:17 | 242,530,342 | 0 | 0 | null | 2020-06-19T06:40:05 | 2020-02-23T14:21:23 | R | UTF-8 | R | false | false | 2,251 | r | plot.feature_value_per_cluster.r | #'
#'
feature_value_per_cluster.plot <- function(id) {
  # Shiny module UI: a namespaced plot output wrapped in a loading spinner.
  withSpinner(plotOutput(outputId = NS(id, 'boxplot')))
}
#'
#'
feature_value_per_cluster.server <- function(input, output, session, picked_feature, cluster_resolution, picked_colours) {
  # Shiny module server: renders output$boxplot summarising the picked
  # feature per cluster. Numeric features are drawn as per-cluster medians
  # with 1.5x-IQR whiskers; factor-like features as dodged frequency bars.
  # picked_feature$values: data frame with a `value` column (used below).
  # cluster_resolution$idents: cluster identity vector, joined row-wise.
  # picked_colours$background: panel/strip background fill colour.
  renderPlot(bg='transparent', expr={
    req(cluster_resolution$idents)
    req(picked_feature$values)

    # get the data to plot: cluster identity alongside the feature values;
    # `x` is the cluster id coerced to a numeric axis position
    cbind(ident=cluster_resolution$idents,
          picked_feature$values) %>%
      mutate(x={as.character(ident) %>% as.numeric()}) -> data

    # if the picked feature has numeric values
    if(is.numeric(data$value)) {
      # make a values range-type plot: per-cluster quartiles, median and
      # 1.5x-IQR bounds; flag the cluster(s) selected in the picker
      data %>%
        # filter(value>0) %>%
        group_by(ident, x) %>%
        summarise(q25=quantile(value, 0.25), q75=quantile(value, 0.75), median=median(value)) %>%
        mutate(is_selected_cluster_id=ident %in% input$cluster_id_picker) %>%
        mutate(iqr=q75-q25, lower=q25-1.5*iqr, upper=q75+1.5*iqr) -> cluster_data_summary

      # vertical line from lower to upper bound, median as a filled point
      # (highlighted when the cluster is selected)
      cluster_data_summary %>%
        gather(key=key, value=y, lower, upper) %>%
        ggplot() +
        aes(x=x, y=y, colour=ident) +
        labs(y='Feature value (median ± 1.5x IQR)') +
        geom_line(size=1) +
        geom_point(mapping=aes(y=median, fill=is_selected_cluster_id), shape=21, size=3, colour='black') +
        scale_fill_manual(values=c(`FALSE`='grey90', `TRUE`='darkorange1')) -> feature_plot
    } else { # the picked feature is factor-like
      # make a frequency bar plot, one dodged bar per feature level
      data %>%
        ggplot() +
        aes(x=x, fill=value) +
        labs(y='Frequency') +
        geom_bar(position=position_dodge2(width=0.9, preserve='single')) -> feature_plot
        # geom_bar(position=position_dodge2(width=0.9, preserve='single')) -> feature_plot
    }

    # add the shared plotting elements: duplicated cluster-id axis and the
    # user-picked background colour
    feature_plot +
      labs(x='Cluster identifier') +
      scale_x_continuous(breaks=seq(from=0, to=100, by=2), minor_breaks=seq(from=1, to=99, by=2), sec.axis=dup_axis(name=NULL)) +
      theme_bw() +
      theme(legend.position='none',
            panel.background=element_rect(fill=picked_colours$background, colour='black'),
            strip.background=element_rect(fill=picked_colours$background, colour='black'))}) -> output$boxplot
}
|
fab9c683519fdcc0a32ac491c5518b06020f4079 | a9ba290ca051038e90a885fbfebc3813816b0bd0 | /man/mstv.Rd | 94e38aa76aad0bebc5c367046cd78f81ad9f3fb5 | [
"MIT"
] | permissive | balachia/pcSoftmaxPack | 42165ad440263558fc3bb659aa0211c365b97967 | 760f42668dfbae21866328c4f27141c6375754da | refs/heads/main | 2023-06-17T11:31:22.199299 | 2021-07-21T02:47:26 | 2021-07-21T02:47:26 | 385,530,998 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 383 | rd | mstv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ms_terminal.R
\name{mstv}
\alias{mstv}
\title{Market Share, Terminal, v (value) formula}
\usage{
mstv(v0s, vs)
}
\arguments{
\item{v0s}{value of focal producer at terminal pole (vectorized)}
\item{vs}{value of other producers at terminal pole}
}
\description{
Market Share, Terminal, v (value) formula
}
|
6fd85c884f5b4cf5bc0d0cc82f29a9f07165cd43 | 618fb0a3bb4520996baa2fb841342bbbf6d4e57a | /man/analyze_AssetCorr.Rd | 598a6975177ad32b2ff625a9e7124e80cd636532 | [] | no_license | cran/AssetCorr | ac654a865db222476bf1648d65f65b663dbf1bb9 | fb249d30060a8722786315638780d8922c05c003 | refs/heads/master | 2021-06-03T23:25:10.299461 | 2021-05-05T14:30:02 | 2021-05-05T14:30:02 | 136,313,140 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,038 | rd | analyze_AssetCorr.Rd | \name{analyze_AssetCorr}
\alias{analyze_AssetCorr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Function to evaluate several default time series simultaneously
}
\description{
To give a first insight of several default time series, this function combines multiple estimator functions (intra and inter) and visualize the results.
}
\usage{
analyze_AssetCorr(DTS,N, B=NA, DB=NA, JC=FALSE, CI_Boot=NA, Adjust=0.0001,
type="bca", Intra=c("AMM","FMM","CMM","JDP1","JDP2","MLE","AMLE","Beta","Mode"),
Inter=c("Copula","Cov","JDP","MLE"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{DTS}{a matrix, containing the default time series of each sector.
%% ~~Describe \code{d} here~~
}
\item{N}{a matrix, containing the number of obligors at the beginning of the period of sector.
%% ~~Describe \code{n} here~~
}
\item{B}{an integer, indicating how many bootstrap repetitions should be used for the single bootstrap corrected estimate (intra and inter).
}
\item{DB}{a combined vector, indicating how many bootstrap repetitions should be used for the inner (first entry) and outer loop (second entry) to correct the bias using the double bootstrap (intra and inter).
}
\item{JC}{ a logical variable, indicating if the jackknife corrected estimate should be calculated (intra and inter).
%% ~~Describe \code{JC} here~~
}
\item{CI_Boot}{a number, indicating the desired confidence interval if the single bootstrap correction is specified.
By default, the interval is calculated as the bootstrap corrected and accelerated confidence interval (Bca). Furthermore, the analytical confidence intervals are provided, using the same value as \code{CI_Boot}(intra and inter).
}
\item{Adjust}{a number, which should be added to an observed default rate of 0 or subtracted from an observed default rate of 1 (only for the intraAMLE).}
\item{type}{a string, indicating the desired method to calculate the bootstrap confidence intervals. For more details see \code{\link[boot]{boot.ci}}. Studentized confidence intervals are not supported.}
\item{Intra}{a combined string, indicating which intra correlation estimators should be used. All estimators are set as default.
}
\item{Inter}{a combined string, indicating which inter correlation estimators should be used. All estimators are set as default.
}
}
\details{
To give a first insight, the function provides an overview of the several default time series and estimates them using different estimators (intra and inter) simultaneously. The plug-in estimates of the intra correlation for the inter correlation methods are obtained via \code{intraMLE}.
If \code{DB} is specified, the single bootstrap corrected estimate will be calculated by using the bootstrap values of the outer loop.
}
\value{
The returned value is a list, containing the following entries:
Estimators_Intra
\item{Sector}{Number of the sector}
\item{Sector_Name}{Name of the sector}
\item{Estimator}{Name of the applied estimator}
\item{Estimate}{Value of the calculated estimate}
\item{Type}{String, which indicating corrected/non-corrected estimates}
\item{correction}{Name of the correction method}
\item{B}{Number of single bootstrap repetitions}
\item{DB}{Number of the double bootstrap repetitions}
\item{CI_Boot}{Selected two-sided bootstrap confidence interval}
\item{CI}{A string, indicating if the corresponding value is the upper or lower bound}
Estimators_Inter
\item{Sector_1}{Number of the sector}
\item{Sector_Name_1}{Name of the sector}
\item{Sector_2}{Number of the sector}
\item{Sector_Name_2}{Name of the sector}
\item{Estimator}{Name of the applied estimator to Sector_1 and Sector_2}
\item{Estimate}{Value of the calculated estimateto Sector_1 and Sector_2}
\item{Type}{String, which indicating corrected/non-corrected estimates}
\item{correction}{Name of the correction method}
\item{B}{Number of single bootstrap repetitions}
\item{DB}{Number of the double bootstrap repetitions}
\item{CI_Boot}{Selected two-sided bootstrap confidence interval}
\item{CI}{A string, indicating if the corresponding value is the upper or lower bound}
}
\author{
Kevin Jakob
}
\references{
\insertRef{botha2010implied}{AssetCorr}
\insertRef{chang2015double}{AssetCorr}
\insertRef{de2002default}{AssetCorr}
\insertRef{dullmann2004systematic}{AssetCorr}
\insertRef{efron1994introduction}{AssetCorr}
\insertRef{frei2017moment}{AssetCorr}
\insertRef{gordy2000comparative}{AssetCorr}
\insertRef{gordy2010small}{AssetCorr}
\insertRef{kalkbrener2010validating}{AssetCorr}
\insertRef{lucas1995default}{AssetCorr}
\insertRef{meyer2009estimation}{AssetCorr}
\insertRef{teetor2011r}{AssetCorr}
}
\seealso{
\code{\link{intraAMM}}, \code{\link{intraFMM}}, \code{\link{intraJDP2}},
\code{\link{intraMLE}}, \code{\link{intraJDP1}}, \code{\link{intraCMM}},
\code{\link{intraMode}},\code{\link{intraBeta}},
\code{\link{interJDP}}, \code{\link{interCopula}},\code{\link{interCMM}},
\code{\link{interCov}}, \code{\link{interMLE}}, \code{\link{intraALL}},
\code{\link{interALL}}
}
\examples{
\donttest{
library(mvtnorm)
set.seed(111)
NoO=1000 #Number of obligors in each sector
Years=20
AC=0.3
PD=0.01
Psi=rmvnorm(Years,sigma=matrix(c(1,0.5,0.5,0.5,1,0.5,0.5,0.5,1),3))
PDcond1=pnorm((qnorm(PD)-sqrt(AC)*Psi[,1])/sqrt(1-AC))
PDcond2=pnorm((qnorm(PD)-sqrt(AC/2)*Psi[,2])/sqrt(1-AC/2))
PDcond3=pnorm((qnorm(PD)-sqrt(AC*2)*Psi[,3])/sqrt(1-AC*2))
DTS=cbind(rbinom(Years,NoO,PDcond1),rbinom(Years,NoO,PDcond2),rbinom(Years,NoO,PDcond3))
N=matrix(NoO,nrow = Years,ncol = 3)
Output<-analyze_AssetCorr(DTS,N)
#Bootstrap Correction and CIs
Output<-analyze_AssetCorr(DTS,N,B=100,CI_Boot=0.95)
#Double Bootstrap Correction and Jackknife
Output<-analyze_AssetCorr(DTS,N,DB=c(50,50),JC=TRUE)
}
}
\keyword{ALL}
\keyword{ALL}% __ONLY ONE__ keyword per line
|
a99a0a5336a37e97f7c17ae39df1a89ea601109b | eac759ea418d8522b239cd420039c5047f34b546 | /man/size_bc.three_way.Rd | 12325469815521697852ac64444fe22773ff7ae7 | [] | no_license | cran/OPDOE | 1dac02018283dcbe1ad0cd8b85736baf930718f4 | cc4f73c76e7a3655ddbc7fc8be069d150182f15d | refs/heads/master | 2021-05-15T01:55:50.474710 | 2018-03-17T21:49:18 | 2018-03-17T21:49:18 | 17,681,194 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,548 | rd | size_bc.three_way.Rd | \name{size_bc.three_way}
\alias{size_bc.three_way_cross.model_4_a_case1}
\alias{size_bc.three_way_cross.model_4_a_case2}
\alias{size_bc.three_way_mixed_cxbina.model_6_a_case1}
\alias{size_bc.three_way_mixed_cxbina.model_6_a_case2}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Three-way analysis of variance -- cross classification (A in B) x C -- model IV,
Three-way analysis of variance -- mixed classification (A in B) x C model VI
}
\description{
Returns the optimal number of levels for factor B and C.
}
\usage{
size_bc.three_way_cross.model_4_a_case1(alpha, beta, delta, a, n, cases)
size_bc.three_way_cross.model_4_a_case2(alpha, beta, delta, a, n, cases)
size_bc.three_way_mixed_cxbina.model_6_a_case1(alpha, beta, delta, a, n, cases)
size_bc.three_way_mixed_cxbina.model_6_a_case2(alpha, beta, delta, a, n, cases)
}
\arguments{
\item{alpha}{
Risk of 1st kind
}
\item{beta}{
Risk of 2nd kind
}
\item{delta}{
The minimum difference to be detected
}
\item{a}{
Number of levels of fixed factor A
}
\item{n}{
Number of replications
}
\item{cases}{
Specifies whether the \code{"maximin"} or \code{"maximin"} sizes are to be determined
}
}
\details{
see chapter 3 in the referenced book
}
\value{
Integers giving the sizes.
}
\references{
Dieter Rasch, Juergen Pilz, L.R. Verdooren, Albrecht Gebhardt: Optimal Experimental Design with R, Chapman and Hall/CRC, 2011
}
\author{
Dieter Rasch, Juergen Pilz, L.R. Verdooren, Albrecht Gebhardt, Minghui Wang
}
\note{
Better use \code{\link{size.anova}} which allows a cleaner notation.
}
\seealso{
\code{\link{size.anova}}
}
\examples{
size_bc.three_way_cross.model_4_a_case1(0.05, 0.1, 0.5, 6, 2, "maximin")
size_bc.three_way_cross.model_4_a_case1(0.05, 0.1, 0.5, 6, 2, "minimin")
size_bc.three_way_cross.model_4_a_case1(0.05, 0.1, 1, 6, 2, "maximin")
size_bc.three_way_cross.model_4_a_case1(0.05, 0.1, 1, 6, 2, "minimin")
size_bc.three_way_cross.model_4_a_case2(0.05, 0.1, 0.5, 6, 2, "maximin")
size_bc.three_way_cross.model_4_a_case2(0.05, 0.1, 0.5, 6, 2, "minimin")
size_bc.three_way_cross.model_4_a_case2(0.05, 0.1, 1, 6, 2, "maximin")
size_bc.three_way_cross.model_4_a_case2(0.05, 0.1, 1, 6, 2, "minimin")
size_bc.three_way_mixed_cxbina.model_6_a_case1(0.05, 0.1, 0.5, 6, 2, "maximin")
size_bc.three_way_mixed_cxbina.model_6_a_case1(0.05, 0.1, 0.5, 6, 2, "minimin")
size_bc.three_way_mixed_cxbina.model_6_a_case2(0.05, 0.1, 0.5, 6, 2, "maximin")
size_bc.three_way_mixed_cxbina.model_6_a_case2(0.05, 0.1, 0.5, 6, 2, "minimin")
}
\keyword{ anova }
|
76402e703220b12701b72ea715dd38c6d17f0fd2 | f1b6fe3a3b3d08c5ea069ec4b548cc0ff2a9b79b | /sa_GenSA_BRCA.R | bb2c933118b03e36484697d408cd394f5c396ee3 | [] | no_license | damiwoj/simulated_annealing | 8b4552e82c9db589c413cad021c9f2fad7c70c1a | 9247c3884e367925ee8203ae1ecd5bdfa3870d68 | refs/heads/master | 2021-01-11T21:07:12.687119 | 2017-01-19T18:42:02 | 2017-01-19T18:42:02 | 79,250,278 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,684 | r | sa_GenSA_BRCA.R | options(stringsAsFactors=FALSE)
library(GenSA)
library(deconstructSigs)
library(quadprog)
ReadMutationalProfile <- function(file) {
  # Load a tab-separated mutational profile (first column = mutation types,
  # header row = sample/signature names) and return it as a numeric matrix
  # with mutation types as row names.
  tab <- read.table(file, sep = '\t', header = TRUE, row.names = 1, check.names = FALSE)
  data.matrix(tab)
}
QPsig <- function(tumour.ref, signatures.ref) {
  # Estimate signature weights for one tumour spectrum by quadratic
  # programming: minimize the squared distance between the normalized
  # observation and the signature mixture, subject to sum(w) == 1 and w >= 0.
  #
  # tumour.ref: numeric mutation-type counts for one sample.
  # signatures.ref: signatures x mutation-types table (deconstructSigs layout).
  # Returns: numeric weight vector, one entry per signature.
  #
  # This code was adapted from Lynch, F1000 Research, 2016.
  # The QPsig function is designed to take input in similar format to the
  # whichSignatures function in the deconstructSigs library for easy interchange
  # between the two. The output is limited to the equivalent of the `weights'
  # slot in the output of whichSignatures.
  # we normalize the observations so that they sum to 1
  obs<-as.numeric(tumour.ref/sum(tumour.ref))
  # to allow use of the deconstructSigs objects we convert to matrices
  signatures.ref<-as.matrix(signatures.ref)
  # we use the factorized version of solve.QP: pass R^-1 where
  # Dmat = t(R) %*% R (see ?quadprog::solve.QP for the required form) -
  # otherwise we would set Dmat = signatures.ref %*% t(signatures.ref) as
  # indicated in the article
  Rinverse <- backsolve(chol(signatures.ref %*% t(signatures.ref)),
                        diag(dim(signatures.ref)[1]))
  # we also need to define the linear part of the function that we are minimizing
  dvec <- (obs) %*% t(signatures.ref)
  # constraints: the first column of Amat enforces sum(w) == 1 (equality,
  # meq = 1 below); the identity columns enforce each weight >= 0
  Amat <- cbind(rep(1,dim(Rinverse)[1]), diag(dim(Rinverse)[1]))
  bvec <- c(1,rep(0,dim(Rinverse)[1]))
  # we now call the solve.QP function from the quadprog library
  myQP<-quadprog::solve.QP(Dmat = Rinverse, dvec = dvec, Amat = Amat, bvec = bvec, meq = 1,
                           factorized = TRUE)
  return(myQP$solution)
}
DecompositionError <- function(observation, weights, signatures) {
  # Root of the sum of squared residuals between the observed spectrum and
  # the reconstruction `signatures %*% weights`. NA weights are treated as 0
  # so missing signatures simply contribute nothing.
  w <- replace(weights, is.na(weights), 0)
  residuals <- observation - signatures %*% w
  sqrt(sum(residuals^2))
}
RunGenSA <- function(spectrum, signatures, control) {
  # Wrapper around GenSA: search for signature weights, each bounded to
  # [0, 1], minimizing the distance between `spectrum` and the sum-normalized
  # reconstruction of the signature mixture. `control` is passed straight
  # through to GenSA. Returns the GenSA result object.
  n_sigs <- ncol(signatures)
  objective <- function(weights) {
    reconstruction <- signatures %*% weights
    reconstruction <- reconstruction / sum(reconstruction)
    sqrt(sum((spectrum - reconstruction)^2))
  }
  GenSA(par = NULL, lower = rep(0.0, n_sigs), upper = rep(1.0, n_sigs),
        fn = objective, control = control)
}
RunGenSATrialsWithDesiredError <- function(spectrum, signatures, N, desired.error) {
  # Run N independent GenSA searches that each stop once `desired.error`
  # (slightly above the optimum) is reached, sampling the landscape of
  # near-optimal decompositions. Returns list(error = per-trial error,
  # weights = signatures x trials matrix, columns normalized to sum to 1).
  trial_error <- rep(0.0, N)
  trial_weights <- matrix(0.0, nrow = ncol(signatures), ncol = N,
                          dimnames = list(colnames(signatures),
                                          paste0('Trial_', seq_len(N))))
  for (trial in seq_len(N)) {
    # Short runs at reduced temperature to explore parameters around the
    # optimal solution (the objective is convex, so no other local minimum);
    # stop after 1000 non-improving steps or once desired.error is reached.
    sa <- RunGenSA(spectrum, signatures,
                   list(maxit = 1000, temperature = 100,
                        threshold.stop = desired.error,
                        nb.stop.improvement = 1000, simple.function = TRUE))
    trial_error[trial] <- sa$value
    trial_weights[, trial] <- sa$par / sum(sa$par)  # normalize weights to 1
  }
  list(error = trial_error, weights = trial_weights)
}
PlotCombinedWeights <- function(gsat, gsa.weights, qp.weights, deSig.weights, nmf.weights, spectrum, cosmic, title) {
  # Overlay the signature-weight estimates of the different methods in one
  # figure: a boxplot of the GenSA trials with relaxed error (gsat$weights)
  # plus points for the optimal GenSA, QP, deconstructSigs and NMF weights.
  #
  # gsat: list(error, weights) as returned by RunGenSATrialsWithDesiredError.
  # *.weights: one weight vector per method (NAs tolerated).
  # spectrum, cosmic: observation and signature matrix used for the errors.
  # title: plot title.
  RSSE <- function(weights) {
    DecompositionError(spectrum,weights,cosmic) #squared root of squared sum of errors
  }
  #find max weight to set y-axis limits
  weight.max = max(gsat$weights, gsa.weights, qp.weights, deSig.weights, nmf.weights, na.rm=T)
  #generate subtitle listing error of each method
  subtitle = sprintf("GenSA+error(median) %.5f, GenSA %.5f, QP %.5f, deconstructSigs %.5f, NMF %.5f",
                     median(gsat$error), RSSE(gsa.weights), RSSE(qp.weights), RSSE(deSig.weights), RSSE(nmf.weights))
  #boxplot of GenSA trials with increased error; x labels are the second
  #word of each row name (e.g. the signature number)
  names_sigs = sapply(strsplit(rownames(gsat$weights), ' '), '[', 2)
  boxplot(t(gsat$weights), names=names_sigs, col='gray90', xlab='Signatures', ylab='Weights', main=title, sub=subtitle, las=2, ylim=c(0,1.05*weight.max))
  #show optimal results of the different methods as overlaid points
  points(nmf.weights, pch=6, col='darkorange')
  points(deSig.weights, pch=2, col='limegreen')
  points(qp.weights, pch=13, col='deepskyblue')
  points(gsa.weights, pch=1, col='firebrick1')
  legend('topright', c('GenSA w/ error','GenSA','QP','deconstructSigs','NMF'),
         col=c('gray90','firebrick1','deepskyblue','limegreen','darkorange'), pch=c(15,1,13,2,6))
}
################################################################################
# Main analysis script: console output is redirected to a log file for the run.
sink('sa_GenSA_BRCA.output')
#BRCA signatures (COSMIC signature numbers retained for this cancer type)
selected_sigs = c(1,2,3,5,6,8,13,17,18,20,26,30)
### Read data ###
#samples are in columns, mutation types in rows
#cosmic matrix was downloaded from COSMIC website (96x30)
cosmic = ReadMutationalProfile('data/cosmic_signatures_probabilities.txt')
cosmic = cosmic[,selected_sigs]
#spectra are precomputed mutational profiles for all 560 patients and for a combined dataset (96x561)
#other data can be provided
spectra = ReadMutationalProfile('data/mutation_spectra_BRCA-EU.txt')
#match mutation type order to cosmic order as in file
spectra = spectra[match(rownames(cosmic),rownames(spectra)), ,drop=F]
### Other methods first ###
### Quadratic programming
#run QPsig for each column of spectra (it expects signatures in deconstructSigs form)
#gets a 30x561 matrix of signature weights for each sample
#some weights are negative, but very very close to 0 (e.g. -1e-20)
print('Quadratic programming')
print(date())
QP = apply(spectra, 2, QPsig, t(cosmic))
print(date())
rownames(QP) = colnames(cosmic)
write.csv(QP, file='weights_BRCA/QP.csv')
### deconstructSigs
#Cosmic and signatures.cosmic matrices contains exactly the same data, but are differently organized.
#They are just differently ordered, so watch out when passing parameters.
#Match mutations order to that in package signatures!
#set 'signature.cutoff' 0.0, so we do not miss any signature
#ncol(spectra)
print("deconstructSigs")
print(date())
#one column of weights per sample, bound into a single matrix
deSig = do.call(cbind, lapply(seq(ncol(spectra)), function(i) {
	ds = whichSignatures(tumor.ref=as.data.frame(t(spectra[match(colnames(signatures.cosmic),rownames(spectra)),i,drop=F])),
		signatures.ref=signatures.cosmic, contexts.needed=T, signature.cutoff=0.0, associated=row.names(signatures.cosmic)[selected_sigs])
	return(t(ds$weights))
}))
deSig = deSig[selected_sigs,] #deconstructSigs still output all signatures, so we have to limit it again
print(date())
rownames(deSig) = colnames(cosmic)
write.csv(deSig, file='weights_BRCA/deSig.csv')
### NMF
print("NMF - read and process data")
print(date())
#read the supplementary table 21 by S. Nik-Zainal (Nature, 2016)
#containing weights of signatures identified by NMF
nmf.data = read.csv('data/Table_SignatureContribution__SupplTab21.csv', check.names=F)
nmf.data_samples = nmf.data[,'Sample Name']
#NOTE(review): columns 2:13 are taken -- the comment says "remove first and
#last column", which only holds if the table has exactly 14 columns; confirm
#against the supplementary file's layout.
nmf.data = as.matrix(nmf.data[,2:13]) #remove first and last column (not needed) and transform to matrix
rownames(nmf.data) = nmf.data_samples
nmf.data = rbind(nmf.data, All=colSums(nmf.data)) #add summary row and name it 'All' (the same as in spectra)
nmf.data = nmf.data[match(colnames(spectra),rownames(nmf.data)),] #match sample names to that in file in spectra
#row-normalize: each column is divided elementwise by the row sums, so every
#sample's signature weights sum to 1
nmf.data = apply(nmf.data, 2, "/", rowSums(nmf.data))
#create a new matrix with weights for all signatures and update based on nmf.data
NMF = matrix(NA, nrow=ncol(cosmic), ncol=nrow(nmf.data),
	dimnames=list(colnames(cosmic), rownames(nmf.data)))
NMF[match(colnames(nmf.data), colnames(cosmic)),] = t(nmf.data)
write.csv(NMF, file='weights_BRCA/NMF.csv')
print(date())
### Simulated Annealing ###
### Generalized Simulated Annealing by Y. Xiang (The R Journal, 2013) implemented in R package 'GenSA'
#run long SA to determine best solution using default params
set.seed(1234)
print("Simulated annealing - long run")
print(date())
i = 0 #just to print status
#NOTE(review): the <<- below mutates the counter in the enclosing (global)
#environment purely for progress printing; fine for a linear script.
GSA = apply(spectra, 2, function(spectrum) {
	i <<- i+1 #just to print status
	print(c(colnames(spectra)[i],date())) #just to print status
	gsa = RunGenSA(spectrum, cosmic, control=list(simple.function=T))
	gsa$par/sum(gsa$par) #normalize to 1
})
print(date())
rownames(GSA) = colnames(cosmic)
write.csv(GSA, file='weights_BRCA/GenSA.csv')
### Test GenSA different levels of allowed error (only on combined data)
print("Simulated annealing - testing different errors")
print(date())
set.seed(1234)
pdf('plots/boxplot_BRCA_all_diff_errors.pdf', height=11, width=8.5)
par(mfrow=c(3,1))
#reference error of the optimal GenSA solution on the combined 'All' spectrum
error.GSA.all = DecompositionError(spectra[,'All'], GSA[,'All'], cosmic)
for(error.increase in c(0.001,0.01,0.05,0.1)) {
	print(c(error.increase, date()))
	gsat = RunGenSATrialsWithDesiredError(spectra[,'All'], cosmic, 1000, (1+error.increase)*error.GSA.all)
	PlotCombinedWeights(gsat, GSA[,'All'], QP[,'All'], deSig[,'All'], NMF[,'All'], spectra[,'All'], cosmic,
		paste0('Boxplots based on SA for optimal GenSA error * ',1+error.increase,' (',100*error.increase,'%)','\n','All samples (12 sigs: 1,2,3,5,6,8,13,17,18,20,26,30)'))
}
par(mfrow=c(1,1))
dev.off()
print(date())
### Simulated annealing with increased error by 1% and 5% for all samples
for(error.increase in c(0.01,0.05)) {
	print(sprintf("Simulated annealing with error increase by %.2f fold", 1+error.increase))
	dir_weight = paste0('weights_BRCA/GenSA_trials_error_',error.increase)
	dir.create(dir_weight, showWarnings = FALSE)
	print(date())
	set.seed(1234)
	pdf(paste0('plots/boxplot_BRCA_samples_error_',error.increase,'.pdf'), height=11, width=8.5)
	par(mfrow=c(3,1))
	for(i in seq(ncol(spectra))) {
		#NOTE(review): `sample` shadows base::sample inside this loop
		sample = colnames(spectra)[i]
		print(c(sample,date()))
		error.GSA = DecompositionError(spectra[,i], GSA[,i], cosmic)
		gsat = RunGenSATrialsWithDesiredError(spectra[,i], cosmic, 100, (1+error.increase)*error.GSA)
		PlotCombinedWeights(gsat, GSA[,i], QP[,i], deSig[,i], NMF[,i], spectra[,i], cosmic,
			paste0(sample,'(optimal GSA error * ',1+error.increase,')\n','(12 sigs: 1,2,3,5,6,8,13,17,18,20,26,30)'))
		write.csv(gsat$weights, paste0(dir_weight,'/',sample,'.csv'))
	}
	par(mfrow=c(1,1))
	dev.off()
	print(date())
}
sink()
|
d01139a0b0d6b1783c4f2f5c569f14e3eda6600e | c1882af529b91cfe1076d7322a4b64e4316bdb4f | /03-basic_aggregates.r | ecd382fadf72662c294295aef2145802850ff462 | [] | no_license | nings-archive/NUS-Kent-Ridge-Bird-Count | 3601170b9cf1a54d26b7668db395786057324f28 | cde95022bad7310032b97390d0bb384dbd4537ec | refs/heads/master | 2021-03-27T17:32:36.319090 | 2018-03-13T12:39:02 | 2018-03-13T12:39:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,079 | r | 03-basic_aggregates.r | #! /usr/bin/env Rscript
# Calculates basic aggregate values:
# 1. Total observations
# 2. Total selected 5 species observations
# 3. Observations for each of the 5 species
# 4. Histogram for (3)
library(ggplot2)  # bar chart of observations per species
library(plyr)     # count(): frequency tables
df <- read.csv('data.csv')  # raw survey records, one row per observation
filter_species <- function(df, species) {
  # Return the rows of `df` for the given Bird.Species that also have a
  # non-missing Distance.Bin.
  # The two conditions are combined inside which(): rows with NA species
  # compare to NA under `==`, and subsetting by an NA logical would otherwise
  # inject phantom all-NA rows that the second filter then had to scrub.
  df[which(df$Bird.Species == species & !is.na(df$Distance.Bin)), ]
}
count_locations <- function(df) {
  # Number of distinct survey locations present in `df`.
  # Equivalent to nrow(plyr::count(df$Location.Hash)) -- count() lists each
  # unique value (including NA) exactly once -- but needs only base R.
  length(unique(df$Location.Hash))
}
all_obs <- df[!is.na(df$Bird.Species), ]
# BUG FIX: the mask here was `!is.na(df$Distance.Bin)` -- built from the full
# data frame, whose length no longer matches the already-subset all_obs, so
# the row filter was misaligned (and over-ran, appending NA rows). Use
# all_obs's own column.
all_obs <- all_obs[!is.na(all_obs$Distance.Bin), ]
cat('All valid observations:', nrow(all_obs), fill=TRUE)
cat('Locations with all valid observations:',
    count_locations(all_obs), fill=TRUE)
myna <- filter_species(df, 'Javan myna')
bulbul <- filter_species(df, 'Yellow-vented bulbul')
sunbird <- filter_species(df, 'Olive-backed sunbird')
pigeon <- filter_species(df, 'Rock pigeon')
oriole <- filter_species(df, 'Black-naped oriole')
cat('Javan myna observations:', nrow(myna), fill=TRUE)
cat('Yellow-vented bulbul observations:', nrow(bulbul), fill=TRUE)
cat('Olive-backed sunbird observations:', nrow(sunbird), fill=TRUE)
cat('Rock pigeon observations:', nrow(pigeon), fill=TRUE)
cat('Black-naped oriole observations:', nrow(oriole), fill=TRUE)
cat('Total observations:',
    nrow(myna) + nrow(bulbul) + nrow(sunbird) + nrow(pigeon) + nrow(oriole),
    fill=TRUE
)
selected_obs <- rbind(myna, bulbul, sunbird, pigeon, oriole)
cat('Locations with observations of selected species:',
    count_locations(selected_obs), fill=TRUE)
counts_per_bird <- count(selected_obs$Bird.Species)
names(counts_per_bird) <- c('Bird Species', 'Observations')
g <- ggplot(selected_obs) +
    geom_bar(aes(x=Bird.Species, fill=Bird.Species), stat='count') +
    labs(y='Observations', x='Bird Species') +
    guides(fill=FALSE) # remove legend
# TODO: sort bars in descending/ascending order
# Pass the plot explicitly: in a non-interactive Rscript run `g` is never
# printed, so relying on ggsave's last-plot default is fragile.
ggsave('03-basic_aggregates.png', plot=g, width=6, height=4)
|
7a67fa88c2fae173f6305a9b55731a76bc3803cc | 451b472c863d646ad87d20891750b3c52efddf35 | /logisticReg/man/dot-orthogonal_basis.Rd | 218a0539bbbe5fdc9ed48d07cba1ca546e28c35e | [
"MIT"
] | permissive | linnykos/logisticReg | 4969b03ad39dbdd81ed47f983f3a2746ae040b40 | 0da93fa879d334e23ef1e991e8cda41c0edd3ce9 | refs/heads/master | 2022-03-17T03:20:25.568275 | 2019-09-17T15:45:59 | 2019-09-17T15:45:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 532 | rd | dot-orthogonal_basis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_algebra.R
\name{.orthogonal_basis}
\alias{.orthogonal_basis}
\title{Construct the orthogonal basis}
\usage{
.orthogonal_basis(mat)
}
\arguments{
\item{mat}{matrix}
}
\value{
matrix
}
\description{
Given a matrix, construct an orthonormal basis (i.e., a basis
whose columns are orthogonal to each other and have unit norm)
of the vector space orthogonal to the column space of \code{mat}.
}
\details{
This function uses random number generation.
}
|
bed1513c4b59cddff65eac56095585fabfde2ebe | 4d3c3ade00c2543400c76f811879c5beb9c92719 | /Application/03_spatial_xwalk.R | 7757b9727394f5dedcd03e389591891896a87226 | [] | no_license | tilluz/geomatching_open | 68a0e5923fa922bf8054b005539a858c197da329 | 7f34851e3abbbe9e8c789f4bab3e38c8dd8f5296 | refs/heads/master | 2021-01-04T10:20:37.688409 | 2020-08-18T14:26:48 | 2020-08-18T14:26:48 | 240,503,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,333 | r | 03_spatial_xwalk.R | #######################################################################################
### This file is designed to
### 1) create a crosswalk that allows an allocation across different spatial levels ###
#######################################################################################
### Load required packages
library(data.table)
library(geosphere)
### Load required functions
# NOTE(review): `midsave` and `main` directory paths are expected to come from
# 00_directories.R -- confirm before running standalone.
source('00_directories.R')
source('helpers/functions_census.R')
### Read data files
setwd(midsave)
crca_info <- read.csv('crca_info.csv'); setnames(crca_info,'OBJECTID_1','com') # Generated in 'ind_allocation_and_dep_vars.R'
tow2com <- fread('com_allocation_exact.csv') # Generated in 'tow_to_com.R'
setwd(main)
### Combine the tower-to-commune allocation and the revised region code
# all.y = TRUE keeps every tower row even when no commune info matches
xwalk <- as.data.table(merge(crca_info, tow2com, by = 'com', all.y = T))
setnames(xwalk, 'site_id', 'tower')
### Create dakar specific centroid and calculate distance on tower level
centroid <- xwalk[xwalk$DEPT == "DAKAR",]
centroid <- data.table(lon = mean(centroid$lon), lat = mean(centroid$lat))
### Calculate the distance of the towers to the centroid of Dakar to get average distance of calls and sms
# distHaversine returns meters; divide by 1000 for kilometers
xwalk$dist2d <- distHaversine(xwalk[,list(lon,lat)],centroid)/1000
rm(centroid)
### Align format of crosswalk
# Change variable type
xwalk$REG <- as.character(xwalk$REG)
xwalk$DEPT <- as.character(xwalk$DEPT)
xwalk$ARR <- as.character(xwalk$ARR)
# Correct region names
xwalk$REG <- gsub('-', '_', xwalk$REG)
# Correct department names
xwalk$DEPT <- gsub('-', '_', xwalk$DEPT)
# BUG FIX: this line was `gsub('?''', '', ...)` -- the apostrophe to strip had
# been mangled by an encoding round-trip, leaving two adjacent string literals
# (a syntax error) and the note "has to be adjusted every time". Strip both
# the straight apostrophe and the curly one (U+2019) explicitly instead.
xwalk$DEPT <- gsub("['\u2019]", '', xwalk$DEPT)
# Correct arrondissement names
xwalk$ARR <- gsub('-', '_', xwalk$ARR)
xwalk$ARR[xwalk$ARR == "DAROU MINAME II"] <- "DAROU MINAM II"
xwalk$ARR[xwalk$ARR == "MEUR MOMAR SARR"] <- "KEUR MOMAR SARR"
xwalk$ARR[xwalk$ARR == "NDIAYE BERESS"] <- "NDIAYE MBERESS"
xwalk$ARR[xwalk$ARR == "SARA BIDJI"] <- "SARE BIDJI"
# Change spaces for underlines
xwalk$REG <- gsub(' ', '_', xwalk$REG)
xwalk$DEPT <- gsub(' ', '_', xwalk$DEPT)
xwalk$ARR <- gsub(' ', '_', xwalk$ARR)
# Write file
setwd(midsave)
write.csv(xwalk, 'spatial_xwalk_exact.csv', row.names = F)
setwd(main)
############################################################################################################
|
4eb4bf57b79f78471e0e2beb7b2677539ce2fc11 | 67de61805dd839979d8226e17d1316c821f9b1b4 | /inst/models/enormous/lib/stderrlib.R | 6360213c3b83a6dbbb64b3c63d089b8feb232fc0 | [
"Apache-2.0"
] | permissive | falkcarl/OpenMx | f22ac3e387f6e024eae77b73341e222d532d0794 | ee2940012403fd94258de3ec8bfc8718d3312c20 | refs/heads/master | 2021-01-14T13:39:31.630260 | 2016-01-17T03:08:46 | 2016-01-17T03:08:46 | 49,652,924 | 1 | 0 | null | 2016-01-14T14:41:06 | 2016-01-14T14:41:05 | null | UTF-8 | R | false | false | 17,422 | r | stderrlib.R | # tools for running standard error simulation studies
# Threshold above which an information matrix is treated as too
# ill-conditioned to trust (used by the summarize* helpers below).
condNumLimit <- 1e7

# Condition number of `hess` computed from its singular values.
# Returns the sentinel 1e16 when the SVD fails or any singular value is
# non-positive (numerically singular matrix).
calcCondNum <- function(hess) {
	# silent=TRUE: a failed SVD is an expected, handled outcome here
	# (consistent with the other try() calls in this file); inherits() avoids
	# the methods::is() dependency.
	d <- try(svd(hess, nu=0, nv=0)$d, silent=TRUE)
	if (inherits(d, "try-error")) return(1e16)
	if (all(d > 0)) {
		max(d)/min(d)
	} else {
		1e16
	}
}
# Monte-Carlo phase: fit the model produced by modelGen() `reps` times
# (seed = replication number) and record, per replication, the EM cycle count,
# the condition number of the meat information matrix, and the parameter
# estimates. Failed replications get condnum 1e16 and/or NA estimates.
# NOTE(review): maxCondNum is accepted but not used inside this function;
# filtering by condition number happens in getMCdata -- confirm before removing.
MCphase <- function(modelGen, reps=500, verbose=TRUE, maxCondNum) {
	emcycles <- rep(NA, reps)
	condnum <- rep(NA, reps)
	# est is lazily sized once the first successful fit reveals the number of
	# free parameters (see the all(dim(est) == 1) check below)
	est <- matrix()
	for (rep in 1:reps) {
		set.seed(rep)
		model <- modelGen()
		em <- model$compute
		# append an information/meat step so each fit reports its Hessian
		getCondNum <- list(mxComputeOnce('fitfunction', 'information', 'meat'),
			 mxComputeReportDeriv())
		plan <- mxComputeSequence(c(em, getCondNum))
		model$compute <- plan
		fit <- try(mxRun(model, silent=TRUE, suppressWarnings = TRUE), silent=TRUE)
		if (inherits(fit, "try-error")) {
			print(fit)
			condnum[rep] <- 1e16
			next
		} else if (fit$output$status$code != 0) {
			print(paste("status code", fit$output$status$code))
			next
		}
		emstat <- fit$compute$steps[[1]]$output
		emcycles[rep] <- emstat$EMcycles
		condnum[rep] <- calcCondNum(fit$output$hessian)
		par <- omxGetParameters(fit)
		if (any(is.na(par))) {
			print(par)
			condnum[rep] <- 1e16
			next
		}
		if (verbose) print(paste(c(rep, emstat, round(condnum[rep])), collapse=" "))
		if (all(dim(est) == 1)) {
			est <- matrix(NA, length(par), reps)
			rownames(est) <- names(par)
		}
		est[,rep] <- par
	}
	list(condnum=condnum, est=est)
}
# Load (or, if absent / recompute=TRUE, run and cache) the Monte-Carlo
# reference quantities for a benchmark named `name`:
#   mcMask    -- replications kept after the condition-number filter
#   mcBias    -- mean estimate minus `correct`
#   mcHessian -- inverse of half the empirical parameter covariance
#   mcSE      -- Monte-Carlo standard errors
# These are saved to data/<name>.rda and loaded into `envir` (by default the
# caller's frame), so the summarize* helpers can use them as globals.
getMCdata <- function(name, modelGen, correct, recompute=FALSE, reps=500,
		      envir=parent.frame(), maxCondNum) {
	if (missing(maxCondNum)) stop("Provide a maxCondNum")
	correct <- c(correct)
	rda <- paste("data/", name, ".rda", sep="")
	if (!recompute) {
		# look for the cached file relative to two possible working dirs
		if (file.exists(rda)) {
			load(rda, envir=envir)
		} else if (file.exists(paste("models/enormous/", rda, sep=""))) {
			load(paste("models/enormous/", rda, sep=""), envir=envir)
		} else {
			recompute <- TRUE
		}
	}
	if (recompute) {
		got <- MCphase(modelGen, reps, maxCondNum=maxCondNum)
		mcMask <- rep(TRUE, reps)
		if (!is.na(maxCondNum)) {
			mcMask <- !is.na(got$condnum) & got$condnum < maxCondNum
		}
		est <- got$est
		# NOTE(review): the mean uses only the masked replications but the
		# covariance below uses all of them -- confirm this asymmetry is intended.
		mcEst <- apply(est[,mcMask], 1, mean)
		bias <- mcEst - correct
		if (reps < length(correct)) stop("Not enough replications to estimate the Hessian")
		mcCov <- cov(t(est))
		#max(abs(apply(est, 1, sd) - sqrt(diag(mcCov))))
		mcHessian <- solve(mcCov/2)
		mcBias <- bias
		mcSE <- sqrt(2*diag(solve(mcHessian)))
		save(mcMask, mcBias, mcSE, mcHessian, file=rda)
		if (!is.na(maxCondNum)) {
			cat(paste("Note:", sum(!mcMask), "excluded due to condition number\n"))
		}
		cat("Monte-Carlo study complete. Proceeding with accuracy benchmark.\n")
#		stop("stopped here")
		load(rda, envir=envir)  # copy to parent environment
	}
}
# Kullback-Leibler divergence between zero-mean multivariate normals, phrased
# on the parameter-covariance side: the reference covariance is
# solve(mcHessian), a global set by getMCdata.
# NOTE(review): callers pass (H, solve(H)), so `invH` actually receives the
# information matrix itself -- the argument names look swapped; confirm before
# renaming.
mvn_KL_D <- function(invH, H) {
	mc.cov <- solve(mcHessian)
	0.5 * (tr(invH %*% mc.cov) - nrow(H) - log(det(mc.cov) / det(H)))
}
summarizeInfo1 <- function(condnum, emstat=list(EMcycles=NA, semProbeCount=NA),
			  H, standardErrors, cputime, method) {
	# Collapse one fitted replication into the six recorded statistics:
	# cputime, EM cycles, SEM probes, condition number, a KL-based distance of
	# H from the Monte-Carlo Hessian, and the norm of the relative SE error.
	# Globals used: condNumLimit, mcSE, and (via mvn_KL_D) mcHessian.
	n.stats <- 6
	failed <- rep(NA, n.stats)
	# Reject replications whose information matrix is too ill-conditioned.
	if (!is.na(condnum) && condnum > condNumLimit) {
		return(failed)
	}
	kl.dist <- NA
	if (!is.null(H) && all(eigen(H, only.values=TRUE)$val > 0)) {
		# D_KL(mcHessian | H); only meaningful for positive definite H.
		H.inv <- try(solve(H), silent=TRUE)
		if (inherits(H.inv, "try-error")) {
			return(failed)
		}
		kl.dist <- mvn_KL_D(H, H.inv)
	}
	se.norm <- NA
	rel.err <- (standardErrors - mcSE) / mcSE
	if (!is.na(condnum)) {
		if (all(is.finite(rel.err))) {
			se.norm <- norm(rel.err, "2")
		} else {
			# a usable condition number with unusable SEs is suspicious
			print(paste("Method", method, "condition number", condnum, "but some SEs are NA"))
			condnum <- NA
		}
	}
	out <- c(cputime, emstat$EMcycles, emstat$semProbeCount, condnum, kl.dist, se.norm)
	# Defensive check: a malformed emstat could change the record length.
	if (length(out) != n.stats) {
		print('wrong length')
		print(out)
		return(failed)
	}
	out
}
# Summarize a fitted OpenMx model into the six testPhase statistics via
# summarizeInfo1, extracting EM diagnostics when `method` produced them.
summarizeInfo <- function(fitModel, method) {
	emstat <- list(EMcycles=NA, semProbeCount=NA)
	if (length(intersect(c('mr', 'tian', 'agile'), method))) {
		emstat <- fitModel$compute$steps[[1]]$output
	}
	if (fitModel$output$status$code != 0) {
		# BUG FIX: this used a bare `return` (just the function object, not a
		# call), so a failed optimizer status fell through and the full
		# summary below was computed anyway. Return the NA-condnum record.
		return(summarizeInfo1(NA, emstat, NULL, NULL,
				      fitModel$output$cpuTime, method))
	}
	# Prefer the Hessian; fall back to the inverse Hessian for conditioning.
	H <- fitModel$output$hessian
	if (is.null(H)) H <- fitModel$output$ihessian
	condnum <- calcCondNum(H)
	H <- NULL
	# Only pass H along when the problem is reasonably well conditioned.
	if (!is.na(condnum) && condnum < 1e12) {
		if (!is.null(fitModel$output[['hessian']])) {
			H <- fitModel$output[['hessian']]
		}
		if (is.null(H) && !is.null(fitModel$output[['ihessian']])) {
			H <- solve(fitModel$output[['ihessian']])
		}
	}
	summarizeInfo1(condnum, emstat, H, fitModel$output$standardErrors,
		       fitModel$output$cpuTime, method)
}
# Print running means of the testPhase statistics, optionally restricted to
# replications where every method's condition number is below maxCondNum,
# plus per-method counts of excluded (NA-condnum) replications.
summarizeDetail <- function(detail, maxCondNum=NA) {
	mask <- rep(TRUE, dim(detail)[3])
	if (!is.na(maxCondNum)) {
		mask <- apply(is.na(detail['condnum',,]) | detail['condnum',,] < maxCondNum, 2, all)
		# drop=FALSE keeps `detail` a 3-d array even when only one
		# replication survives; without it dim(detail)[3] below is NA and
		# the if() errors.
		detail <- detail[,,mask,drop=FALSE]
	}
	excluded <- 0
	if (dim(detail)[3] > 1) {
		excluded <- apply(detail['condnum',,], 1, function (c) sum(is.na(c)))
	}
	print(round(rbind(excluded, apply(detail, 1:2, mean, na.rm=TRUE)), 4))
	cat(paste(" N=", sum(mask), "\n", sep=""))
}
# Accuracy benchmark: refit the model `reps` times and, for each requested
# standard-error method, record the six statistics (via summarizeInfo) into a
# stats x methods x reps array. Methods covered: SEM variants ('mr', 'tian'),
# 'agile', 'meat', 'sandwich', 'oakes', 'estepH', and numeric 're'.
# NOTE(review): 'oakes' is removed from `methods` on the first line, yet an
# oakes branch below writes to detail[,"oakes",rep] -- that column cannot
# exist after the setdiff; confirm whether the setdiff is a leftover.
testPhase <- function(modelGen, reps = 500, verbose=TRUE, methods=c('agile', 'meat')) {
	methods <- setdiff(methods, "oakes")
	rec <- c('cputime', 'emcycles', 'probes', 'condnum', 'hNorm', 'rdNorm')
	detail <- array(NA, dim=c(length(rec), length(methods), reps),
			dimnames=list(rec, methods, NULL))
	for (rep in 1:reps) {
		warnings()
		set.seed(rep)
		model <- modelGen()
		em <- model$compute
		fit <- NULL   # needed for MLE
		fitfun <- c()
		if (is(em$mstep, "MxComputeSequence")) {
			fitfun <- sapply(em$mstep$steps, function(step) step$fitfunction)
		} else {
			fitfun <- em$mstep$fitfunction
		}
		# --- SEM methods (mr/tian): run EM without acceleration ---
		sem <- intersect(c('mr', 'tian'), methods)
		if (length(sem)) {
			em$accel <- ""
			em$tolerance <- 1e-11
			em$maxIter <- 750L
			for (semType in sem) {
				em$information <- "mr1991"
				em$infoArgs <- list(fitfunction=fitfun, semMethod=semType, semTolerance=sqrt(1e-6))
				plan <- mxComputeSequence(list(
					em,
					mxComputeStandardError(),
					mxComputeReportDeriv()
				))
				model$compute <- plan
				fit <- try(mxRun(model, silent=TRUE, suppressWarnings=TRUE), silent=TRUE)
				if (inherits(fit, "try-error")) {
					print(paste("error in", semType))
					print(fit)
					next
				} else if (fit$output$status$code != 0) {
					print(paste("status code", fit$output$status$code, "without acceleration"))
					break
				} else {
					detail[,semType,rep] <- summarizeInfo(fit, semType)
				}
			}
		}
		# need the MLE
		if (is.null(fit) || inherits(fit, "try-error")) {
			em$tolerance <- 1e-11
			model$compute <- em
			fit <- try(mxRun(model, silent=TRUE, suppressWarnings = TRUE), silent=TRUE)
			if (inherits(fit, "try-error")) {
				print(paste("error finding MLE"))
				print(fit)
				next
			} else if (fit$output$status$code != 0) {
				print(paste("status code", fit$output$status$code))
				next
			}
		}
		# --- agile SEM with Ramsay acceleration ---
		if (length(intersect(methods, "agile"))) {
			em$accel <- 'ramsay1975'
			em$tolerance <- 1e-11
			em$information <- "mr1991"
			em$infoArgs <- list(fitfunction=fitfun, semMethod="agile")
			plan <- mxComputeSequence(list(
				em,
				mxComputeStandardError(),
				mxComputeReportDeriv()
			))
			if (is.null(fit)) fit <- model
			fit$compute <- plan
			# reuse the MLE, if possible
			fit <- try(mxRun(fit, silent=TRUE, suppressWarnings = TRUE), silent=TRUE)
			if (inherits(fit, "try-error")) {
				print(paste("error in agile"))
				print(fit)
				next
			} else if (fit$output$status$code != 0) {
				print(paste("status code", fit$output$status$code, "in agile"))
				next
			} else {
				detail[,"agile",rep] <- summarizeInfo(fit, "agile")
			}
		}
		# --- one-shot information matrices computed at the MLE ---
		if (length(intersect(methods, "meat"))) {
			meat <- mxModel(fit,
					mxComputeSequence(steps=list(
						mxComputeOnce('fitfunction', 'information', "meat"),
						mxComputeStandardError(),
						mxComputeReportDeriv())))
			meat <- mxRun(meat, silent=TRUE)
			detail[,"meat",rep] <- summarizeInfo(meat, "meat")
		}
		if (length(intersect(methods, "sandwich"))) {
			sandwich <- mxModel(fit,
					    mxComputeSequence(steps=list(
						    mxComputeOnce('fitfunction', 'information', "sandwich"),
						    mxComputeStandardError(),
						    mxComputeReportDeriv())))
			sandwich <- mxRun(sandwich, silent=TRUE)
			detail[,"sandwich",rep] <- summarizeInfo(sandwich, "sandwich")
		}
		if (length(intersect(methods, c("oakes")))) {
			em$information <- "oakes1999"
			em$infoArgs <- list(fitfunction=fitfun)
			plan <- mxComputeSequence(list(
				em,
				mxComputeStandardError(),
				mxComputeReportDeriv()
			))
			fit$compute <- plan
			# reuse the MLE
			fit <- try(mxRun(fit, silent=TRUE, suppressWarnings = TRUE), silent=TRUE)
			if (inherits(fit, "try-error")) {
				# NOTE(review): message says "agile" but this is the oakes
				# branch -- looks copy-pasted; confirm before changing the log text
				print(paste("error in agile"))
				print(fit)
				next
			} else if (fit$output$status$code != 0) {
				print(paste("status code",fit$output$status$code,"in agile"))
				next
			} else {
				detail[,"oakes",rep] <- summarizeInfo(fit, "oakes")
			}
		}
		if (length(intersect(methods, "estepH"))) {  # should be Mstep, oops
			estepH <- mxModel(fit,
					  mxComputeSequence(steps=list(
						  mxComputeOnce(em$expectation, 'scores'),
						  mxComputeOnce(fitfun, 'information', "hessian"),
						  mxComputeStandardError(),
						  mxComputeReportDeriv())))
			estepH <- mxRun(estepH, silent=TRUE)
			detail[,"estepH",rep] <- summarizeInfo(estepH, "estepH")
		}
		if (length(intersect(methods, "re"))) {
			re <- mxModel(fit,
				      mxComputeSequence(steps=list(
					      mxComputeNumericDeriv(stepSize = 1e-3, iterations = 2),
					      mxComputeStandardError(),
					      mxComputeReportDeriv())))
			re <- mxRun(re, silent=TRUE)
			detail[,"re",rep] <- summarizeInfo(re, "re")
		}
		if (verbose) {
			summarizeDetail(detail)
		}
	}
	detail
}
quantifyAsymmetry <- function(info) {
	# Asymmetry score for an (approximate) information matrix: split `info`
	# into its symmetric and antisymmetric parts, whiten the antisymmetric
	# part with the Cholesky factor of the inverse symmetric part, and report
	# the spectral norm of the result. Returns NA when the symmetric part
	# cannot be inverted or factored.
	sym.part <- (info + t(info)) / 2
	whitener <- try(chol(solve(sym.part)), silent=TRUE)
	if (inherits(whitener, "try-error")) {
		return(NA)
	}
	antisym.part <- (info - t(info)) / 2
	norm(whitener %*% antisym.part %*% whitener, type="2")
}
# Collapse one agile-SEM fit into four statistics: condition number of the
# inverse Hessian, asymmetry of the raw (pre-symmetrized) information matrix
# pulled from the compute plan's debug output, a KL-based distance of the
# symmetrized H from the Monte-Carlo Hessian, and the norm of the relative SE
# error. Globals used: condNumLimit, mcSE, mvn_KL_D (mcHessian).
summarizeAgile <- function(fit) {
	numReturn <- 4
	condnum <- calcCondNum(fit$output$ihessian)
	if (is.null(condnum)) condnum <- NA
	if (is.na(condnum) || (!is.na(condnum) && condnum > condNumLimit)) return(rep(NA, numReturn))
	H <- fit$compute$steps[[1]]$debug$outputInfo
	if (is.null(H)) return(rep(NA, numReturn))
	# Jamshidian (2000) defined this in terms of the inverse Hessian
	# even though it seems to work regardless of the inverse.
	asym <- quantifyAsymmetry(solve(H))
	# max(abs((H + t(H))/2 - solve(fit$output[['ihessian']])))  # == 0
	H <- (H + t(H))/2
	normH <- NA
	if (!is.null(H) && all(eigen(H, only.values =TRUE)$val > 0)) {
		# normH <- norm(H - mcHessian, "2")
		# normH <- norm(H %*% solve(mcHessian) - diag(nrow(H)), "2")
		# D_KL(H | mcHessian) -- backwards
		# normH <- .5*(tr(solve(mcHessian) %*% H) - nrow(H) - log(det(H)/det(mcHessian)))
		# D_KL(mcHessian | H)
		iH <- try(solve(H), silent=TRUE)
		if (is(iH, "try-error")) return(rep(NA, numReturn))
		normH <- mvn_KL_D(H, iH)
	}
	normRd <- NA
	rd <- (fit$output$standardErrors - mcSE) / mcSE
	if (all(is.finite(rd))) {
		normRd <- norm(rd, "2")
	}
	c(condnum, asym, normH, normRd)
}
summarizeASEM <- function(detail) {
	# Tabulate ASEM trial results: per-target exclusion count followed by
	# interleaved mean/variance column pairs for each recorded statistic.
	excluded <- apply(detail[, 'condnum', ], 1, function(cn) sum(is.na(cn)))
	stat.mean <- apply(detail, 1:2, mean, na.rm=TRUE)
	stat.var <- apply(detail, 1:2, var, na.rm=TRUE)
	grid <- cbind(excluded, stat.mean, stat.var)
	# reorder so each statistic's mean is immediately followed by its variance
	col.order <- c(1, 2, 6, 3, 7, 4, 8, 5, 9)
	print(round(grid[, col.order], 4))
}
# Study how the agile SEM result depends on the noise target: for each of a
# (shuffled) grid of log-scale targets, refit with that noiseTarget and record
# the four summarizeAgile statistics into a targets x stats x reps array.
studyASEM <- function(modelGen, reps = 100, verbose=TRUE) {
	targets=c(seq(-8.1, -3.9, .2), seq(-5.8, -4.4, .2))
	# should not see any order effects, but just to check
	targets <- targets[order(runif(length(targets)))]
	rec <- c('condnum', 'asym', 'hNorm', 'rdNorm')
	detail <- array(NA, dim=c(length(targets), length(rec), reps),
			dimnames=list(targets, rec, NULL))
	for (rep in 1:reps) {
		set.seed(rep)
		model <- modelGen()
		em <- model$compute
		em$tolerance <- 1e-10  # this only affects the MLE, not the individual trials
		em$information <- "mr1991"
		fitfun <- c()
		if (is(em$mstep, "MxComputeSequence")) {
			fitfun <- sapply(em$mstep$steps, function(step) step$fitfunction)
		} else {
			fitfun <- em$mstep$fitfunction
		}
		fit <- NULL
		for (tx in 1:length(targets)) {
			# restart from the unfitted model after a failed trial
			if (is.null(fit) || inherits(fit, "try-error")) fit <- model
			em$infoArgs=list(fitfunction=fitfun, semMethod="agile", semDebug=TRUE,
				noiseTarget=exp(targets[tx]), semFixSymmetry=TRUE)
			plan <- mxComputeSequence(list(
				em,
				mxComputeStandardError(),
				mxComputeReportDeriv()
			))
			fit$compute <- plan
			fit <- try(mxRun(fit, silent=TRUE), silent=TRUE)
			if (inherits(fit, "try-error")) {
#				print(paste("error in agile"))
#				print(fit)
				next
			} else {
				detail[tx,,rep] <- summarizeAgile(fit)
			}
		}
		if (verbose) summarizeASEM(detail)
	}
	detail
}
# Probe the smoothness of the SEM mapping: refit one known-positive-definite
# replication with a grid of probe step sizes, then, per free parameter, fit
# diff ~ 1/mid^2 to the recorded SEM differences. Returns the per-parameter
# data, the lm fits, and their R-squared values. Uses the global mcMask.
checkSmoothness <- function(mkmodel, probePoints=50) {
	set.seed(which(mcMask)[1])   # any positive definite model
	model <- mkmodel()
	em <- model$compute
	fitfun <- c()
	if (is(em$mstep, "MxComputeSequence")) {
		fitfun <- sapply(em$mstep$steps, function(step) step$fitfunction)
	} else {
		fitfun <- em$mstep$fitfunction
	}
	em$information <- "mr1991"
	em$tolerance <- 1e-9
	em$infoArgs <- list(fitfunction='fitfunction', semDebug=TRUE,
			    semMethod=seq(.0005, .01, length.out=probePoints))
	model$compute <- em
	model <- mxRun(model, silent=TRUE)
	em <- model$compute
	# probe history recorded by the compute plan's debug output
	phl <- em$debug$paramHistLen
	probeOffset <- em$debug$probeOffset
	semDiff <- em$debug$semDiff
	upper <- 20
	modelfit <- list()
	result <- data.frame()
	for (vx in 1:length(model$output$estimate)) {
		len <- phl[vx]
		offset <- probeOffset[1:len, vx]
		dd <- semDiff[1:(len-1), vx]
		# midpoints between consecutive probe offsets
		mid <- offset[1:(len-1)] + diff(offset)/2
		# keep closely spaced probes with differences below the cap
		mask <- abs(diff(offset)) < .01 & dd < upper
		df <- data.frame(mid=mid[mask], diff=dd[mask])
		m1 <- lm(diff ~ 0 + I(1/mid^2), data=df)
		modelfit[[vx]] <- m1
		df$model <- predict(m1)
		result <- rbind(result, cbind(vx=vx, vname=names(model$output$estimate)[vx], df))
	}
	list(result=result, fits=modelfit, modelfit=sapply(modelfit, function(m) summary(m)$r.squ))
}
# Manual plotting snippet (disabled): shows the four worst-fitting parameters
# from a checkSmoothness() result stored in `smooth`. Run by hand as needed.
if (0) {
	# the worst fitting
	ggplot(subset(smooth$result, vx %in% order(smooth$modelfit)[1:4])) +
		geom_point(aes(mid, diff), size=2) + geom_line(aes(mid, model), color="green") +
		facet_wrap(~vname) + labs(x="x midpoint")
}
# Slow version:
#
# if (length(intersect(methods, "oaks"))) {
# require(numDeriv)
# stopifnot(sum(fit$item$free) == length(fit$output[['estimate']]))
#
# ejacob <- jacobian(function(eitemNew) {
# eitem <- fit$item$values
# eitem[fit$item$free] <- eitemNew
# pm <- fit
# pm$expectation$EstepItem <- eitem
# pm$compute <- mxComputeSequence(list(
# mxComputeOnce('expectation', 'scores'),
# mxComputeOnce('fitfunction', 'gradient'),
# mxComputeReportDeriv()))
# pm <- mxRun(pm, silent=TRUE)
# grad <- pm$output$gradient
# # print(grad)
# grad
# }, fit$item$values[fit$item$free],
# method="simple", method.args=list(eps=1e-3)
# # method="Richardson", method.args=list(r=2, d=1e-3, eps=1e-3)
# )
#
# ejacob <- (ejacob + t(ejacob))/2 # usually almost symmetric anyway
#
# estepH <- mxModel(fit,
# mxComputeSequence(steps=list(
# mxComputeOnce(em$expectation, 'scores'),
# mxComputeOnce(fitfun, 'information', "hessian"),
# mxComputeReportDeriv())))
# estepH <- mxRun(estepH, silent=TRUE)
# H <- estepH$output$hessian + ejacob
# se <- sqrt(2*diag(solve(H)))
# ev <- eigen(H, TRUE, only.values=TRUE)$val
#
# detail[,"oaks",rep] <-
# summarizeInfo1(condnum=max(ev)/min(ev), H=H, standardErrors=se, cputime=NA, method="oaks")
# }
|
5d66078a3a88174b4a0540b93b929abb02db0601 | 33270f2836f639b1e40164f01330158fee726314 | /man/predict.MoE_expert.Rd | 50d4fbf20fd1bd7341e863d281d3d79a00b9dfd0 | [] | no_license | Keefe-Murphy/MoEClust | eca02f4c830d38acacc338a6a3b7e571f375c4ce | 510699cff737a3ada9816d3b1dc2be58adc8e6e4 | refs/heads/master | 2022-12-22T02:33:33.138887 | 2022-11-26T19:27:21 | 2022-12-20T12:16:40 | 99,841,011 | 6 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,970 | rd | predict.MoE_expert.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{predict.MoE_expert}
\alias{predict.MoE_expert}
\alias{fitted.MoE_expert}
\alias{residuals.MoE_expert}
\title{Predictions from MoEClust expert networks}
\usage{
\method{predict}{MoE_expert}(object,
newdata = NULL,
simplify = FALSE,
droplevels = FALSE,
...)
\method{fitted}{MoE_expert}(object,
...)
\method{residuals}{MoE_expert}(object,
...)
}
\arguments{
\item{object}{An object of class \code{"MoE_expert"} (typically \code{x$expert}, where \code{x} is of class \code{"MoEClust"}).}
\item{newdata}{A matrix or data frame of test examples. If omitted, the fitted values are used.}
\item{simplify}{Logical indicating whether to simplify the output (in the form of a list) to a 3-dimensional array with dimensions given by the number of new observations, the number of variables, and the number of clusters. The first dimension of such an array is of length \code{1} when there are no expert network covariates, in which case the entries correspond to \code{object$parameters$mean}. Defaults to \code{FALSE}.}
\item{droplevels}{A logical indicating whether unseen factor levels in categorical variables within \code{newdata} should be dropped (with \code{NA} predicted in their place). Defaults to \code{FALSE}. See \code{\link{drop_levels}}.}
\item{...}{Catches unused arguments or allows the \code{simplify} argument to be passed through \code{fitted} and \code{residuals}.}
}
\value{
For \code{simplify=FALSE}, either a list of vectors or predictions (for univariate data) or a list of matrices of predictions (for multivariate data). These lists are of the same length as number of non-noise components in the fitted model. When \code{simplify=TRUE}, a 3-dimensional array of predictions is returned, with respective dimensions given by the number of observations, variables, and non-noise components.
}
\description{
Predictions (point estimates) of observation-specific component means from each (non-noise) component's expert network linear regression.
}
\details{
This function is effectively just a shortcut to \code{lapply(x$expert, predict.lm, newdata=...)}. It can also be thought of as a wrapper to \code{\link{predict.MoEClust}(x, ...)$mean}, although it returns a list (by default) rather than a 3-dimensional array and also \emph{always} preserves the dimensions of \code{newdata}, even for models without expert network covariates.
}
\examples{
data(CO2data)
res <- MoE_clust(CO2data$CO2, G=3, equalPro=TRUE, expert= ~ GNP, network.data=CO2data)
predict(res$expert)
# Try with newdata and simplify=TRUE
predict(res$expert, newdata=CO2data[1:5,"GNP", drop=FALSE], simplify=TRUE)
}
\seealso{
\code{\link{predict.MoEClust}}, \code{\link[stats]{lm}}, \code{\link{predict.MoE_gating}}, \code{\link{drop_levels}}
}
\author{
Keefe Murphy - <\email{keefe.murphy@mu.ie}>
}
\keyword{prediction}
\keyword{utility}
|
2a20a6b6babdec61a6e902b50e7c12870f3e1deb | cf901d7f62bca1699b40d00d0cafee39aeace99f | /4_plot.R | 98c511690df5f60e342e4c59f3b3669d0269a9b5 | [] | no_license | vojtak/Earthquakes | d4d989231605cab482998c7a4e6ddaa8f7b4cf58 | 5d91dd938263c06c00a0fe2ffe7e322cfbe7eb32 | refs/heads/master | 2021-01-21T10:12:49.246178 | 2014-06-11T13:57:32 | 2014-06-11T13:57:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,113 | r | 4_plot.R | # coastline
# Keep every 20th coastline point: a length-20 logical mask with only its
# first element TRUE recycles across the rows of the coastline table.
# Renamed from `points` to stop shadowing graphics::points(), which this
# script calls further down.
coast_mask <- vector("logical", 20)
coast_mask[1] <- TRUE
coast <- read.table(coastline_file_path)[coast_mask, ]
# density plot
source("Z_image_scale.R")
# taking care of outliers (something strange happened at IZU in 2000 :) )
# Iteratively clamp the extreme top of `mapa`: while the current maximum sits
# more than 20% of its own value above the next-highest tier, replace the top
# 5% band with that tier and repeat until no large jump remains.
count<-0
dif<-1
while(dif>0){
	maximum<-max(mapa)
	if(maximum-max(mapa[mapa<0.95*maximum])>0.2*maximum){
		top<-max(mapa[mapa<0.95*maximum])
	} else{
		top<-maximum
	}
	mapa[mapa>=0.95*maximum]<-top
	dif<-maximum-top
	count<-count+1
}
# defining color scheme
# NOTE(review): length.out=top-1 derives the number of breaks from a data
# value's magnitude rather than a chosen resolution -- confirm this is intended.
breaks <- seq(min(mapa), top,length.out=top-1)
pal.1=colorRampPalette(c("white", "red","brown"), space="rgb")
# actual plot
layout(matrix(c(1,2), nrow=2, ncol=1),heights=c(20,6))
par(mar=c(5, 3, 2, 2))
par(pty="s")
image(lons,lats,t(mapa),col=pal.1(length(breaks)-1), breaks=breaks,
asp=1,
xlab="longitude",ylab="latitude")
points(coast,pch=2,cex=0.01)
par(mar=c(4, 3,2, 2))
par(pty="m")
image.scale(mapa, col=pal.1(length(breaks)-1), breaks=breaks, horiz=TRUE,xlab="color scale")
|
e973a84d8ca687991cf4d43c09e9709562f09b72 | e56c98512229172467f1f4f99870ed2aac5324cd | /man/varImportance.Rd | 7c0c20d25cdacd61a0bffde3c8bba28b1ddc14be | [] | no_license | babaknaimi/sdm | 6de65e769562adedca326adaaf66518870830e62 | 63ec623526e3867158a4847e3eff1c5d3350552b | refs/heads/master | 2021-11-25T10:59:02.624170 | 2021-11-11T05:35:37 | 2021-11-11T07:19:16 | 39,352,874 | 18 | 7 | null | null | null | null | UTF-8 | R | false | false | 2,036 | rd | varImportance.Rd | \name{getVarImp}
\alias{getVarImp}
\alias{getVarImp,sdmModels-method}
\title{variable importance}
\description{
Calculates relative importance of different variables in the models using several approaches.
}
\usage{
getVarImp(x,id,wtest,...)
}
\arguments{
\item{x}{sdmModels object}
\item{id}{numeric, specify the model (modelID) for which the variable importance values are extracted}
\item{wtest}{specifies which dataset ('training','test.dep','test.indep') should be used (if exist) to calculate the importance of variables }
\item{...}{additional arguments as for \code{getModelId} function}
}
\details{
The \code{getVarImp} function returns an object including different measures of variable importance; if it is passed to the \code{plot} function, a barplot is generated. If the ggplot2 package is installed on your machine, the plot is generated using ggplot (unless you set \code{gg = FALSE}); otherwise, the standard barplot is used.
}
\references{
Naimi, B., Araujo, M.B. (2016) sdm: a reproducible and extensible R platform for species distribution modelling, Ecography, DOI: 10.1111/ecog.01881
}
\author{Babak Naimi \email{naimi.b@gmail.com}
\url{https://www.r-gis.net/}
\url{https://www.biogeoinformatics.org}
}
\examples{
\dontrun{
# if m is a sdmModels object (output of the sdm function) then:
getVarImp(m,id=1,wtest='training') # variable importance based on training dataset
vi <- getVarImp(m,id=1,wtest='test.dep')
vi
plot(vi,'auc')
plot(vi,'cor')
#############
# You can get Mean variable importance (and confidence interval) for multiple models:
vi <- getVarImp(m,id=1:10,wtest='test.dep') # specify the modelIDs of the models
vi
plot(vi,'cor')
# you can use the getModelId function to find the id of the specific method, replication, etc.
# or you may put the arguments of the getModelId in the getVarImp function:
vi <- getVarImp(m, method='glm') # Mean variable importance for the method glm
vi
plot(vi)
plot(vi, gg = F) # R standard plot is used instead of ggplot
}
}
\keyword{spatial} |
6fcbf633541895a06dfe3a9ce14223cd143fb104 | ba2dbc7f0d12f98f09e6cc79a2d5f64bc605eb63 | /DataViz/server.R | a370bcd6c68f7b627e957d50b4a0b699b55c2445 | [
"MIT"
] | permissive | eledero/RDatelligence | 830e48203ad2d800fc91b07017a367187bd4ff53 | dc5994bcbc7c7a4635eb0eadb5ac6e03f745c8ee | refs/heads/master | 2020-05-21T23:53:40.254878 | 2018-09-18T16:04:06 | 2018-09-18T16:04:06 | 62,474,318 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,141 | r | server.R | library(ggplot2)
library(dplyr)
library(gtable)
library(grid)
library(ggthemes)
options(shiny.maxRequestSize = 11*1024^2)
shinyServer(function(input, output, session) {
source("dataLoad.R", local = TRUE) #Cargue de archivos CSV de base y nombres
source("dataGen.R", local = TRUE) #Generación de dataframe base
source("plot1.R", local = TRUE) #Plot de evolución
source("plot2.R", local = TRUE) #Plot de Mkt Share
source("table2.R", local = TRUE) #Tabla de Mkt Share
source("checks.R", local = TRUE) #Tabla original y Summary
source("updateSelectizeInput.R", local = TRUE) #Actualizaciones de los campos en los SelectizeInput
source("doubleData.R", local = TRUE)
source("doubleData1.R", local = TRUE)
source("doubleData2.R", local = TRUE)
source("doubleData3.R", local = TRUE)
source("doubleMerge.R", local = TRUE)
source("variation.R", local = TRUE)
source("varGraph.R", local = TRUE)
source("report.R", local = TRUE)
source("params.R", local = TRUE)
source("rep_1.R", local = TRUE)
source("rep_2.R", local = TRUE)
source("rep_3.R", local = TRUE)
#source("reportes.R", local = TRUE)
}) |
27652062de46c373c5ca21edc7fd43dd9ddc5618 | e1c85152190571d098cb195b331556e4ca9c1c06 | /Rpackages/gemtcPlus/man/nma_pre_proc.Rd | 13a346053b34478b91a088f19bff3b5d81bbf92d | [
"Apache-2.0"
] | permissive | Diarmuid78/Global-HTA-Evidence-Open | 4227ad9be6012a4083aba741827abe4e648fac37 | 050767457dc82da1d5a7f14967c72d626615effb | refs/heads/master | 2023-06-03T23:10:56.353787 | 2021-06-29T11:54:50 | 2021-06-29T11:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 531 | rd | nma_pre_proc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nma_pre_proc.R
\name{nma_pre_proc}
\alias{nma_pre_proc}
\title{NMA data pre-processing}
\usage{
nma_pre_proc(data, plan)
}
\arguments{
\item{data}{input \code{data.frame}}
\item{plan}{A \code{list} containing the model plan}
}
\value{
A network object or if gsd a list containing a network object and jags init parameters
}
\description{
NMA data pre-processing
}
\seealso{
\code{\link{groupedTTE_fp_pre_proc}}, \code{\link{groupedTTE_pwe_pre_proc}}
}
|
16634a4e5485d0ae3de498ec738562527689ee92 | 4179c1525e5fbe925044cb01ffb40e6f0c07f71c | /man/predict.gaterSVM.Rd | 55602cd572a7a01b6cee6bf979d5a7c6e9bd9948 | [] | no_license | hetong007/SwarmSVM | 20e58c0887da3eb90483759119af3a343e9862bd | a82b7eb37d3adb51decfc98f637d9bc32ba5b652 | refs/heads/master | 2022-12-27T04:53:10.674589 | 2022-12-15T08:38:34 | 2022-12-15T08:38:34 | 36,259,233 | 15 | 8 | null | null | null | null | UTF-8 | R | false | true | 607 | rd | predict.gaterSVM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gaterSVM.R
\name{predict.gaterSVM}
\alias{predict.gaterSVM}
\title{Prediction for Gater SVM}
\usage{
\method{predict}{gaterSVM}(object, newdata, ...)
}
\arguments{
\item{object}{An object of class \code{gaterSVM}}
\item{newdata}{An n x p matrix containing the new input data.
Could be a matrix or a sparse matrix object.}
\item{...}{parameters for future usage.}
}
\description{
The function applies a model produced by the
\code{gaterSVM} function to every row of a data matrix and returns the model predictions.
}
|
fe888587cdba1838ae62ca20769821aafee20f70 | 94c0d1574ad8ba81a1ef0d48020b92ba681a5c6a | /snakePipes/shared/rscripts/scRNAseq_eisaR.R | 3b879bb6b12d6680ad5a3c6780553c7a99839ea9 | [
"MIT"
] | permissive | maxplanck-ie/snakepipes | 650de654c8bb6b197743d5bb59628df2d91d3a79 | 6144e3fdc1bdaa26e05b1cb234df7414c61e283a | refs/heads/master | 2023-09-05T09:25:33.130890 | 2023-06-05T13:38:57 | 2023-06-05T13:38:57 | 54,579,435 | 318 | 91 | MIT | 2023-08-22T12:07:49 | 2016-03-23T17:23:31 | Python | UTF-8 | R | false | false | 3,335 | r | scRNAseq_eisaR.R | #this is a modification of https://github.com/csoneson/rna_velocity_quant/blob/master/scripts/generate_cdna_intron_fa_prepref.R , authored by C.Soneson
## Redirect console output to the snakemake log file.
sink(snakemake@log[["out"]])
## Restrict the library search path to R's base library directory.
.libPaths(R.home("library"))
wdir<-snakemake@params[["wdir"]]
if (!dir.exists(wdir)) dir.create(wdir)
setwd(wdir)
message(sprintf("working directory is %s",getwd()))
suppressPackageStartupMessages({
  library(Biostrings)
  library(rtracklayer)
  library(dplyr)
  library(GenomicFeatures)
  library(BiocGenerics)
  library(BSgenome)
  library(GenomicRanges)
})
## Inputs and parameters handed over by the snakemake workflow.
gtf<-snakemake@params[["gtf"]]
genome_fasta<-snakemake@input[["genome_fasta"]]
scriptdir<-snakemake@params[["scriptdir"]]
isoform_action<-snakemake@params[["isoform_action"]]
flanklength<-as.integer(snakemake@params[["flank_length"]])
joint_fasta<-snakemake@params[["joint_fasta"]]
joint_t2g<-snakemake@params[["joint_t2g"]]
## Echo the parameters into the log for reproducibility.
print(scriptdir)
print(gtf)
print(genome_fasta)
print(isoform_action)
print(flanklength)
print("sourcing extractIntronSeqs.R ..")
source(file.path(scriptdir, "extractIntronSeqs.R"))
print("sourcing extractTxSeqs.R ..")
source(file.path(scriptdir, "extractTxSeqs.R"))
print("..done")
## Extract intronic sequences flanked by L-1 bases
## of exonic sequences where L is the biological read length
print("loading genome ..")
genome <- Biostrings::readDNAStringSet(genome_fasta)
## Keep only the first whitespace-separated token of each FASTA header
## (presumably so sequence names match the GTF chromosome names -- confirm).
names(genome) <- sapply(strsplit(names(genome), " "), .subset, 1)
print("..done")
print("loading gtf ..")
gtfdf <- as.data.frame(rtracklayer::import(gtf))
print("..done")
## Extract transcript and intron sequences
tx <- extractTxSeqs(gtf = gtf, genome = genome, type = "spliced")
intr <- extractIntronSeqs(gtf = gtf, genome = genome, type = isoform_action,
                          flanklength = flanklength,
                          joinOverlappingIntrons = FALSE)
## Transcript-to-gene mapping for all annotated transcripts.
t2gtx <- gtfdf %>% dplyr::filter(type == "transcript") %>%
  dplyr::select(transcript_id, gene_id) %>%
  dplyr::distinct()
## Build the intron-to-gene table t2gin; gene ids get a "-I" suffix so
## intron entries are distinguishable from spliced-transcript entries.
if (isoform_action == "collapse") {
  ## Intron names already contain the gene name ("<gene>-I<k>"); strip the
  ## suffix to recover the gene id, then tag it with "-I".
  ## (The previous code pasted onto the non-existent t2gin$gene_id column:
  ## paste0(NULL, "-I") recycled "-I" into an extra third column, which made
  ## the two-name colnames() assignment further below fail.)
  t2gin <- data.frame(intr = names(intr),
                      gene = gsub("\\-I[0-9]*$", "", names(intr)),
                      stringsAsFactors = FALSE)
  t2gin$gene <- paste0(t2gin$gene, "-I")
} else if (isoform_action == "separate") {
  ## Intron names contain the transcript name; map transcript -> gene via
  ## the t2gtx table, then tag the gene id with "-I".
  t2gin <- data.frame(intr = names(intr),
                      transcript_id = gsub("\\-I[0-9]*$", "", names(intr)),
                      stringsAsFactors = FALSE) %>%
    dplyr::left_join(t2gtx, by = "transcript_id") %>%
    dplyr::select(intr, gene_id)
  t2gin$gene_id <- paste0(t2gin$gene_id, "-I")
} else {
  stop("Unknown isoform_action")
}
## Harmonize the intron table's column names with the transcript table so
## the two can be stacked into a single transcript/intron-to-gene mapping.
colnames(t2gin) <- colnames(t2gtx)
t2g <- rbind(t2gtx, t2gin)
## Write the combined cDNA + intron sequences to a single FASTA.
Biostrings::writeXStringSet(c(tx, intr), joint_fasta, compress = FALSE)
## Capture lists: names of spliced-transcript vs. intron reference
## sequences, one id per line.
write.table(names(tx), file = file.path(wdir, "cDNA_tx_to_capture.txt"),
            row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
write.table(names(intr), file = file.path(wdir, "introns_tx_to_capture.txt"),
            row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
write.table(t2g, file = joint_t2g,
            row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
message('done all')
sink()
## Record session information in a separate file.
sink("sessionInfo.txt")
sessionInfo()
sink()
0303585faecd5def0f85fa42877b07a0cc49bb32 | ba5e8bafef9c419a5f2c458b0fb3b9ea29fdbcb1 | /R/graphfunctions.R | e2a256e89220ba2806db7d42834e2dfc29825282 | [] | no_license | Bioconductor/graph | eb5e26d0a287eafed39f99694f9cd04ee952cd83 | 91706d22ecc17cb42de0395803272740a99d5942 | refs/heads/devel | 2023-08-29T12:39:45.270517 | 2023-04-25T13:38:48 | 2023-04-25T13:38:48 | 102,150,187 | 3 | 15 | null | 2023-03-27T20:24:28 | 2017-09-01T20:22:29 | R | UTF-8 | R | false | false | 9,137 | r | graphfunctions.R | ################################################################
# function:
# boundary takes two parameters:
# graph is the original graph from which the subgraph will be created
# subgraph either the subgraph or the nodes of the subgraph
# boundary returns a list of length equal to the number of nodes in the
# subgraph. Each element is a list of the nodes in graph
#
# created by: Elizabeth Whalen
# last updated: Feb 15, 2003, RG
################################################################
boundary <- function(subgraph, graph)
{
    ## For each node of `subgraph`, list the nodes of `graph` that lie
    ## outside the subgraph and have an edge pointing into that node.
    if (!is(graph, "graph"))
        stop("'graph' must be an object of type graph")
    snodes <- if (is(subgraph, "graph"))
        nodes(subgraph)
    else if (is.character(subgraph))
        subgraph
    else
        stop("'subgraph' type incorrect")
    if (!all(snodes %in% nodes(graph)))
        stop("some nodes not in graph")
    incoming <- inEdges(graph)[snodes]
    lapply(incoming, function(nbrs) nbrs[!(nbrs %in% snodes)])
}
##check to see if any edges are duplicated, as we often don't have
##good ways to deal with that
duplicatedEdges <- function(graph) {
    ## TRUE if any node's edge list in a graphNEL contains repeated targets.
    if (!is(graph, "graphNEL"))
        stop("only graphNEL supported")
    any(vapply(graph@edgeL,
               function(el) any(duplicated(el$edges)),
               logical(1)))
}
ugraphOld <- function()
{
    ## Defunct placeholder: calling it signals an error directing users to
    ## the current "ugraph" method.
    .Defunct("ugraph")
}
setMethod("ugraph", "graph",
          function(graph) {
              ## Convert a (possibly directed) graph into its undirected
              ## version as a graphNEL; an already-undirected graph is
              ## returned unchanged.
              if (!isDirected(graph))
                  return(graph)
              eMat <- edgeMatrix(graph)
              ## add recip edges: for each directed edge (i, j) add (j, i)
              eMat <- cbind(eMat, eMat[c(2, 1), ])
              ## put into graphNEL edgeL format: split the target indices
              ## by source index, dropping duplicate targets
              eL <- lapply(split(as.vector(eMat[2, ]), as.vector(eMat[1, ])),
                           function(x) list(edges=unique(x)))
              theNodes <- nodes(graph)
              ## nodes without outgoing edges are missing from the split;
              ## translate the split's integer names back to node labels
              names(eL) <- theNodes[as.integer(names(eL))]
              ## add empty edge list for nodes with no edges
              noEdgeNodes <- theNodes[!(theNodes %in% names(eL))]
              noEdges <- lapply(noEdgeNodes,
                                function(x) list(edges=numeric(0)))
              names(noEdges) <- noEdgeNodes
              ## FIXME: should we skip standard initialize for speed?
              ## need to copy over at least the nodeData...
              graphNEL(nodes=theNodes, edgeL=c(eL, noEdges),
                       edgemode="undirected")
          })
setMethod("edgeMatrix", c("graphNEL", "ANY"),
          function(object, duplicates=FALSE) {
              ## Return a 2 row matrix of node indices (rows "from"/"to"),
              ## one column per edge
              ed <- object@edgeL
              ##reorder to the same order as nodes
              ed <- ed[nodes(object)]
              nN <- length(ed)
              eds<-lapply(ed, function(x) x$edges)
              elem <- listLen(eds)
              ## repeat each source index once per outgoing edge
              from <- rep(seq_len(nN), elem)
              to <- unlist(eds, use.names=FALSE)
              ans <- rbind(from, to)
              ##we duplicate edges in undirected graphNEL, so here we
              ##remove them: canonicalize each column so that from <= to,
              ##then drop repeated (from, to) pairs
              if( edgemode(object) == "undirected" && !duplicates) {
                  swap <- from>to
                  ans[1,swap]<-to[swap]
                  ans[2,swap]<-from[swap]
                  t1 <- paste(ans[1,], ans[2,], sep="+")
                  ans <- ans[ ,!duplicated(t1), drop=FALSE]
              }
              ans
          })
setMethod("edgeMatrix", c("clusterGraph", "ANY"),
          function(object, duplicates) {
              ## Build the 2 x nEdges (from, to) index matrix of a cluster
              ## graph: every unordered pair of nodes within a cluster is an
              ## edge.  Indices refer to positions in nodes(object).
              cls <- object@clusters
              nd <- nodes(object)
              ## collect one two-row block per cluster, then bind once
              ## (the previous code grew the result with c()/cbind() in the
              ## loop and also kept an unused intermediate `ta`)
              blocks <- vector("list", length(cls))
              for (k in seq_along(cls)) {
                  idx <- match(cls[[k]], nd)
                  nn <- length(idx)
                  ## a singleton (or empty) cluster contributes no edges;
                  ## the previous code errored on this case (2:nn / rep)
                  if (nn < 2) next
                  ## all unordered pairs (i, j) with i < j of cluster members
                  from <- rep(idx[-nn], (nn - 1):1)
                  to <- idx[unlist(lapply(2:nn, function(i) i:nn))]
                  blocks[[k]] <- rbind(from, to)
              }
              ans <- do.call(cbind, blocks)
              if (is.null(ans))
                  ans <- matrix(integer(0), nrow = 2)
              dimnames(ans) <- list(c("from", "to"), NULL)
              ans
          })
setMethod("edgeMatrix", c("distGraph", "ANY"),
          function(object, duplicates) {
              ## Return a 2 row matrix of node indices (rows "from"/"to")
              ed <- edges(object)
              ##reorder to the same order as nodes
              NODES <- nodes(object)
              ed <- ed[NODES]
              nN <- length(ed)
              elem <- listLen(ed)
              ## one "from" entry per outgoing edge; "to" holds the index of
              ## each edge target within the node vector
              from <- rep(seq_len(nN), elem)
              to <- match(unlist(ed), NODES)
              ans <- rbind(from, to)
              ## undirected graphs list each edge once per endpoint, so drop
              ## the mirrored copies unless duplicates were requested
              ##FIXME: see graphNEL for a speedup of this part
              if( edgemode(object) == "undirected" && !duplicates) {
                  t1 <- apply(ans, 2, function(x) {paste(sort(x),
                                                         collapse="+")})
                  ans <- ans[ ,!duplicated(t1), drop=FALSE]
              }
              ans
          })
setMethod("edgeMatrix", "graphAM",
          function(object, duplicates=FALSE) {
              ## Return a 2 row matrix of node indices built from the
              ## adjacency matrix: row i contributes an edge for every
              ## non-zero entry of adjMat[i, ].
              to <- apply(object@adjMat, 1, function(x) which(x != 0), simplify=FALSE) # list (simplify=FALSE requires R >= 4.1)
              stopifnot(is(to, "list"))
              from <- rep(seq_len(numNodes(object)), listLen(to))
              to <- unlist(to, use.names=FALSE)
              ans <- rbind(from=from, to=to)
              ## we duplicate edges in undirected graphs, so here we remove
              ## them: canonicalize so from <= to, then drop repeated pairs
              if (!isDirected(object) && !duplicates) {
                  swap <- from > to
                  ans[1, swap] <- to[swap]
                  ans[2, swap] <- from[swap]
                  t1 <- paste(ans[1, ], ans[2, ], sep="+")
                  ans <- ans[ , !duplicated(t1), drop=FALSE]
              }
              ans
          })
##it seems to me that we might want the edge weights for
##a given edgeMatrix and that that would be much better done
##in the edgeMatrix function
##we are presuming that eM has integer offsets in it
##eWV <- function(g, eM, sep=ifelse(edgemode(g)=="directed", "->",
## "--"))
##{
## unE <- unique(eM[1,])
## edL <- g@edgeL
## eE <- lapply(edL, function(x) x$edges)
## eW <- lapply(edL, function(x) {
## ans = x$weights
## ed = length(x$edges)
## if( is.null(ans) && ed > 0 )
## ans = rep(1, ed)
## ans})
##
## nr <- listLen(eE)
## ##now we can subset -
## eMn <- paste(rep((1:length(nr))[unE],nr[unE]), unlist(eE[unE]), sep=sep)
## eWv <- unlist(eW[unE])
## dE <- paste(eM[1,], eM[2,], sep=sep)
## wh<-match(dE, eMn)
## if(any(is.na(wh)) )
## stop("edges in supplied edgematrix not found")
## ans <-eWv[wh]
## names(ans) <- eMn[wh]
## ans
##}
#eWV <- function(g, eM, sep=ifelse(edgemode(g)=="directed", "->",
# "--"))
#{
# edL <- g@edgeL
# ##fix up the edgeweights so we really find them
# eW <- lapply(edL, function(x) {
# ans = x$weights
# ed = length(x$edges)
# if( is.null(ans) && ed > 0 )
# ans = rep(1, ed)
# if( length(ans) > 0 )
# names(ans) = x$edges
# ans})
#
# a1 <- apply(eM, 2,
# function(x) eW[[x[1]]][as.character(x[2])])
# names(a1) <- paste(eM[1,], eM[2,], sep=sep)
# return(a1)
#}
eWV <- function (g, eM, sep = ifelse(edgemode(g) == "directed", "->",
    "--"), useNNames = FALSE)
{
    ## Extract the "weight" edge attribute for every edge listed in the
    ## edge matrix eM (as produced by edgeMatrix()).  Returns a named
    ## numeric vector; by default names are built from node *indices*
    ## (e.g. "1->2"), with useNNames = TRUE from the node *labels*.
    n <- nodes(g)
    from <- n[eM["from", ]]
    to <- n[eM["to", ]]
    ## if the graph has no "weight" attribute yet, install a default of 1
    ## and retry; the modified g is local to the error handler, so the
    ## caller's graph object is not changed
    eW <- tryCatch(edgeData(g, from=from, to=to, attr="weight"),
                   error=function(e) {
                       edgeDataDefaults(g, "weight") <- 1L
                       edgeData(g, from=from, to=to, attr="weight")
                   })
    eW <- unlist(eW)
    if (!useNNames)
        nms <- paste(eM["from", ], eM["to", ], sep=sep)
    else
        nms <- paste(from, to, sep=sep)
    names(eW) <- nms
    eW
}
pathWeights <- function (g, p, eM = NULL)
{
    ## Weights of the successive steps of a path.
    ## A path p is a vector of names of adjacent nodes; we form the vector
    ## of steps through the path (pairs of adjacent nodes) and attach the
    ## weight of each step.  No checking is done to verify that the path p
    ## exists in g: steps with no matching edge produce NA lookups, which
    ## are dropped by the is.na() filter at the end.
    if (length(p) < 2)
        stop("'p' has length < 2")
    if (is.null(eM))
        eM <- edgeMatrix(g)
    wv <- eWV(g, eM, useNNames = TRUE)
    sep <- ifelse(edgemode(g) == "undirected", "--", "->")
    pcomps <- cbind(p[-length(p)], p[-1])
    ## undirected: the labels in wv may list either endpoint first, so look
    ## up each step in both orientations
    if (edgemode(g) == "undirected") pcomps <- rbind(pcomps, pcomps[,c(2,1)])
    inds <- apply(pcomps, 1, function(x) paste(x[1], x[2], sep = sep))
    tmp <- wv[inds]
    tmp[!is.na(tmp)]
}
|
841f67cba9cfe96ea460c440e431cbf5377af903 | ad85c921d614df2354c6aaea199084be7e935f04 | /R/pseudoexpo.R | 0a5872574b94ef4bc136badb47bf9b7e69c5b988 | [] | no_license | rcqls/EBSpatCGAL | 3cf7fc1213aaae69424e7c18eb40a9210e10e0df | f20ba631e6b395e3fa8855e9f1981dbd0de143d0 | refs/heads/master | 2023-07-21T01:11:10.937736 | 2023-07-18T13:29:22 | 2023-07-18T13:29:22 | 12,945,905 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,284 | r | pseudoexpo.R | # #require(getattr)
# ## TODO:
# ## 1) run(pseudo,...,gridSize=?,...)
# ## C part: set nbPtsGrille and a reallocation of the statex
# ## 2) run(pseudo,...,domainSize=?,...)
# ## C part: set tailleDomaine
# ## 3) statex à changer pour calculer la matrice de covariance des estimateurs!
# ## How this works?
# ## 1) libebpseudo: used here to calculate the statExG and statExND needed to compute
# ## the pseudo and the gradient in the exponential case
# ## 2) there are two ways to compute of the log-pseudo and its gradient:
# ## a) inside R from the statex provided by libebpseudo
# ## b) inside libebpseudo: no longer available since not in the R spirit!
# ####################################################################################################################
# ## class PseudoExpo
# PseudoExpo<-function(model,nbPts,domainSize,marks=NULL,mode=c("random","systematic"),weighted=FALSE) {
# Func.mode("PseudoExpo") #used for conversion of formula
# ### debugMode: cat("formula -> Func")
# if(inherits(model,"formula")) model<-Func(model,"PseudoExpo")
# ### debugMode: cat("model->");print(model)
# pseudo <- if(weighted)
# CqlsObj(PseudoExpoWeightedMatrix,PseudoExpo,ContrastOptim,ReplicableGibbs)
# else
# CqlsObj(PseudoExpoMatrix,PseudoExpo,ContrastOptim,ReplicableGibbs)
# pseudo$call <- match.call()
# pseudo$mode<-match.arg(mode)
# pseudo$weighted<-weighted
# if(missing(nbPts)) nbPts<-10000L
# pseudo$func<-model
# pseudo$response <- eval(pseudo$func$response,parent.frame()) # thanks to Ege!
# #added for communicating with Func.new in order to declare the marks names
# .funcEnv$.marks.names <- NULL
# if(is.marked(pseudo$response)) .funcEnv$.marks.names <- pseudo$response$del.marks.name
# else if(!is.null(marks)) .funcEnv$.marks.names <- if(is.character(marks)) marks else marks$name
# if(missing(domainSize)) domainSize<-pseudo$response$pl$vor$sizeIn
# if(length(domainSize)==1) domainSize<-c(domainSize,domainSize)
# pseudo$.domainSize<-domainSize
# pseudo$.nbPts<-as.integer(nbPts)
# if(pseudo$weighted) {SumCache<-SumCacheCompFunc;SamplCache<-SamplCacheCompFunc}
# else {SumCache<-SumCache;SamplCache<-SamplCache}
# ## verboseMode: cat("sumCache->")
# pseudo$sumCache <- SumCache(pseudo$func,pseudo$domainSize,pseudo$response$pl)
# ## verboseMode: cat("Done\n");cat("samplCache->")
# pseudo$samplCache <- SamplCache(pseudo$func,pseudo$nbPts,pseudo$domainSize,pseudo$response$pl,pseudo$mode)
# ## verboseMode: cat("Done\n")
# init.cv2l.PseudoExpo(pseudo)
# pseudo$nbParam <- sum(unlist(pseudo$cv2l))
# pseudo$par0 <- rep(0,pseudo$nbParam)
# pseudo$response_runs <- 0L #Normally update has to be done!
# pseudo
# }
# init.cv2l.PseudoExpo <- function(pseudo) {
# st<-list(Single=1)
# for(i in 2:length(pseudo$func$fct)) {
# st <- c(st,list(par=as.vector(pseudo$func$fct[[i]]$term$caracLoc.size)))
# }
# pseudo$cv2l <- Vector2ListConverter(st)
# }
# "param<-.PseudoExpo" <- function(pseudo,value) {
# pars <- by(pseudo$cv2l,value)
# pseudo$func$fct[[1]]$term$vars$Single <- pars$Single
# for(i in 2:length(pars)) pseudo$func$fct[[i]]$term$vars$par <- pars[[i]]
# }
# reactivate.PseudoExpo<-function(pseudo) {
# #Func.mode("PseudoExpo")
# reactivate(pseudo$func)
# reactivate(pseudo$sumCache)
# reactivate(pseudo$samplCache)
# reactivate(eval(pseudo$response))
# }
# print.PseudoExpo<-function(pseudo) {
# print(names(pseudo))
# return(pseudo)
# }
# #update
# update.PseudoExpoMatrix<-function(pseudo,verbose=TRUE) {
# #reactivate(pseudo)
# if(verbose) cat("Please wait: updating object PseudoExpo(Matrix) ...")
# update(pseudo$samplCache,verbose)
# pseudo$left <- cbind(1,as.matrix(pseudo$samplCache)) #<- as.list(pseudo$samplCache)
# update(pseudo$sumCache,verbose)
# pseudo$right <-as.matrix(pseudo$sumCache)
# pseudo$rightTerm <- apply(cbind(1,pseudo$right),2,sum)/pseudo$domainSize[1]/pseudo$domainSize[2]
# pseudo$updated<-TRUE
# if(verbose) cat(" -> Done!\n")
# }
# update.PseudoExpoWeightedMatrix<-function(pseudo,verbose=TRUE) {
# #reactivate(pseudo)
# if(verbose) cat("Please wait: updating object PseudoExpo(WeightedMatrix) ...")
# update(pseudo$samplCache,verbose)
# tmp <- as.data.frame(pseudo$samplCache)
# pseudo$leftWeight<-(tmp$weight)/sum(tmp$weight)
# pseudo$left<-cbind(1,as.matrix(tmp[-1]))
# #number of point
# #pseudo$leftNb <- sum(pseudo$left[[1]]$weight)
# #list of indices
# #tmp<- sapply(pseudo$left,function(df) length(df)-1)
# #pseudo$leftInd <- lapply(1:length(tmp),function(i) seq((cumsum(c(0,tmp))+1)[i]+1,length=tmp[i]))
# update(pseudo$sumCache,verbose)
# pseudo$right <- as.data.frame(pseudo$sumCache)
# pseudo$rightTerm<-c(sum(pseudo$right$weight),t(pseudo$right$weight)%*%as.matrix(pseudo$right[-1]))/pseudo$domainSize[1]/pseudo$domainSize[2]
# #pseudo$right<- c(sum(tmp[[1]]$weight),sapply(tmp,function(df) as.vector(t(cbind(df[-1]) ) %*%df[[1]])))/pseudo$domainSize/pseudo$domainSize
# pseudo$updated<-TRUE
# if(verbose) cat(" -> Done!\n")
# }
# #pseudo et gradient en R
# contrast.optim.PseudoExpoMatrix<-function(pseudo,param) {
# sum(pseudo$rightTerm*param)+mean(exp(-pseudo$left%*%param))
# }
# gradient.optim.PseudoExpoMatrix<-function(pseudo,param) {
# pseudo$rightTerm-apply(pseudo$left*as.vector(exp(-pseudo$left%*%param)),2,mean)
# }
# contrast.optim.PseudoExpoWeightedMatrix<-function(pseudo,param) {
# sum(pseudo$rightTerm*param)+(t(pseudo$leftWeight)%*%exp(-pseudo$left%*%param))
# }
# gradient.optim.PseudoExpoWeightedMatrix<-function(pseudo,param) {
# pseudo$rightTerm-apply(pseudo$left*as.vector(t(pseudo$leftWeight)%*%exp(-pseudo$left%*%param)),2,mean)
# }
# func.PseudoExpo<-function(pseudo,...) pseudo$func
# terms.PseudoExpo<-function(pseudo,...) terms(func(pseudo))
# summary.PseudoExpo<-function(pseudo,...) summary(func(pseudo),...)
# identify.PseudoExpo<-function(pseudo,mode=1,...) {
# poly<-pseudo$response$pl
# plot(poly$vor,...)
# if(mode==1) pt<-unlist(locator(1))
# else pt<-identify(x=t(poly$vor$delVertex),label=poly$vor$delId,n=1,plot=FALSE)
# ##.External("ebpseudoExpo_statex_dv",pseudo,pt,poly,mode,PACKAGE = "EBSpat")
# pseudo$func[poly,pt]
# }
|
db3c60eedbdd072eb2e5d459cab212b36b28d8f2 | 2f15b2dc16de0471e7bee43f6739b6ad8522c81d | /R/record_update_set_is_dimension_in_updates.R | 35a359863259b10ab4ac8f7e55b0cac9cfa557a5 | [
"MIT"
] | permissive | billster45/starschemar | 45566be916c95778727a3add3239143d52796aa9 | 5f7e0201494a36f4833f320e4b9535ad02b9bdc1 | refs/heads/master | 2022-12-20T13:45:06.773852 | 2020-09-26T03:44:12 | 2020-09-26T03:44:12 | 298,796,838 | 1 | 0 | NOASSERTION | 2020-09-26T11:10:30 | 2020-09-26T11:10:30 | null | UTF-8 | R | false | false | 843 | r | record_update_set_is_dimension_in_updates.R |
#' Is dimension in set of updates?
#'
#' Given a set of dimension record update operations and the name of a
#' dimension, it checks if there is any update operation to perform on the
#' dimension.
#'
#' @param updates A `record_update_set` object, list of dimension record update
#' operations.
#' @param name A string, name of the dimension.
#'
#' @return A boolean, indicating if the dimension appears in the list of update
#' operations.
#'
#' @keywords internal
is_dimension_in_updates <- function(updates, name) {
  # S3 generic: dispatches on the class of `updates`.
  UseMethod("is_dimension_in_updates")
}
#' @rdname is_dimension_in_updates
#' @export
#' @keywords internal
is_dimension_in_updates.record_update_set <- function(updates, name) {
  # TRUE as soon as any update operation targets the named dimension;
  # FALSE for an empty update set.
  any(vapply(updates, function(upd) upd$dimension == name, logical(1)))
}
|
66e8c0f237af38714b41088888334ca3282202d2 | 1d7b8d97be6d3b3aed26bc19ea6855bbdb2d21bc | /R/helpingFunctions.R | 0bd92e5946c0acd4d9389e970fc7d520cd62ac41 | [] | no_license | cran/BaPreStoPro | 18633df8e18b518225e7c01147473684d9369a46 | f3e8f06b07ec4b4ca0be9de3d481734d5e154c31 | refs/heads/master | 2021-01-19T01:11:31.691784 | 2016-06-07T14:28:11 | 2016-06-07T14:28:11 | 60,611,461 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,275 | r | helpingFunctions.R | #'
#'
#' Transformation of event times to NHPP
#'
#' @description Transformation of vector of event times to the corresponding counting process variables.
#' @param times vector of event times
#' @param t times of counting process
#'
#' @export
TimestoN <- function(times, t){
  ## Transform a vector of event times into the counting-process values
  ## N(t[1]), ..., N(t[k]) on the grid t.
  ##
  ## times  vector of event times (events beyond max(t) are dropped)
  ## t      grid of observation times
  ##
  ## Fixes over the previous version: an empty `times` no longer triggers an
  ## NA-subscript error (1:length(ind) on length 0), and an event exactly
  ## halfway between two grid points is counted once (at the earlier point)
  ## instead of once per tied grid point.
  kept <- times[times <= max(t)]
  ## index of the nearest grid point for each event; which.min returns the
  ## first minimum, so ties go to the earlier grid point
  ind <- vapply(kept, function(a) which.min(abs(t - a)), integer(1))
  ## count events per grid point, then accumulate into N(t)
  cumsum(as.numeric(tabulate(ind, nbins = length(t))))
}
#' Transformation of NHPP variables to event times
#'
#' @description Vector of Poisson process differences are translated to a vector of event times.
#' @param dN vector of differences of counting process
#' @param t times of counting process
#' @export
dNtoTimes <- function(dN, t){
  ## Recover the event times underlying a counting process: each grid point
  ## t[i] is emitted dN[i] times (only where dN[i] > 0).
  hasEvent <- dN > 0
  rep(t[hasEvent], times = dN[hasEvent])
}
#' Sampling from lognormal proposal density
#'
#' @description Drawing one sample from the lognormal distribution with mean \code{parOld} and standard deviation \code{propSd}. Used in Metropolis Hastings algorithms.
#' @param parOld the parameter from the last iteration step
#' @param propSd proposal standard deviation
#'
#' @examples
#' plot(replicate(100, proposal(1, 0.1)), type = "l")
#' @export
proposal <- function(parOld, propSd){
  ## Clamp tiny values away from zero so log() stays finite
  ## (values below ~1e-320 would underflow to exactly 0).
  par0 <- pmax(parOld, 1e-150)
  ## Log-normal parameters chosen so the proposal has mean par0 and
  ## standard deviation propSd.
  sigma2 <- log( propSd^2/(par0^2)+1)
  mu <- log(par0) - sigma2/2
  rlnorm(length(par0), mu, sqrt(sigma2))
}
#' Proposal ratio of lognormal proposal density
#'
#' @description Calculation of proposal ratio, see also \code{\link{proposal}}.
#' @param parOld the parameter from the last iteration step
#' @param parNew drawn candidate
#' @param propSd proposal standard deviation
#'
#' @examples
#' cand <- proposal(1, 0.01)
#' proposalRatio(1, cand, 0.01)
#' @export
proposalRatio <- function(parOld, parNew, propSd){
  ## Ratio q(old | new) / q(new | old) of the log-normal proposal density
  ## used by proposal(); needed in the Metropolis-Hastings acceptance step.
  ## For vector arguments the elementwise ratios are multiplied together.
  lnPar <- function(p) {
    s2 <- log( propSd^2/exp(2*log(p))+1)
    list(mu = log(p) - s2/2, sd = sqrt(s2))
  }
  qOld <- lnPar(parOld)
  qNew <- lnPar(parNew)
  prod(dlnorm(parOld, qNew$mu, qNew$sd)/dlnorm(parNew, qOld$mu, qOld$sd))
}
#' Inversion Method
#'
#' @description Algorithm to sample from cumulative distribution function, if no inverse function is analytically available.
#' @param Fun cumulative distribution function
#' @param len number of samples
#' @param candArea candidate area
#' @param grid fineness degree
#' @param method vectorial ("vector") or not ("free")
#' @examples
#' test <- InvMethod(function(x) pnorm(x, 5, 1), 1000, candArea = c(0, 10), method = "free")
#' plot(density(test))
#' curve(dnorm(x, 5, 1), col = 2, add = TRUE)
#' @references
#' Devroye, L. (1986). Non-Uniform Random Variate Generation. New York: Springer.
#' @export
InvMethod <- function(Fun, len, candArea, grid = 1e-05, method = c("vector", "free")){
  method <- match.arg(method)
  ## Normalize the candidate area to the form each method expects:
  ## "vector" wants a fine grid of candidate points, "free" wants just the
  ## two interval endpoints.
  if(missing(candArea)){
    candArea <- findCandidateArea(Fun)
    if(method == "vector") candArea <- seq(candArea[1], candArea[2], by = grid)
  }else{
    if(method == "vector" & length(candArea) == 2) candArea <- seq(candArea[1], candArea[2], by = grid)
    if(method == "free" & length(candArea) > 2) candArea <- c(min(candArea), max(candArea))
  }
  if(method == "vector"){
    ## Precompute Fun on the whole grid, then invert by taking the first
    ## grid point whose CDF value reaches the uniform draw.  Note: U is
    ## drawn on (0, max(diFu)), so the lookup always succeeds on the grid.
    diFu <- sapply(candArea, Fun)
    U <- runif(len, 0, max(diFu))
    res <- sapply(U, function(u) candArea[which(diFu >= u)[1]])
  }
  if(method == "free"){
    ## Bisection search for Fun^{-1}(U[i]) inside [candArea[1], candArea[2]],
    ## stopping once the bracket is narrower than `grid`.
    res <- numeric(len)
    U <- runif(len)
    for(i in 1:len){
      lower <- candArea[1]
      upper <- candArea[2]
      diff <- upper - lower
      while(diff >= grid){
        if(Fun(lower+diff/2) < U[i]){
          lower <- lower+diff/2
        }else{
          upper <- lower+diff/2
        }
        diff <- upper - lower
      }
      res[i] <- (lower+upper)/2
    }
  }
  res
}
#' Rejection Sampling Algorithm
#'
#' @description Algorithm to sample from an arbitrary density function.
#' @param Fun cumulative distribution function
#' @param dens density
#' @param len number of samples
#' @param cand candidate area
#' @param grid fineness degree
#' @param method vectorial ("vector") or not ("free")
#' @references
#' Devroye, L. (1986). Non-Uniform Random Variate Generation. New York: Springer.
#' @examples
#' plot(density(RejSampling(dens = function(x) dnorm(x, 5, 1),
#' len = 500, cand = seq(2, 9, by = 0.001), method = "free")))
#' lines(density(RejSampling(dens = function(x) dnorm(x, 5, 1), len = 500,
#' cand = seq(2, 9, by = 0.001), method = "vector")), col=2)
#' curve(dnorm(x, 5, 1), from = 2, to = 8, add = TRUE, col = 3)
#' @export
RejSampling <- function(Fun, dens, len, cand, grid = 1e-03, method = c("vector", "free")){ # NOTE(review): negative-support handling unclear -- confirm
  method <- match.arg(method)
  if(method == "free"){
    ## One accept/reject loop per draw: the envelope is uniform on the
    ## candidate interval, scaled by the density maximum found numerically.
    ## NOTE(review): ca and mp are loop-invariant and could be hoisted out
    ## of the for-loop.
    res <- numeric(len)
    for(i in 1:len){
      if(missing(cand)){
        ca <- findCandidateArea(function(t) Fun(t))
      }else{
        ca <- range(cand)
      }
      mp <- optimize(f = function(t) dens(t), ca, maximum = TRUE)$objective
      resi <- NULL
      while(is.null(resi)){
        u <- runif(1,0,mp)
        candi <- runif(1, ca[1], ca[2])
        prob <- dens(candi)
        if(u <= prob){
          resi <- candi
        }
      }
      res[i] <- resi
    }
  }
  if(method == "vector"){
    ## Discrete variant: precompute the density on a grid of candidate
    ## points, keep only points with positive density, then accept/reject
    ## uniformly among them until len draws are accepted.
    res <- numeric(len)
    if(missing(cand)){
      ca <- findCandidateArea(Fun)
      cand <- seq(ca[1], ca[2], by = grid)
    }
    prob <- vapply(cand, function(v) dens(v), FUN.VALUE = numeric(1))
    cand <- cand[prob != 0]
    prob <- prob[prob != 0]
    mp <- max(prob)
    count <- 1
    while(count <= len){
      u <- runif(1, 0, mp)
      ind <- sample(1:length(cand), 1)
      if(u <= prob[ind]){
        res[count] <- cand[ind]
        count <- count + 1
      }
    }
  }
  return(res)
}
#' Adaptation of proposal standard deviation
#'
#' @description Adaptive MCMC helper: when the acceptance rate of the chain
#' falls below \code{lower}, the proposal standard deviation
#' \code{propSd} = exp(l) is shrunk to exp(l - \code{delta.n(batch)});
#' when it rises above \code{upper}, it is inflated to
#' exp(l + \code{delta.n(batch)}).  Otherwise it is returned unchanged.
#'
#' @param chain Markov chain
#' @param propSd current proposal standard deviation
#' @param batch number of batch (of chain)
#' @param lower lower bound
#' @param upper upper bound
#' @param delta.n function of batch number
#'
#' @return adapted proposal standard deviation
#' @references Rosenthal, J. S. (2011). Optimal Proposal Distributions and Adaptive MCMC. In: Handbook of Markov Chain Monte Carlo, pp. 93-112.
#' @export
ad.propSd <- function(chain, propSd, batch, lower = 0.3, upper = 0.6, delta.n = function(n) min(0.05, 1/sqrt(n))){
  # Acceptance rate proxy: fraction of distinct states in the chain
  # (a rejected proposal repeats the previous state).
  accept.rate <- length(unique(chain)) / length(chain)
  # Adapt on the log scale so the returned sd is always positive.
  log.sd <- log(propSd)
  if (accept.rate < lower) {
    log.sd <- log.sd - delta.n(batch)
  } else if (accept.rate > upper) {
    log.sd <- log.sd + delta.n(batch)
  }
  exp(log.sd)
}
# Heuristically bracket the effective support of a distribution from a
# CDF-like function `VFun` by doubling/halving away from `start`.
#
# VFun        vectorised CDF-like function (should increase from 0 to 1).
# start       starting point of the search (default 1).
# pos.support TRUE if the support is assumed positive: the lower bound is
#             then found by halving towards 0; otherwise it is found by
#             doubling from -start towards -Inf.
# quasi.null  tolerance below which a tail probability counts as zero.
#
# Returns c(lower, upper): an interval containing essentially all of the
# probability mass.
#
# NOTE(review): the positive-support lower search uses `VFun(cand) > 0`
# while the negative-support branch uses `VFun(cand) > quasi.null`.  This
# looks inconsistent, and the strict `> 0` test can loop ~1000 times
# (until `cand` underflows to 0) for CDFs positive on all of (0, Inf) --
# confirm whether `> quasi.null` was intended.
findCandidateArea <- function(VFun, start = 1, pos.support = TRUE, quasi.null = 1e-05){
  if(pos.support){
    cand <- start
    # Double upwards until the upper tail mass is negligible.
    while(1 - VFun(cand) > quasi.null){
      cand <- cand*2
    }
    upper <- cand
    cand <- start
    # Halve downwards until the CDF reaches (exactly) zero.
    while(VFun(cand) > 0){
      cand <- cand/2
    }
    lower <- cand
  } else{
    cand <- start
    # Upper end: same doubling search as in the positive-support case.
    while(1 - VFun(cand) > quasi.null){
      cand <- cand*2
    }
    upper <- cand
    cand <- -start
    # Double towards -Inf until the lower tail mass is negligible.
    while(VFun(cand) > quasi.null){
      cand <- cand*2
    }
    lower <- cand
  }
  c(lower, upper)
}
#' Calculation of a proposal for burn-in phase and thinning rate
#'
#' @description The proposed burn-in is calculated by dividing the Markov chains into \code{m} blocks and calculate the 95\% credibility intervals and the respective mean.
#' Starting in the first one, the block is taken as burn-in as long as the mean of the current block is not in the credibility interval of the following block or vice versa.
#' The thinning rate is proposed by the first lag which leads to a chain autocorrelation less than \code{dependence}.
#' It is not easy to automate these choices, so it is highly recommended to verify the chains manually.
#' @param chain vector of Markov chain samples
#' @param dependence allowed dependence for the chain
#' @param m number of blocks
#' @return vector of burn-in and thinning
#' @export
diagnostic <- function(chain, dependence = 0.8, m = 10) {
  lc <- length(chain)
  # Block length: the chain is split into m blocks of K samples each.
  K <- floor(lc/m)
  # Proposed thinning: first lag whose autocorrelation (estimated after
  # discarding the first fifth of the chain) drops to `dependence` or
  # below, capped at K/10 so every block keeps enough thinned samples.
  thinning <- min(which(acf(chain[-(1:floor(lc/5))], plot=F)$acf <= dependence)[1], floor(K/10), na.rm = TRUE)
  # Thinned samples of each block, one column per block.
  he1 <- sapply(1:m, function(i) chain[((i - 1) * K + 1):(i * K)][seq(1, K, by = thinning)])
  # 95% credibility interval and mean of each block.
  he2 <- apply(he1, 2, quantile, c(0.025, 0.975))
  he.mean <- apply(he1, 2, mean)
  # is.in[i]: mean of block i lies inside the CI of block i+1, or the
  # mean of block i+1 lies inside the CI of block i.
  is.in <- (he.mean[-m] >= he2[1, -1] & he.mean[-m] <= he2[2, -1]) | (he.mean[-1] >= he2[1, -m] & he.mean[-1] <= he2[2, -m])
  # burnIn <- 0
  # Start with one block of burn-in; keep discarding blocks while any of
  # the remaining pairwise comparisons still disagrees.
  burnIn <- K
  for (i in 1:(m-1)) {
    if (sum(is.in) < length(is.in)) {
      is.in <- is.in[-1]
      burnIn <- burnIn + K
    }
  }
  # Never discard the whole chain: cap burn-in at m-1 blocks.
  burnIn <- min((m-1)*K, burnIn)
  return(c(burnIn = burnIn, thinning = thinning))
}
|
9aa9c64cc75f06edc37432f3487a7102e4329280 | c0615b58a071f01bb0886b4525868f1dc0d13e02 | /man/getSymbols.Rd | 685d0737fdcd57a55ec85008fd175509bd075604 | [] | no_license | phamdinhkhanh/VNDS | 86e1ef74b8616864bfd9d7b2918be1ec812fbd6b | 04dee56f60727f715c40ce4a060807b6a227faa6 | refs/heads/master | 2022-01-13T11:11:03.448528 | 2019-05-12T05:26:07 | 2019-05-12T05:26:07 | 115,730,979 | 14 | 5 | null | 2017-12-30T04:59:53 | 2017-12-29T14:59:22 | R | UTF-8 | R | false | true | 501 | rd | getSymbols.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getSymbols.R
\name{getSymbols}
\alias{getSymbols}
\title{Lay gia chung khoan vietNam}
\usage{
getSymbols(symbol, from, to)
}
\arguments{
\item{symbol}{Ma chung khoan, gom 3 ki tu}
\item{from}{ngay bat dau dinh dang yyyy-mm-dd}
\item{to}{ngay ket thuc dinh dang yyyy-mm-dd}
}
\value{
mot data frame chua gia chung khoan va khoi luong
}
\description{
Ham so nay se lay gia chung khoan tu ngay bat dau den ngay ket thuc
}
|
d48fa0b8b79b3d6bec6efeac846835435bfb3261 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/WordR/examples/renderInlineCode.Rd.R | 305e90632befd0ec9fbb2a9f1235da4efe379baa | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 339 | r | renderInlineCode.Rd.R | library(WordR)
### Name: renderInlineCode
### Title: Read Word document with R code blocks, evaluate them and writes
### the result into another Word document.
### Aliases: renderInlineCode
### ** Examples
renderInlineCode(
paste(examplePath(),'templates/template1.docx',sep = ''),
paste(tempdir(),'/result1.docx',sep = ''))
|
022f7c264eb4062a2478c42d928e56bf3d21ee08 | 0a4c7468bee7c14a31282f0dc3b4e005506ffc99 | /R/best_worst_month.R | f9750604ed90f06a562be8516839b2f5abdc6d70 | [
"Apache-2.0"
] | permissive | cestob/backtestGraphics | 0bb6fd1208be4ea0d79504a5465aec93a6de43df | 27eda364121a69b74e68798a9fc48f97e6f5f4f8 | refs/heads/master | 2020-03-18T06:17:47.197107 | 2018-03-20T02:23:57 | 2018-03-20T02:23:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,193 | r | best_worst_month.R | #' Find the best-performing and the worst-performing months
#' Find the best-performing and the worst-performing months
#'
#' Aggregates p&l by calendar month and reports the months with the
#' highest and the lowest total p&l.
#'
#' @param x A data frame that contains data for individual instruments,
#'   with at least a \code{date} column and a \code{pnl} column.
#'
#' @return A list with the best month and the worst month ("YYYY-MM"
#'   strings), as well as their respective p&l's.
best_worst_month <- function(x){
  ## Quiet R CMD check notes about non-standard evaluation.
  pnl <- NULL
  ## Collapse the data to one total p&l per year-month.
  monthly <- x %>%
    select(date, pnl) %>%
    mutate(date = format(date, "%Y-%m")) %>%
    group_by(date) %>%
    summarise(pnl = sum(pnl, na.rm = TRUE)) %>%
    ungroup()
  ## Locate the extreme months (first occurrence wins on ties, matching
  ## which.max / which.min semantics).
  best.idx  <- which.max(monthly$pnl)
  worst.idx <- which.min(monthly$pnl)
  list(best.month  = monthly$date[best.idx],
       best.pnl    = monthly$pnl[best.idx],
       worst.month = monthly$date[worst.idx],
       worst.pnl   = monthly$pnl[worst.idx])
}
|
891a411628ec0dfb12f8ad53f8b33c2650859633 | 15f909366627aed4f3867dc1eb453bc8f147eea8 | /scripts/test_statistics/likelihood_function_JSDM.R | 0998de7761cf6b04f028395c8fd72c45e24e76cd | [] | no_license | Doi90/JSDM_Prediction | 5da4655136ef706a9bace390e7eef875bda77af8 | 79d675a69ca410e25807e90d28cd53949f6a36db | refs/heads/master | 2020-03-11T18:18:33.990574 | 2019-10-23T01:45:05 | 2019-10-23T01:45:05 | 130,173,871 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,806 | r | likelihood_function_JSDM.R | #############################################################
#############################################################
### ###
### DEFINE LIKELIHOOD CODE ###
### ###
### This script contains the different functions for ###
### calculating the log likelihood value of the models. ###
### Kept the old version (log_likelihood_old) and Nick's ###
### replacements for reference ###
### ###
#############################################################
#############################################################
####################################
### Old version no longer in use ###
####################################
# LEGACY (see section header: "Old version no longer in use").
# Per-site joint log-likelihood of a multivariate-probit JSDM, computed
# with mvtnorm::pmvnorm over orthant probabilities, then averaged over
# posterior samples.  Returns a list: the mean log-likelihood and the
# fraction of sites*samples where pmvnorm returned 0 and its error bound
# was used as an approximation instead.
# NOTE(review): n_sites is a parameter but is never checked for NULL,
# unlike every other argument -- presumably an oversight; confirm.
log_likelihood_old <- function(Beta = NULL,
                               X = NULL,
                               y = NULL,
                               R = NULL,
                               n_species = NULL,
                               n_sites = NULL,
                               n_iter = NULL){
  ## Tests to make sure correct inputs supplied
  if(is.null(Beta)){
    stop("Beta not supplied.")
  }
  if(is.null(X)){
    stop("X not supplied.")
  }
  if(is.null(y)){
    stop("y not supplied.")
  }
  if(is.null(R)){
    stop("R not supplied.")
  }
  if(is.null(n_species)){
    stop("n_species not supplied.")
  }
  if(is.null(n_iter)){
    stop("n_iter not supplied.")
  }
  ## Create an array of distribution mean values. Beta * X values
  ## (sites x species x posterior samples).
  mean_values <- array(data = NA,
                       dim = c(n_sites,
                               n_species,
                               n_iter))
  for(s in seq_len(n_iter)){
    mean_values[, , s] <- as.matrix(X) %*% Beta[ , , s]
  }
  ## Create a log_likelihood matrix full of NAs (sites x samples).
  log_lik <- matrix(NA,
                    nrow = n_sites,
                    ncol = n_iter)
  ## Calculate log likelihood values. Fill matrix with values as we go.
  ## approx_counter tracks how often the pmvnorm error bound had to be
  ## used in place of a zero probability estimate.
  approx_counter <- 0
  ### For each slice of array
  for(s in seq_len(n_iter)){
    ### For each site
    for(i in seq_len(n_sites)){
      occ_state <- y[i, ] # observed occurrence state at site i
      #### Define probability distribution thresholds
      ## lower / upper to limit integral of density for likelihood
      lower <- rep(-Inf, n_species) # default vector of -Inf lower limits
      upper <- rep(+Inf, n_species) # default vector of +Inf upper limits
      for(k in seq_len(n_species)){ # set actual lower/upper limits based on known occurrence states
        if(occ_state[k] == 0){ # if species is absent
          upper[k] <- 0 # species absent when z<0
        }
        if(occ_state[k] == 1){ # if species is present
          lower[k] <- 0 # species present when z>0
        }
      }
      #### Prediction for species assemblage at site i using values from slice s:
      #### the orthant probability of the latent MVN falling in [lower, upper].
      likelihood_tmp <- pmvnorm(mean = mean_values[ i, , s],
                                sigma = R[ , , s],
                                lower = lower,
                                upper = upper)
      # If the default GenzBretz algorithm fails (returns exactly 0 with
      # zero error estimate), redo with slower Miwa algorithm
      if(likelihood_tmp[1] == 0 & attr(likelihood_tmp, "error") == 0){
        likelihood_tmp <- pmvnorm(mean = mean_values[ i, , s],
                                  sigma = R[ , , s],
                                  lower = lower,
                                  upper = upper,
                                  algorithm = "Miwa")
      }
      if(likelihood_tmp[1] != 0){
        likelihood <- likelihood_tmp[1]
      }
      # Still zero: fall back to the reported error bound as an upper
      # bound on the probability, and record the approximation.
      if(likelihood_tmp[1] == 0){
        likelihood <- attr(likelihood_tmp, "error")
        approx_counter <- approx_counter + 1
      }
      #### Fill predictions array with value
      log_lik[i, s] <- log(likelihood)
    }
  }
  ## Calculate single likelihood value for whole model
  ### Take the product of site-level likelihoods within a single sample
  ### (sum of logs per column).
  sum_log_lik <- colSums(log_lik,
                         na.rm = TRUE)
  ### Take the mean likelihood across samples
  mean_log_lik <- mean(sum_log_lik,
                       na.rm = TRUE)
  ## Generate single output
  log_lik_out <- list(likelihood = mean_log_lik,
                      approximation_rate = approx_counter / (n_sites * n_iter))
  return(log_lik_out)
}
#######################################
### New Version: Multiple functions ###
#######################################
### Nick's four internal functions
logapb <- function(loga = NULL,
                   logb = NULL){
  # Stably compute log(a + b) from log(a) and log(b) (two-term
  # log-sum-exp):
  #   log(a + b) = log(a) + log1p(exp(log(b) - log(a)))
  # Anchoring on the larger of the two logs keeps the exp() argument
  # non-positive, so it can never overflow.  (The previous form
  # `loga + log1p(exp(logb - loga))` returned Inf whenever logb exceeded
  # loga by more than ~710, because exp() overflowed.)  Vectorised
  # elementwise via pmax/pmin, like the original.
  m <- pmax(loga, logb)
  ans <- m + log1p(exp(pmin(loga, logb) - m))
  return(ans)
}
logmean <- function(lx = NULL){
  # Given a vector `lx` containing the logs of the elements of a vector
  # x, stably compute log(mean(x)) with the log-sum-exp trick: shifting
  # by the maximum keeps every exp() argument non-positive, avoiding
  # overflow and premature underflow.  This replaces the original O(n)
  # chain of pairwise logapb() calls with one vectorised pass and drops
  # the dependency on logapb().
  m <- max(lx)
  if (!is.finite(m)) {
    # All-(-Inf) input (every x is zero): the mean is zero, so the
    # log-mean is -Inf (the old pairwise chain produced NaN here).
    # Inf/NaN inputs also land here; `m` is already the right answer.
    return(m)
  }
  ans <- m + log(mean(exp(lx - m)))
  return(ans)
}
# log-integral of a zero-centred multivariate probit with correlation
# matrix `R`, between the vectors `lower` and `upper`.  Estimation by a
# Monte-Carlo-like simulation using the method of Botev (2015), with
# `niter` draws.  Inspired by (and in places borrowing heavily from) the
# implementation in the TruncatedNormal package, but optimised for this
# problem.
#
# lower, upper  length-m truncation bounds (m = number of species)
# R             m x m correlation matrix
# niter         number of Monte Carlo draws
#
# Returns a single scalar: the log of the estimated orthant probability.
logint_mvprobit <- function(lower,
                            upper,
                            R,
                            niter){
  # NOTE(review): require() inside a function only warns on failure;
  # requireNamespace() with an explicit stop would be safer.
  require(TruncatedNormal)
  # dimension of MVN (number of species)
  m <- length(lower)
  # Variable-reordered ("permuted") Cholesky decomposition of R given the
  # truncation bounds, as used by Botev's minimax-tilting method.
  chol_result <- TruncatedNormal::cholperm(R, lower, upper)
  L <- chol_result$L
  D <- diag(L)
  if(any(D < 10 ^ -10)){
    warning("Method may fail as covariance matrix is singular!")
  }
  # Scale bounds and factor by the Cholesky diagonal; L becomes strictly
  # lower-triangular after subtracting the identity.
  l <- chol_result$l / D
  u <- chol_result$u / D
  L <- L / D - diag(m)
  # Solve the nonlinear system for the optimal exponential tilting
  # parameters (importance-sampling shift); mu holds the tilting means.
  xmu <- TruncatedNormal::nleq(l, u, L)
  mu <- xmu[m:(2 * m - 2)]
  # loop through dimensions adding to the log integral
  mu[m] <- 0
  Z <- matrix(0, m, niter)
  lp <- 0
  for(k in 1:(m - 1)){
    # Conditional location shift from the already-sampled coordinates.
    col <- t(L[k, 1:k]) %*% Z[1:k, ]
    tl <- l[k] - mu[k] - col
    tu <- u[k] - mu[k] - col
    # Draw from the tilted truncated normal and accumulate the
    # log-importance weights.
    Z[k, ] <- mu[k] + TruncatedNormal::trandn(tl, tu)
    lp <- lp + TruncatedNormal::lnNpr(tl, tu) + 0.5 * mu[k]^2 - mu[k] * Z[k, ]
  }
  # Last dimension contributes only its truncated-normal probability.
  col <- L[m, ] %*% Z
  tl <- l[m] - col
  tu <- u[m] - col
  lp <- lp + TruncatedNormal::lnNpr(tl, tu)
  # get log mean across all iterations and return
  lp <- logmean(lp)
  lp
}
lik_probit <- function(obs = NULL,
                       mu = NULL,
                       R = NULL,
                       log.p = FALSE,
                       niter = 1000){
  # given a binary matrix `obs` of 1s and 0s (rows being sites and columns being
  # species), a matching matrix `mu`, each row giving the mean of the
  # multivariate normal across species for that site, and a species-species
  # correlation matrix `R`, use `niter` Monte Carlo samples to estimate the
  # probability of observing that vector of observations, under the multivariate
  # probit model. Returns a vector of probabilities corresponding to each
  # site -- on the probability scale by default; set log.p = TRUE for the
  # log scale.  (An earlier comment said "log scale by default", which
  # contradicted the log.p = FALSE default.)
  # check and get dimensions
  stopifnot(all(dim(obs) == dim(mu)))
  m <- ncol(obs)
  n <- nrow(obs)
  # set up truncation matrices: integrate the latent MVN over
  # (-Inf, 0] for absences and [0, Inf) for presences.
  lower <- matrix(-Inf, nrow = n, ncol = m)
  upper <- matrix(Inf, nrow = n, ncol = m)
  # The subsequent code assumes a zero-mean MVN, so need to adjust truncation to
  # account for mean, hence minus signs on mu
  present <- obs == 1
  lower[present] <- -mu[present]
  upper[!present] <- -mu[!present]
  # get probabilities for each row (one Monte Carlo integral per site)
  ans <- rep(NA, n)
  for(i in 1:n){
    ans[i] <- logint_mvprobit(lower[i, ],
                              upper[i, ],
                              R,
                              niter)
  }
  # on probability scale if requested
  if(!log.p){
    ans <- exp(ans)
  }
  return(ans)
}
### My new wrapper around Nick's functions to fit the overall workflow
independent_log_likelihood <- function(y = NULL,
                                       pred = NULL,
                                       n_species = NULL,
                                       n_sites = NULL,
                                       n_iter = NULL){
  # Log-likelihood of the observed presence/absence matrix under
  # independent Bernoulli predictions.
  #
  # y:     n_sites x n_species binary observation matrix
  # pred:  n_sites x n_species x n_iter array of occurrence probabilities
  # n_species, n_sites, n_iter: problem dimensions
  #
  # Returns an n_species x n_iter matrix; entry [j, s] is the Bernoulli
  # log-likelihood of species j summed over all sites, under posterior
  # draw s.
  ## Guard against missing arguments up front.
  if(is.null(y)) stop("y not supplied.")
  if(is.null(pred)) stop("pred not supplied.")
  if(is.null(n_species)) stop("n_species not supplied.")
  if(is.null(n_sites)) stop("n_sites not supplied.")
  if(is.null(n_iter)) stop("n_iter not supplied.")
  ## One row per species, one column per posterior draw.
  log_lik <- matrix(NA,
                    nrow = n_species,
                    ncol = n_iter)
  ## Bernoulli log-density of each site's observation for species j under
  ## draw s, summed over sites.
  for(j in seq_len(n_species)){
    for(s in seq_len(n_iter)){
      log_lik[j, s] <- sum(dbinom(x = y[, j],
                                  size = 1,
                                  prob = pred[, j, s],
                                  log = TRUE))
    }
  }
  return(log_lik)
}
# Joint (multivariate-probit) log-likelihood of a JSDM.  For each
# posterior draw s, the latent means X %*% Beta[,,s] and the residual
# correlation R[,,s] are passed to lik_probit(), which Monte-Carlo
# estimates the probability of the observed assemblage at every site.
# Returns an n_sites x n_iter matrix of site-level log-likelihoods.
# NOTE(review): as in log_likelihood_old(), n_sites is never checked for
# NULL although every other argument is -- confirm intent.
joint_log_likelihood <- function(Beta = NULL,
                                 X = NULL,
                                 y = NULL,
                                 R = NULL,
                                 n_species = NULL,
                                 n_sites = NULL,
                                 n_iter = NULL){
  ## Tests to make sure correct inputs supplied
  if(is.null(Beta)){
    stop("Beta not supplied.")
  }
  if(is.null(X)){
    stop("X not supplied.")
  }
  if(is.null(y)){
    stop("y not supplied.")
  }
  if(is.null(R)){
    stop("R not supplied.")
  }
  if(is.null(n_species)){
    stop("n_species not supplied.")
  }
  if(is.null(n_iter)){
    stop("n_iter not supplied.")
  }
  ## Create an array of distribution mean values. Beta * X values
  ## (sites x species x posterior samples).
  X <- as.matrix(X)
  mean_values <- array(data = NA,
                       dim = c(n_sites,
                               n_species,
                               n_iter))
  # Retained commented-out elementwise draft of the matrix product below:
  # for(i in seq_len(n_sites)){
  #
  #   for(j in seq_len(n_species)){
  #
  #     for(s in seq_len(n_iter)){
  #
  #       mean_values[i, j, s] <- sum(X[i, ] * Beta[ , j, s])
  #
  #     }
  #   }
  # }
  #
  for(s in seq_len(n_iter)){
    mean_values[, , s] <- as.matrix(X) %*% Beta[ , , s]
  }
  ## Create a log_likelihood matrix full of NAs (sites x samples).
  log_lik <- matrix(NA,
                    nrow = n_sites,
                    ncol = n_iter)
  ## Calculate log likelihood values. Fill matrix with values as we go
  ### For each slice of array
  for(s in seq_len(n_iter)){
    #### Prediction for species assemblage at each site using values from
    #### slice s (Monte Carlo probit integral; 1000 draws per site).
    likelihood <- lik_probit(obs = y,
                             mu = mean_values[ , , s],
                             R = R[ , , s],
                             log.p = FALSE,
                             niter = 1000)
    #### Fill predictions array with value
    log_lik[ , s] <- log(likelihood)
  }
  # Retained commented-out aggregation to a single mean log-likelihood;
  # the function now returns the full site x sample matrix instead.
  # ## Calculate single likelihood value for whole model
  #
  # ### Take the product of site-level likelihoods within a single sample
  #
  # sum_log_lik <- colSums(log_lik,
  #                        na.rm = TRUE)
  #
  # ### Take the mean likelihood across samples
  #
  # mean_log_lik <- mean(sum_log_lik,
  #                      na.rm = TRUE)
  #
  # return(mean_log_lik)
  #
  return(log_lik)
}
|
1b41dd620af205c1b943f9d4a7970b08c906e7d8 | bd38bc1e992871eecbceb352eec368a4b9e916ac | /man/makeEMAP.Rd | e0d96ceae5f4f56cfd5f13a8a9a0a271b20d55f0 | [
"MIT"
] | permissive | NElnour/BCB420.2019.ESA | 75e5822b3cf856abd549af5f3a308f76407b716e | efbce77d798e07db3923d35191dc48f97959502f | refs/heads/master | 2020-05-01T07:53:42.559953 | 2019-04-03T05:06:21 | 2019-04-03T05:06:21 | 177,363,127 | 1 | 0 | MIT | 2019-03-24T02:43:56 | 2019-03-24T02:43:56 | null | UTF-8 | R | false | true | 750 | rd | makeEMAP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeMaps.R
\name{makeEMAP}
\alias{makeEMAP}
\title{Generate the genetic interpretation map of BioGrid GGI tags of system's
physical interactions.}
\usage{
makeEMAP()
}
\value{
(dataframe) An 11-by-3 dataframe mapping BioGrid GGI tag to its
interpretation assuming that the system's components also interact
physically. The first column contains the official BioGRID GGI tags; the
second contains interpreted relationships under the assumption; the third
contains notes on interpretation.
}
\description{
Generate the genetic interpretation map of BioGrid GGI tags of system's
physical interactions.
}
\examples{
EMAP <- makeEMAP() # generates and loads the EMAP
}
|
3dbf1e0b21cdde32fb7900d35c93d915c22eae65 | 34f4a64a16fd1774db92a7505ece4524ac1b9383 | /Scripts/Week2Lecture.R | 54cb996c3f2381595bd62318df559fb2087929f1 | [] | no_license | BJKill/RProgWeek2 | 843f65a294655d569799da5fa1af32aa434fcc56 | 8fe0a27372afdaa36cfb6f49bb0c1d91499a9019 | refs/heads/master | 2022-10-06T08:36:26.051936 | 2020-06-07T01:26:57 | 2020-06-07T01:26:57 | 268,373,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,623 | r | Week2Lecture.R | ##Week 2 Lectures
#if/else statements
if(x >3) {
y <- 10
} else {
y <- 0
}
y <- if(x > 3) {
10
} else {
0
}
for(i in 1:10) {
print(i)
}
x <- c("a", "b", "c", "d")
##all will print each character in x
for(i in 1:4) {
print(x[i])
}
for(i in seq_along(x)) {
print(x[i])
}
for(letter in x) {
print(letter)
}
for(i in 1:4) print(x[i]) ##if you only have one command, no {} needed
##nested for loop to print each entry in matrix
x<-matrix(1:6, 2,3)
for(i in seq_len(nrow(x))) {
for(j in seq_len(ncol(x))) {
print(x[i,j])
}
}
##same thing but my way
for(i in 1:nrow(x)) {
for(j in 1:ncol(x)) {
print(x[i,j])
}
}
count <- 0
while(count < 10) {
print(count)
count <- count + 1
}
z<-5
while(z >= 3 && z <= 10) {
print(z)
coin <- rbinom(1,1,0.5) ##creates random binomial walk
if(coin == 1) {
z<- z + 1
} else {
z <- z - 1
}
}
x0 <- 0
tol <- 1e-4 ## 1x10^-4
repeat {
x1 <- rnorm(1)
if(abs(x1-x0) < tol) {
break
} else {
x0 <- x1
}
}
for(i in 1:100) {
if(i <= 20) {
next
}
print(i)
}
for(i in 1:100) {
print(i)
if(i > 20) { ##stops after 20 iterations but prints '21' bc print is before break
break
}
}
##funtion time!
# Elementwise sum of two arguments (lecture demo of a simple function).
add2 <- function(x,y) x + y
add2(3,5) #correctly returns 8
# Keep only the elements of x strictly greater than 10.
above10 <- function(x) {
  x[x > 10]
}
sue <- c(65, 2, 1000000, -10, 9)
above10(sue)
# Keep only the elements of x strictly greater than the threshold n.
aboven <- function(x,n) {
  x[x > n]
}
aboven(sue,0)
aboven(sue,5)
t<-1:20
aboven(t,12)
#finding the mean of columns in a data frame or matrix
# Column-wise means of a data frame or matrix; prints the means and
# (invisibly, via print()) returns them as a numeric vector.
#
# y         data frame or matrix with numeric columns
# removeNA  drop NA values before averaging each column?
#
# Fix: the original iterated `for(i in 1:nc)`, which misbehaves when
# ncol(y) == 0 (1:0 yields c(1, 0) and the loop errors on y[, 1]);
# vapply over seq_len(ncol(y)) handles the empty case cleanly and
# guarantees a numeric result.
columnmean <- function(y, removeNA = TRUE) {
  means <- vapply(seq_len(ncol(y)),
                  function(j) mean(y[, j], na.rm = removeNA),
                  numeric(1))
  print(means)
}
# Same contract as columnmean(): per-column means, printed and returned.
#
# Fixes relative to the original: the accumulator was seeded with
# `1:ncol(y)` (an integer sequence, and c(1, 0) when ncol(y) == 0) and
# the loop used the unsafe `1:ncol(y)` pattern.  A numeric(0-or-more)
# accumulator plus seq_len() handles zero-column input and never carries
# stale seed values.
columnmean2 <- function(y, removeNA = TRUE) {
  means <- numeric(ncol(y))
  for (j in seq_len(ncol(y))) {
    means[j] <- mean(y[, j], na.rm = removeNA)
  }
  print(means)
}
mat <- matrix(1:30, nrow = 5, ncol = 6)
columnmean(mat)
columnmean2(mat)
columnmean(airquality)
columnmean2(airquality)
#all expressions return same value although some better than others for clarity
mydata <- rnorm(100)
sd(mydata)
sd(x = mydata)
sd(x = mydata, na.rm = FALSE)
sd(na.rm = FALSE, mydata)
sd(na.rm = FALSE, x = mydata)
#nothing worked here
args(lm)
lm(data = mydata, y - x, model = FALSE, 1:100)
lm(y - x, data = mydata, 1:100)
f<-function() {
cat("Hello, world!\n")
}
f()
f <- function(a, b = 1, c = 2, d = NULL) {
}
#lazy evaluation. The function never uses b, so no input is fine
f <- function(a,b) {
a^2
}
f(2)
#will exicute until error, so a prints just fine
f <- function(a,b) {
print(a)
print(b)
}
f(45)
f(45,54)
#using '...'
# Thin wrapper around plot() that forwards extra arguments via `...`.
# Fix: the default was type = "1" (digit one), which is not a valid plot
# type in base graphics and made the default call error; the intended
# value from the lecture slides is type = "l" (line plot).
myplot <- function(x, y, type = "l", ...) {
  plot(x, y, type = type, ...)
}
args(paste)
args(cat)
##if '...' comes first, everything else after must be explicitly matched
lm <- function(x) {x * x}
lm
search()
rm(ls)
rm(z) ##for next function
f <- function(x,y) {
x^2 + y/z
}
f(2,4) #error because no 'z' in global environment
z = 2
f(2,4) #although no z input, it found it in glob env
# Function factory demonstrating lexical scoping: returns a closure that
# raises its argument to the fixed power n.  The closure's enclosing
# environment keeps both `n` and `pow`, which the lecture later inspects
# with ls(environment(cube)) and get("n", environment(cube)) -- so the
# internal `pow` binding must not be removed.
make.power <- function(n) {
  pow <- function(x) {
    x^n
  }
  pow
}
cube <- make.power(3)
cube(3) #becomes a function and asks for x
square <- make.power(2)
square(3)
ls(environment(cube))
get("n", environment(cube))
ls(environment(square))
get("n", environment(square))
y<-10
f<-function(x) {
y <- 2
y^2 + g(x) #g(x) = x * 10 bc y=10 in the environment g was defined in
}
g<-function(x) {
x*y #Free variables in R functions take values from the environment in which the function is defined (10)
}
f(3) #returns 34
rm(y) #take 'y' out of global environment
g <- function(x) {
a <- 3
x+a+y
}
g(2) #returns error. no 'y' in global environment
y<-3
g(2) #ta daaaaa
##Optimization for parameters on a vector of data. #more advanced but needed for Stats
# Factory for the negative log-likelihood of an i.i.d. normal sample.
# Elements of `fixed` that are not FALSE pin the corresponding parameter
# (mu, sigma); the returned closure takes only the free parameter(s) in
# `p` and returns the value to be *minimised*.  The sigma^2 form is kept
# (rather than dnorm) so negative sigma trial points from an optimiser
# still evaluate finitely.
make.NegLogLik <- function(data, fixed=c(FALSE,FALSE)) {
  params <- fixed
  function(p) {
    # Fill in the free slots of (mu, sigma) from p.
    params[!fixed] <- p
    mu <- params[1]
    sigma <- params[2]
    # log L = -n/2 * log(2*pi*sigma^2) - sum((x - mu)^2) / (2*sigma^2)
    n <- length(data)
    loglik <- -0.5 * n * log(2 * pi * sigma^2) -
      0.5 * sum((data - mu)^2) / sigma^2
    -loglik
  }
}
set.seed(1); normals <-rnorm(100,1,2)
nLL <- make.NegLogLik(normals)
nLL
nLL() #returned error. no 'p' defined
ls(environment(nLL))
optim(c(mu = 0, sigma = 1), nLL)$par #returns 1.2182 and 1.7873
#fixing sigma = 2
nLL2 <- make.NegLogLik(normals,c(FALSE,2))
optimize(nLL2, c(-1,3))$minimum #returns 1.2178
#fixing mu = 1
nLL1 <- make.NegLogLik(normals, c(1, FALSE))
optimize(nLL1, c(1e-6,10))$minimum #returns 1.8006
##plot it
nLL1 <- make.NegLogLik(normals, c(1,FALSE))
x <- seq(1.7, 1.9, len = 100)
y <- sapply(x, nLL1)
plot(x, exp(-(y-min(y))), type = "l") #'l' for line graph
Nll2 <- make.NegLogLik(normals, c(FALSE,2))
x2 <- seq(0.5, 1.5, len = 100)
y2 <- sapply(x2, Nll2)
plot(x2, exp(-(y2-min(y2))), type = "l")
##coding standards notes: 4 to 8 space indents w/ 80 col margin
##dates and times in R (YYYY-MM-DD)
#dates are 'Date' class. Stored internally as # days since 1970-01-01
#times are 'POSIXct' or 'POSIXlt' class. Stored int as #secs since 1970-01-01
x<- as.Date("1970-01-01")
x
unclass(x) #returns '0'
unclass(as.Date("1970-01-02")) #returns '0'
#'POSIXct' is a very large integer
#'POSIXlt' is a list with day of week, day of year, month, day of month
#'weekdays' gives day of week
#'months' gives month name
#'quarters' gives quarter number 'Q1', 'Q2', etc
x <- Sys.time()
x #now in "POSIXct' form
unclass(x) #returns '1591218051'
x$sec #returns error - wrong type
p <- as.POSIXlt(x) #change time type to list
names(unclass(p)) #look at names of list objects, see '$sec' '$hour' '$isdst' etc
p
unclass(p)
p$sec
p$hour
datestring <- c("January 10, 2012 10:40", "December 9, 2011 9:10")
x <- strptime(datestring, "%B $d, %Y %H:%M") #%B = unabrev Month, %Y = YYYY
x
class(x)
?strptime
x <- as.Date("2012-01-01")
unclass(x)
y <- strptime("9 Jan 2011 11:34:21", "%d %b %Y %H:%M:%S")
unclass(y)
x-y ##x-y returns error b/c x is 'ct' and y is 'lt'
x<-as.POSIXlt(x) ##change x to 'lt' time
x-y ##returns 'Time difference of 356.3 days'
x <- as.Date("2012-03-01")
y <- as.Date("2012-02-28")
x-y ##returns 'Time difference of 2 days' bc it knows about leap year!
x <- as.POSIXct("2012-10-25 01:00:00")
y <- as.POSIXct("2012-10-25 06:00:00", tz = "GMT")
y-x ##returns 'Time difference of 1 hours' bc it knows time zones!
|
7bba7d1f6c3462b04b064c64ef8a5c024ba57ada | 3bfd75b66797d12f15ce05a3f2194067e4450dc9 | /Legacy/master_source.R | b9d3d480aab8cdae639d6286ff03da1351a349c3 | [] | no_license | lukeasteer/NHL-injury-value | c7e230515f6be4fa3d9ec0e8e3f0791184b4e861 | e5fe171e88cb8a7857725a1389b0761de7929992 | refs/heads/master | 2020-03-22T07:59:49.727430 | 2019-08-12T20:55:12 | 2019-08-12T20:55:12 | 139,737,950 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,411 | r | master_source.R | # Install and load packages
install.packages("tidyverse")
library(tidyverse)
# Load WAR data, injury data
war_db <- read_csv("war_ratings_2018-04-05.csv")
injury_db <- read_csv("NHL_Injury_Database_data.csv")
# Create a new WAR df with the required data
war82_db <- war_db %>%
select(player = Player, season = Season, team = Team, games_played = GP, toi = TOI, war_82 = `WAR/82`) %>%
# Handle players who were traded mid-season
separate(team, c("team_one","team_two", "team_three"), sep = '/') %>%
# Create games_missed var, for comparison to games_missed_injury; account for absences for personal reasons, etc.
mutate(games_missed = 82 - games_played)
# Create a new injury df with the required data
clean_injury_db <- injury_db %>%
select(chip = Chip, cap_hit = `Cap Hit`, games_missed_injury = `Games Missed`, injury_type = `Injury Type`,
player = Player, position = Position, season = Season, team = Team, -`Number of Records`) %>%
# Re-format player names
separate(player, c("last_name","first_name"), sep = ',') %>%
mutate(player = paste0(first_name, '.', last_name) %>% str_to_upper) %>%
# Re-format season
separate(season, c("start_year","end_year"), sep = '/') %>%
mutate(season = paste0(start_year, '-20', end_year)) %>%
# Re-format position; split into position and status
separate(position, c("position_new", "status"), sep = '"') %>%
# Filter out all retired players
filter(status != retired) %>%
# Aggregate injuries on a per-season basis
group_by(player, team, season, position_new, cap_hit) %>%
summarise(total_games_missed_injury = sum(games_missed_injury), total_chip = sum(chip))
# Re-arrange columns
#war82_db <- war82_db[, c(1:6, 9, 7, 8)]
#clean_injury_db <- clean_injury_db[, c(1, 2, 5, 3,) ] # Not done
# Create two named lists of NHL teams for re-formatting
team_names_short <- c("ANA" = "Anaheim", "ARI/PHX" = "Arizona/Phoenix", "BOS" = "Boston", "BUF" = "Buffalo", "CGY" = "Calgary",
"CAR" = "Carolina", "CHI" = "Chicago", "COL" = "Colorado", "CBJ" = "Columbus", "DAL" = "Dallas",
"DET" = "Detroit", "EDM" = "Edmonton", "FLA" = "Florida", "LAK" = "Los Angeles", "MIN" = "Minnesota",
"MTL" = "Montreal", "NSH" = "Nashville", "NJD" = "New Jersey", "NYI" = "NY Islanders", "NYR" = "NY Rangers",
"OTT" = "Ottawa", "PHI" = "Philadelphia", "PIT" = "Pittsburgh", "SJS" = "San Jose", "STL" = "St. Louis",
"TBL" = "Tampa Bay", "TOR" = "Toronto", "VAN" = "Vancouver", "VGK" = "Vegas", "WSH" = "Washington",
"WPG/ATL" = "Winnipeg/Atlanta")
team_names_short2 <- c("LAK" = "L.A", "NJD" = "N.J", "TBL" = "T.B")
# Re-format team names
#clean_injury_db$team <- names(team_names_short)[match(clean_injury_db$team, team_names_short)]
# Examine players who were traded to determine if injured games were correctly attributed
#traded_players <- war82_db %>%
#filter(team_two != "NA") %>%
#left_join(clean_injury_db, by = c("player", "season"))
# Properly attribute injured games
# Join WAR df, injury df
#injury_value_db <- clean_injury_db %>%
#left_join(war82_db, by = c("player", "season"))
# Create team injury summaries
#injury_value_db_team <- injury_value_db %>%
#group_by(team, season) %>%
#summarise(total_team_games_missed = sum(total_games_missed), total_team_chip = sum(total_chip)) %>%
#ungroup() |
37068ce8f9cbbaf34db8cca05df3e2bd67e3c9c6 | 41c9353fb07b34eec71b663554a207cfd59f4548 | /tpR_note_ex6.R | 933c84ec8b134bdf9e8886fd45c51a66459ee6c9 | [] | no_license | Acathignol/R_note | f02d6b61404cd274d913d19d2a3b34d67b714c15 | 8339e48649284e0f2253828aa9e95dabd17146e7 | refs/heads/master | 2021-01-11T11:07:50.483568 | 2016-01-29T15:41:56 | 2016-01-29T15:41:56 | 50,662,788 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,928 | r | tpR_note_ex6.R | rm(list=ls())
notes.m=matrix(nr=30,nc=20)
for (i in 1:20){notes.m[,i]=round(rnorm(30,10,abs(rnorm(1,mean=1,sd=0.3))), digit=2)}
colnames(notes.m)= c("E1","E2", "E3", "E4", "E5","E6","E7","E8","E9","E10","E11",
"E12","E13","E14","E15","E16","E17","E18","E19","E20")
notes=as.vector(t(notes.m))
etu=sort(rep(as.factor(1:30),20))
plot(notes~etu)
anova(lm(notes~etu))
#creer matrice 30par20
#rentre des notes selon loi normale
#nomme evaluation
#met les notes en vecteurs
#les trie dans l'ordre croissant
#boxplot notes pour les afficher
#ANOVA=??????????????????????????????????????????? analyse de variance notes
pValues<-c()
count=0
for (i in 1:30){
count=count+1
for(j in 1:30){
t.test(etu)
pValues[count]<-pvalue
count=count+1}
}#SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE how to do
#recup p value t test, ECRIT !!!!!!!!!!!!!!!!!!! => sinon , "names(objet) => acceder dolar //// sinon chercher type objet(plu dur)
#selon la simul, ANOVA donne un summary de Df , sum, sq, mean, sq, F , value , Pr pour tout les étudiants
#y qttitatif mais x qualitatif => effet des notes entre les eleves (var notes entre eleve par rapport entre var des note pour un eleves
#=> inter vs intra => ANOVA (ou LM)) si p-value significative => les eleves ont pas la mm notes si non => tout les eleves ont globalement mm notes
#on trouvera pas significatif => tt eleve ont mm note (H0 vrai de vrai)
#tt les eleves 1 à 1 => test de t
#rejet H0 alors que vraie (on trouve significative alors que faux)
# avec 435 test => prob de se tromper au moins une fois = 1
#COMMENT FAIRE SANS? => alpha=0.05/435 ATENTTION PB !!!!! PERTE DE TOUTE LA PUISSANCE DU TEST !!!!!!!!!!!!!!!!
#donc suite exo car plus cap de voir un meilleur qu'un autre
# donc controle de risque
#--------------------------------------------------------------------------------------------------------------
library(multcomp)
test.none <- pairwise.t.test(notes, etu,p.adj="none")
summary(as.vector(test.none$p.value<0.05))
#--------------------------------------------------------------------------------------------------------------
notes.m=matrix(nr=30,nc=20)
for (i in 1:20){notes.m[,i]=round(rnorm(30,10,abs(rnorm(1,mean=1,sd=0.3))), digit=2)}
colnames(notes.m)= c("E1","E2", "E3", "E4", "E5","E6","E7","E8","E9","E10","E11",
"E12","E13","E14","E15","E16","E17","E18","E19","E20")
notes.m[5,]=notes.m[5,]+1
notes=as.vector(t(notes.m))
etu=sort(rep(as.factor(1:30),20))
plot(notes~etu)
anova(lm(notes~etu))
test.none <- pairwise.t.test(notes, etu,p.adj="none")
summary(as.vector(test.none$p.value<0.05))
test.bonf <- pairwise.t.test(notes, etu,p.adj="bonferroni")
summary(as.vector(test.bonf$p.value<0.05))
((test.bonf$p.value/test.none$p.value)[test.bonf$p.value!=1])
#--------------------------------------------------------------------------------------------------------------
notes.m=matrix(nr=30,nc=20)
for (i in 1:20){notes.m[,i]=round(rnorm(30,10,abs(rnorm(1,mean=1,sd=0.3))), digit=2)}
colnames(notes.m)= c("E1","E2", "E3", "E4", "E5","E6","E7","E8","E9","E10","E11",
"E12","E13","E14","E15","E16","E17","E18","E19","E20")
notes.m[1:30,]=notes.m[1:30,]+seq(-1, +1, le=30)
notes.m=notes.m[sample(1:30,30, rep=F),]
notes=as.vector(t(notes.m))
etu=sort(rep(as.factor(1:30),20))
plot(notes~etu)
anova(lm(notes~etu))
test.none <- pairwise.t.test(notes, etu,p.adj="none")
summary(as.vector(test.none$p.value<0.05))
test.bonf <- pairwise.t.test(notes, etu,p.adj="bonferroni")
summary(as.vector(test.bonf$p.value<0.05))
test.holm <- pairwise.t.test(notes, etu,p.adj="holm")
summary(as.vector(test.holm$p.value<0.05))
test.BH <- pairwise.t.test(notes, etu,p.adj="BH")
summary(as.vector(test.BH$p.value<0.05))
#-------------------------------------------------------------------------------------------------------------- |
179f81a322fab74a9b9db00b5ffa07bfb82f28bb | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/gdpc/man/plot.gdpc.Rd | 1ea60dbeb8f9ce204137b549885ba5e7bce68a72 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,264 | rd | plot.gdpc.Rd | \name{plot.gdpc}
\alias{plot.gdpc}
\title{Plot Generalized Dynamic Principal Components}
\description{
Plots a \code{gdpc} object.
}
\usage{
\method{plot}{gdpc}(x, which = 'Component', which_load = 0, \dots)
}
\arguments{
\item{x}{An object of class \code{gdpc}, usually the result of \link{gdpc} or one of the entries of the result of \link{auto.gdpc}.}
\item{which}{String. Indicates what to plot, either 'Component' or 'Loadings'. Default is 'Component'.}
\item{which_load}{Lag number indicating which loadings should be plotted. Only used if which = 'Loadings'. Default is 0.}
\item{\dots}{Additional arguments to be passed to the plotting functions.}
}
\author{
Daniel Peña, Ezequiel Smucler, Victor Yohai
}
\seealso{
\code{\link{gdpc}}, \code{\link{auto.gdpc}}, \code{\link{plot.gdpcs}}
}
\examples{
T <- 200 #length of series
m <- 200 #number of series
set.seed(1234)
f <- rnorm(T + 1)
x <- matrix(0, T, m)
u <- matrix(rnorm(T * m), T, m)
for (i in 1:m) {
x[, i] <- 10 * sin(2 * pi * (i/m)) * f[1:T] + 10 * cos(2 * pi * (i/m)) * f[2:(T + 1)] + u[, i]
}
#Choose number of lags using the LOO type criterion.
#k_max=3 to keep computation time low
autofit <- auto.gdpc(x, k_max = 3)
plot(autofit[[1]], xlab = '', ylab = '')
}
\keyword{ts} |
42fb32c26704f2963333e4f70e4a7e8614af75a7 | f2743ecb30a75bd0cf3e46082b3198640b4f043d | /man/transCoord.Rd | d63b121410db09308c7cd34fae9175d5553c6b89 | [
"MIT"
] | permissive | daewoooo/primatR | 561fa3eb0a988cdae85c2e51efd9777a2f603023 | 71d23a8d1056d936470a568036fc41ffe3db0018 | refs/heads/master | 2022-04-29T06:48:35.738394 | 2022-04-21T17:04:37 | 2022-04-21T17:04:37 | 153,538,061 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 523 | rd | transCoord.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{transCoord}
\alias{transCoord}
\title{Transform genomic coordinates}
\usage{
transCoord(gr)
}
\arguments{
\item{gr}{A \code{\link{GRanges-class}} object.}
}
\value{
The input \code{\link{GRanges-class}} with two additional metadata columns 'start.genome' and 'end.genome'.
}
\description{
Add two columns with transformed genomic coordinates to the \code{\link{GRanges-class}} object. This is useful for making genomewide plots.
}
|
a9cc745beecc050816e31832be2df579a02bc7e2 | 4f9b8fae103c72ae0c7b0796ec1562f02f69851d | /ps1/2B.R | 7300e7bcef20a21087fdb69ffbf8abd350781926 | [] | no_license | cjpatton/256 | 3b14a0e0453a33563506256f3774a92101a39c7a | 6f88419b8dbe93d3959247d37d0ecbb8f57beb30 | refs/heads/master | 2020-02-26T15:18:13.075363 | 2014-03-12T21:04:57 | 2014-03-12T21:04:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,896 | r | 2B.R |
# DES.R: R routines for discrete-event simulation (DES), with an example
# matrix version; data frame allows character event types, but much too slow
# all data is stored in an R environment variable that will be referrred
# to as simlist below
# the simlist will consist of the following components:
#
# currtime: current simulated time
# evnts: the events list, a matrix
# reactevent: event handler, user-supplied; creates new
# events upon the occurrence of an old one;
# e.g. job arrival triggers either start of
# service for the job or queuing it; call form is
# reactevent(evnt,simlist)
# dbg: if TRUE, will print evnts above after each event
# scheduling action, and enter R browser for single-stepping
# etc.
# the application code can add further application-specific data to
# simlist, e.g. total job queuing time
# each event will be represented by a matrix row consisting of:
#
# occurrence time
# event type (user-defined numeric code)
#
# and application-specific information, if any
# library functions (do not alter):
#
# newsim: create a new simlist
# insevnt: insert a new event into evnts in the simlist
# schedevnt: schedule a new event (determine its occurrence time
# and call insevnt())
# getnextevnt: pulls the earliest event from the event list,
# process it, and update the current simulated
# time
# mainloop: as the name implies
# appendtofcfsqueue: append job to a FCFS queue
# delfcfsqueue: delete head of a FCFS queue
# outline of a typical application:
# mysim <- newsim() create the simlist
# set reactevent in mysim
# set application-specific variables in mysim, if any
# set the first event in mysim$evnts
# mainloop(mysim,mysimtimelim)
# print results
# create a simlist, which will be the return value, an R environment
# Construct a fresh simulation environment ("simlist").
#
# The returned environment carries the simulated clock (currtime, starting
# at 0.0), the event matrix (evnts, empty at first) and the debug flag
# (dbg).  The caller adds its own event handler and application state
# before invoking mainloop().
#
# dbg: if TRUE, mainloop() prints each event and drops into the browser
newsim <- function(dbg=F) {
   sim <- new.env()
   sim$currtime <- 0.0   # simulated clock begins at time 0
   sim$evnts <- NULL     # no events scheduled yet
   sim$dbg <- dbg
   sim
}
# insert event evnt into evnts in simlist
# File event evnt into the time-sorted event matrix held in simlist.
#
# evnt: numeric vector (occurrence time, event type, optional app data)
# simlist: simulation environment created by newsim()
insevnt <- function(evnt,simlist) {
   evlist <- simlist$evnts
   # empty event list: the new event becomes the whole (1-row) matrix
   if (is.null(evlist)) {
      simlist$evnts <- matrix(evnt,nrow=1)
      return()
   }
   # locate the slot this event belongs in, ordered by occurrence time
   pos <- binsearch(evlist[,1],evnt[1])
   nevnts <- nrow(evlist)
   # rebuild the matrix: rows before the slot, the new event, rows after
   upper <- if (pos > 1) evlist[1:(pos-1),] else NULL
   lower <- if (pos <= nevnts) evlist[pos:nevnts,] else NULL
   simlist$evnts <- rbind(upper,evnt,lower)
   rownames(simlist$evnts) <- NULL
}
# schedule new event in evnts in simlist; evnttime is the time at
# which the event is to occur; evnttype is the event type; appdata is
# a vector of numerical application-specific data
# Schedule a new event: build the event row -- occurrence time, numeric
# event type, then any application-specific data -- and file it into the
# sorted event list via insevnt().
schedevnt <- function(evnttime,evnttype,simlist,appdata=NULL) {
   insevnt(c(evnttime, evnttype, appdata), simlist)
}
# start to process next event (second half done by application
# programmer via call to reactevnt() from mainloop())
# Remove and return the earliest event from the event list.
#
# The caller (mainloop()) finishes processing by advancing the clock and
# dispatching to the user handler.  Returns the event row as a vector.
getnextevnt <- function(simlist) {
   nxt <- simlist$evnts[1,]
   # drop the row just taken; a matrix reduced to zero rows becomes NULL
   simlist$evnts <-
      if (nrow(simlist$evnts) == 1) NULL else simlist$evnts[-1,,drop=F]
   nxt
}
# main loop of the simulation
# Main loop of the simulation.
#
# Repeatedly pulls the earliest event off the event list, advances the
# simulated clock to that event's time, and dispatches the event to the
# user-supplied handler simlist$reactevent().  Runs until the simulated
# time reaches simtimelim, or until the event list is exhausted.
#
# simlist: simulation environment created by newsim()
# simtimelim: simulated-time horizon at which to stop
mainloop <- function(simlist,simtimelim) {
   while(simlist$currtime < simtimelim) {
      # guard: if no events remain, stop cleanly; without this check
      # getnextevnt() would error indexing a NULL event matrix
      if (is.null(simlist$evnts)) break
      head <- getnextevnt(simlist)
      # update current simulated time
      simlist$currtime <- head[1]
      # process this event (programmer-supplied ftn)
      simlist$reactevent(head,simlist)
      if (simlist$dbg) {
         print("event occurred:")
         print(head)
         print("events list now")
         print(simlist$evnts)
         browser()
      }
   }
}
# binary search of insertion point of y in the sorted vector x; returns
# the position in x before which y should be inserted, with the value
# length(x)+1 if y is larger than x[length(x)]; this could be replaced
# by faster C code
# Binary search for the insertion point of y in the sorted vector x.
#
# Returns the index before which y should be inserted; if y is larger
# than every element, returns length(x)+1.  If a probe lands exactly on
# y, that probe index is returned directly.
binsearch <- function(x,y) {
   lo <- 1
   hi <- length(x)
   while (hi > lo + 1) {
      mid <- floor((lo + hi) / 2)
      if (y == x[mid]) return(mid)
      if (y < x[mid]) {
         hi <- mid
      } else {
         lo <- mid
      }
   }
   if (y <= x[lo]) return(lo)
   if (y < x[hi]) return(hi)
   hi + 1
}
# appendtofcfsqueue() and delfcfsqueuehead() below assume the
# application code has one or more queues, each queue stored as a
# list-of-lists, with each individual list being the information for one
# queued job; note that one must initialize the list-of-lists as NULL
# appends jobtoqueue to the given queue, assumed of the above form;
# the new, longer list is returned
# Append one job to a FCFS queue (a list of lists).
#
# queue: the current queue, possibly NULL (the empty-queue convention)
# jobtoqueue: the job record (itself a list) to place at the tail
# Returns the new, longer queue.
appendtofcfsqueue <- function(queue,jobtoqueue) {
   c(queue, list(jobtoqueue))
}
# deletes head of queue; assumes list-of-lists structure as decribed
# above; returns the head and new queue
# Remove the head of a FCFS queue (list of lists).
#
# Returns list(qhead = the removed job, newqueue = the remaining queue).
# A queue that becomes empty is returned as NULL rather than list(), to
# match the empty-queue convention used by appendtofcfsqueue().
delfcfsqueuehead <- function(queue) {
   remaining <- if (length(queue) > 1) queue[-1] else NULL
   list(qhead=queue[[1]], newqueue=remaining)
}
## Start of our code.
# Nurse Problem
# Parameters
# n :: limit on active nurses
# q :: limit on queue size
# p :: timeout time
# d :: mean of call duration (exponential)
# r :: mean of call arrival (uniform? [0, 2r])
# (stated in problem that there is an issue with exponential arrival, but I don't see why)
# (Is it that it doesn't make sense as a policy if arrival time is exponential?)
# Nurse call-center simulation driver.
#
# Builds the simlist for the call-center model, schedules the first call
# arrival (and, when p > 0, the first timeout event), runs the DES main
# loop, and prints summary statistics.
#
# d: mean call duration (exponential)
# r: mean call interarrival time (arrivals uniform on [0, 2r])
# n: limit on the number of active nurses
# q: limit on the call queue length
# p: timeout period for sending an idle nurse home (<= 0 disables timeouts)
# timelim: simulated-time horizon
# dbg: passed to newsim() for single-step debugging
cc2b <- function(d, r, n, q, p, timelim, dbg=F) {
  # Event types:
  # 1 - call arrived
  # 2 - call ended
  # 3 - timeout
  # Set up simulation list, specify event handler.
  simlist <- newsim(dbg)
  simlist$reactevent <- cc2breact
  # Parameters required by cc2breact()
  simlist$lambda_d = 1/d
  simlist$r = r #for uniform
  simlist$lambda_r = 1/r #for exp
  simlist$n <- n # max active nurses
  simlist$q <- q # max queue size
  simlist$p <- p
  # initial conditions : one active nurse, no calls in queue
  simlist$i_n <- 1 # active nurse
  simlist$i_i <- 1 # idle nurses
  simlist$i_q <- 0 # queued calls
  # We must generate the first event and handle it.
  # Since the simulation starts the queue will be empty,
  # the first event will be a call arriving
  tta <- runif(1, 0, 2*r)
  # tta <- rexp(1, 1/r) # for exp
  # Time bookkeeping: lasttime records when the nurse pools last changed;
  # activeTime/idleTime accumulate nurse-time integrals (count * elapsed),
  # used at the end to report the idle-to-active time ratio.
  simlist$lasttime <- 0.0
  simlist$activeTime <- 0.0
  simlist$idleTime <- 0.0
  schedevnt(tta, 1, simlist)
  # Flag for timeout
  simlist$reset <- F
  simlist$nextTimeout <- p
  # A perpetual timeout event runs alongside the simulation; each handled
  # timeout (type 3) schedules the next one at simlist$nextTimeout.
  if(p > 0){ # only start it if there is a positive timeout value! Otherwise, ignore.
    schedevnt(p, 3, simlist)
  }
  # Running totals for dropped calls
  simlist$rej <- 0
  simlist$tot <- 0
  # Enter main loop (calls cc2breact()).
  mainloop(simlist, timelim)
  # Summarize results.
  # NOTE(review): this is the ratio of idle nurse-time to active nurse-time,
  # not a fraction of total time, despite the printed label -- confirm that
  # is what is intended.
  simlist$time <- simlist$idleTime / simlist$activeTime
  proRej <- simlist$rej / simlist$tot
  print("Proportion of calls rejected")
  print(proRej)
  print("Proportion of nurse idle time")
  print(simlist$time)
}
# Our reactevent(). Transition to new state
# and generate next event.
# Event handler for the nurse call-center simulation (see cc2b()).
#
# evnt: the event row just pulled off the event list
#       (evnt[1] = occurrence time, evnt[2] = event type:
#        1 = call arrived, 2 = call ended, 3 = timeout)
# simlist: the simulation environment built by cc2b()
#
# State maintained in simlist: i_n (active nurses), i_i (idle nurses),
# i_q (queued calls), tot/rej (total and rejected call counts),
# activeTime/idleTime/lasttime (nurse-time accounting), reset/nextTimeout
# (timeout machinery).  Whenever the nurse pools change, the elapsed
# nurse-time since lasttime is folded into activeTime and idleTime first.
cc2breact <- function(evnt, simlist) {
  etype <- evnt[2]
  # Transition state.
  if (etype == 1){ # call arrived
    simlist$reset <- T
    simlist$nextTimeout <- simlist$currtime + simlist$p
    simlist$tot <- simlist$tot + 1
    print ("-----Call arrived at time-----")
    print (evnt[1])
    if(simlist$i_i > 0){ # if idle nurses to take call (queue empty)
      # active time calculations
      # Essentially, I want to do this calculation whenever the idle or active
      # list changes, for any reason. $lasttime will be set to the last time it changed.
      delta <- simlist$currtime - simlist$lasttime
      simlist$activeTime <- simlist$activeTime + (simlist$i_n * delta)
      simlist$idleTime <- simlist$idleTime + (simlist$i_i * delta)
      simlist$lasttime <- simlist$currtime
      # Now that that's sorted, we can move on
      simlist$i_i <- simlist$i_i - 1
      # new event: call ended
      tte <- rexp(1, simlist$lambda_d)
      schedevnt(simlist$currtime + tte, 2, simlist)
      print ("an idle nurse takes the call")
    } else if(simlist$i_q < simlist$q){ # if queue not full, no nurses to take call
      simlist$i_q <- simlist$i_q + 1
      print ("no nurse call take the call, queue is not full")
    } else { # queue full, call dropped, new active nurse
      simlist$rej <- simlist$rej + 1
      if(simlist$i_n < simlist$n){ # if nurse limit not reached
        # active time calculations
        delta <- simlist$currtime - simlist$lasttime
        simlist$activeTime <- simlist$activeTime + (simlist$i_n * delta)
        simlist$idleTime <- simlist$idleTime + (simlist$i_i * delta)
        simlist$lasttime <- simlist$currtime
        # Add one nurse, take call from queue
        simlist$i_n = simlist$i_n + 1
        simlist$i_q = simlist$i_q - 1
        # new event: call ended
        tte <- rexp(1, simlist$lambda_d)
        schedevnt(simlist$currtime + tte, 2, simlist)
      }
      print ("queue is full, grab new nurse, drop top call")
    }
    print ("Stats so far:")
    print ("total calls")
    print (simlist$tot)
    print ("total rejected calls")
    print (simlist$rej)
    print ("active nurses")
    print (simlist$i_n)
    print ("idle nurses")
    print (simlist$i_i)
    print ("active nurse time")
    print (simlist$activeTime)
    print ("idle nurse time")
    print (simlist$idleTime)
    # new event: call arrival
    tta <- runif(1, 0, 2*simlist$r)
    schedevnt(simlist$currtime + tta, 1, simlist)
  } else if (etype == 2) { # call ended
    print ("-----Call ended at time-----")
    print (evnt[1])
    if(simlist$i_q > 0){ # calls in queue
      simlist$i_q <- simlist$i_q - 1
      # new event: call ended
      tte <- rexp(1, simlist$lambda_d)
      schedevnt(simlist$currtime + tte, 2, simlist)
      print ("there are calls in queue ")
    } else { # queue empty, new idle nurse
      # active time calculations
      delta <- simlist$currtime - simlist$lasttime
      simlist$activeTime <- simlist$activeTime + (simlist$i_n * delta)
      simlist$idleTime <- simlist$idleTime + (simlist$i_i * delta)
      simlist$lasttime <- simlist$currtime
      # new idle nurse
      simlist$i_i <- simlist$i_i + 1
      if(simlist$p <= 0 && simlist$i_n > 1){ # if no timeout value and more than 1 active nurse
        # with timeouts disabled, a freed nurse beyond the first is sent
        # home immediately rather than kept idle
        simlist$i_i <- simlist$i_i - 1
        simlist$i_n <- simlist$i_n - 1
      }
      print("queue is empty - new idle nurse")
    }
    print ("Stats so far:")
    print ("total calls")
    print (simlist$tot)
    print ("total rejected calls")
    print (simlist$rej)
    print ("active nurses")
    print (simlist$i_n)
    print ("idle nurses")
    print (simlist$i_i)
    print ("active nurse time")
    print (simlist$activeTime)
    print ("idle nurse time")
    print (simlist$idleTime)
  } else if (etype == 3) { # timeout
    print ("-----Timeout happened at time-----")
    print (evnt[1])
    if(simlist$reset) {
      print ("previously arrived call has reset this arrival")
      # timeout has been reset by a call arriving
      simlist$reset <- F
    } else if (simlist$i_n == 1) {
      # only one nurse left
      # Reset timeout
      simlist$nextTimeout <- simlist$currtime + simlist$p
      print ("only one nurse is left, reset timeout") #probably don't even need to reset it, the next call that arrives will reset it
    } else if (simlist$i_i > 0) { # Some idle nurses ( >1)
      # active time calculations
      delta <- simlist$currtime - simlist$lasttime
      simlist$activeTime <- simlist$activeTime + (simlist$i_n * delta)
      simlist$idleTime <- simlist$idleTime + (simlist$i_i * delta)
      simlist$lasttime <- simlist$currtime
      # remove nurses from active pool
      simlist$i_n <- simlist$i_n - 1
      simlist$i_i <- simlist$i_i - 1
      # reset timeout
      simlist$nextTimeout <- simlist$currtime + simlist$p
      print ("there are idle nurses, one of them is sent to inactive")
    } else { # no idle nurses
      simlist$nextTimeout <- simlist$currtime + simlist$p
      print ("no idle nurses - reset timer?") # is this correct logic? what if there are active nurses that are on the phone and when the next call is finished, a nurse should be sent home?
    }
    # new event : next timeout
    schedevnt(simlist$nextTimeout, 3, simlist)
    print ("Stats so far:")
    print ("total calls")
    print (simlist$tot)
    print ("total rejected calls")
    print (simlist$rej)
    print ("active nurses")
    print (simlist$i_n)
    print ("idle nurses")
    print (simlist$i_i)
    print ("active nurse time")
    print (simlist$activeTime)
    print ("idle nurse time")
    print (simlist$idleTime)
  }
  # Okay, here's where my design is different, with DES.
  # Yes, I know the memoryless property says you can schedule things
  # such that there is exactly one event rolling. But honestly,
  # that just obscures intuition to me. The intuition is that all
  # of these things run in parallel, so I schedule multiple events
  # at once. Because of the memoryless property, this is equivalent
  # to scheduling them sequentially.
}
|
902225d778e86101723114a89c46a918e592e251 | 814850b76f30647f6b85c191369a6e33a91bc7eb | /man/visualize_network.Rd | 245ff9469cbb2dac4d9e92e672b3ca770a3448fe | [] | no_license | tanelp/riigikogu | 97186cd5dfa8a61ee1b15991feccf2704fe324b3 | 70dfb7d821b9339a2149a6d71fc6f810ed5e13ab | refs/heads/master | 2020-06-06T15:45:19.542867 | 2019-06-19T18:42:03 | 2019-06-19T18:42:03 | 192,782,306 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,369 | rd | visualize_network.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/votings.R
\name{visualize_network}
\alias{visualize_network}
\title{Plots a political polarization graph.}
\usage{
visualize_network(nodes, edges, prob = 0.7, min_count = 15,
align = FALSE, seed = 42)
}
\arguments{
\item{nodes}{a dataframe containing summary information about parliament members.}
\item{edges}{adjacency matrix representing co-occurrences between members' votes.}
\item{prob}{a probability threshold to binarize the adjacency matrix.}
\item{min_count}{remove nodes that have less than \code{min_count} votes.}
\item{align}{rotate the graph such that coaliation is on the right side.}
\item{seed}{a random seed.}
}
\value{
A political polarization graph.
}
\description{
Plots a political polarization graph.
}
\examples{
start_date = "2016-11-23"
end_date = "2019-04-29"
votings = get_votings(start_date, end_date)
votings = filter(votings, type.code == "AVALIK")
votes_list = lapply(votings$uuid, get_votes)
has_no_votes = sapply(votes_list, is.null)
votes_list = votes_list[!has_no_votes]
nodes = get_nodes(votes_list)
edges = get_edges(nodes, votes_list)
nodes$in_coalition = is_in_coalition(nodes, start_date, end_date)
p = visualize_network(nodes, edges, align=TRUE) +
labs(title=toupper("Political polarization"),
subtitle="Jüri Ratas' I cabinet")
}
|
35eebf9df0f6ef8e49accc47816ce508b4f6b482 | 13ff4289823f5aaab445bb629bd7f5d9041e99a3 | /man/flr.summary.old.Rd | e6f4f463976ee772a2af4661f8062d8fe241cb94 | [] | no_license | G-Thomson/gthor | 341d10c5fa5f4abe8f21d1243a725a332bf7783a | 3c6bf244bfde3afe6e95381a1f5651d5797a0fe0 | refs/heads/master | 2021-01-17T15:43:33.911368 | 2018-01-25T21:33:00 | 2018-01-25T21:33:00 | 75,253,525 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 875 | rd | flr.summary.old.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flowering_time.R
\name{flr.summary.old}
\alias{flr.summary.old}
\title{flr.summary.old}
\usage{
flr.summary.old(data, LoI)
}
\arguments{
\item{data}{This is a dataframe of flowering time fata with the following columns; Sowing.Number,
Conditions, Genotype, Plant.Number, Date.Sown, Date.Flowered,
Number.of.Days.to.First.Floral.Bud, Position.of.1st.Floral.Bud,
Node.Number.of.Main.Axis.at.1st.Floral.Bud and Comments.}
\item{LoI}{These are the Lines of Interest to be examined. Enter these as a vector of sowing
numbers as characters.}
}
\value{
This function summarises a flowering time data frame producing a list of the
summarised data and relevant raw data
}
\description{
flr.summary.old
}
\examples{
data = Mid_2016
LoI = c("P912", "P913") # Lines of interest
d <- flr.summary(data, LoI)
}
|
bf44d40f1fa3f87ad7f84d0a1c1b9b612bb6f49c | 7fa6fb23897c9ad195bdc22882c81c8ca046c851 | /man/ASCN.ASCAT.Rd | 072387de78fe01820b60e700a94c9db81708b64d | [
"MIT"
] | permissive | gustaveroussy/EaCoN | 65b319ad9ceb49ea57fb597df891f985b8d1f118 | d65eba1546805a538b3d861eacde14a96d9af841 | refs/heads/master | 2021-11-16T15:24:40.089746 | 2021-10-18T15:34:19 | 2021-10-18T15:34:19 | 117,849,090 | 22 | 16 | MIT | 2020-10-07T08:23:32 | 2018-01-17T14:42:50 | R | UTF-8 | R | false | false | 1,450 | rd | ASCN.ASCAT.Rd | \name{ASCN.ASCAT}
\alias{ASCN.ASCAT}
\title{Allele-Specific Copy Number estimation using ASCAT}
\usage{
ASCN.ASCAT(data = NULL, gammaRange = c(.35,.95), nsubthread = 1,
cluster.type = "PSOCK", out.dir = getwd(), force = FALSE, ...)
}
\arguments{
\item{data}{Data object contained in a RDS file generated with \code{Segment.ASCAT()} (or corresponding wrappers \code{Segment.ASCAT.ff()} and \code{Segment.ASCAT.ff.Batch()}.}
\item{gammaRange}{The range of gamma values to assess the modelling of ASCN (see \code{?ASCAT::ascat.runAscat}).}
\item{nsubthread}{Number of subthreads for multithreading.}
\item{cluster.type}{Type of subthreading cluster (see \code{?parallel::makeCluster()}).}
\item{out.dir}{Output directory.}
  \item{force}{If some result files already exist, delete them before running.}
\item{...}{Any additional parameter to give to \code{ASCAT::ascat.runAscat()}.}
}
\description{
This function performs the estimation of allele-specific copy number using ASCAT.
}
\value{
Several RDS objects, PNG plots, CBS files and data tables in several folders containing the different ASCN (allele-specific copy number) analyses performed for each value in \code{gammaRange}. The optimal fit among the generated ones will be output in a separate text file.
}
\details{
Please note that depending on noise and complexity of the L2R and BAF profiles, ASCAT will not always be able to perform ASCN.
}
\author{
Bastien Job
}
|
26baee728885ceafd92bd942b65dd26bc2b89b2d | 52338354bc84c147cde8caf40aa71f1f77462973 | /man/import_table.Rd | 9bd002645753819e7fa1f21f0529996d9e463c04 | [] | no_license | intiluna/flyio | b420d43724ca5a18011f0f2b1de49c75caf904d6 | a226fedcdb59818b50445383ffc15378d9cb6205 | refs/heads/master | 2023-05-25T00:46:06.349036 | 2020-02-11T10:55:21 | 2020-02-11T10:55:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,357 | rd | import_table.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_table.R
\name{import_table}
\alias{import_table}
\title{Read csv, Excel files, txt}
\usage{
import_table(file, FUN = data.table::fread,
data_source = flyio_get_datasource(),
bucket = flyio_get_bucket(data_source), dir = flyio_get_dir(),
delete_file = TRUE, show_progress = FALSE, ...)
}
\arguments{
\item{file}{path of the file to be read}
\item{FUN}{the function using which the file is to be read}
\item{data_source}{the name of the data source, if not set globally. s3, gcs or local}
\item{bucket}{the name of the bucket, if not set globally}
\item{dir}{the directory to store intermediate files}
\item{delete_file}{logical. to delete the file downloaded}
\item{show_progress}{logical. Shows progress of the download operation}
\item{...}{other parameters for the FUN function defined above}
}
\value{
the output of the FUN function
}
\description{
Read tabular data from anywhere using a function defined by you
}
\examples{
# for data on local
filepath = system.file("extdata", "mtcars.csv", package = "flyio")
data = import_table(filepath, FUN = read.csv, data_source = "local")
\dontrun{
# for data on cloud
flyio_set_datasource("gcs")
flyio_set_bucket("your-bucket-name")
data = import_table("excel-file-on-gcs.xlsx", read_excel, dir = tempdir())
}
}
|
aa053841d39453cd4665c5c042ebc4940e631af9 | fd0622e97276bba2c04d3c2fcba902cdfb65e214 | /packages/nimble/tests/testthat/test-mcmcrj.R | 6889d85d75ab5680cf17106bff71f05fc577cff8 | [
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-2.0-only",
"GPL-1.0-or-later",
"MPL-2.0",
"GPL-2.0-or-later"
] | permissive | nimble-dev/nimble | 7942cccd73815611e348d4c674a73b2bc113967d | 29f46eb3e7c7091f49b104277502d5c40ce98bf1 | refs/heads/devel | 2023-09-01T06:54:39.252714 | 2023-08-21T00:51:40 | 2023-08-21T00:51:40 | 20,771,527 | 147 | 31 | BSD-3-Clause | 2023-08-12T13:04:54 | 2014-06-12T14:58:42 | C++ | UTF-8 | R | false | false | 17,441 | r | test-mcmcrj.R | source(system.file(file.path('tests', 'testthat', 'test_utils.R'), package = 'nimble'))
RwarnLevel <- options('warn')$warn
options(warn = 1)
nimbleVerboseSetting <- nimbleOptions('verbose')
nimbleOptions(verbose = FALSE)
nimbleProgressBarSetting <- nimbleOptions('MCMCprogressBar')
nimbleOptions(MCMCprogressBar = FALSE)
context('Testing of MCMC_RJ functionality')
## Argument-validation checks for configureRJ() when used without indicator
## variables: missing priorProb, mismatched control-vector lengths, and
## out-of-range prior probabilities must all raise informative errors.
test_that("Test configureRJ with no indicator variables", {
    ## Linear regression with 2 covariates, one in the model
    code <- nimbleCode({
        beta0 ~ dnorm(0, sd = 100)
        beta1 ~ dnorm(0, sd = 100)
        beta2 ~ dnorm(0, sd = 100)
        sigma ~ dunif(0, 100)
        for(i in 1:50) {
            Ypred[i] <- beta0 + beta1 * x1[i] + beta2 * x2[i]
            Y[i] ~ dnorm(Ypred[i], sd = sigma)
        }
    })
    ## Data simulation
    set.seed(0)
    x1 <- runif(50, -1, 1)
    x2 <- runif(50, -1, 1)
    Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
    data <- list(Y = Y, x1 = x1, x2 = x2)
    inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y))
    m <- nimbleModel(code, data=data, inits=inits)
    mConf <- configureMCMC(m)
    ## One node
    nodes <- c("beta2")
    expect_error(configureRJ(mConf, nodes),
                 "configureRJ: Provide 'indicatorNodes' or 'priorProb' vector")
    #####################################
    ## One node, multiple parameters
    expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(fixedValue = c(0,1))),
                 'configureRJ: inconsistent length')
    expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(mean = c(0,1))),
                 'configureRJ: inconsistent length')
    expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(scale = c(2,1))),
                 'configureRJ: inconsistent length')
    ## priorProb not probabilities
    expect_error(configureRJ(mConf, nodes, prior = -1))
    expect_error(configureRJ(mConf, nodes, prior = 2))
    #####################################
    ## Multiple nodes, fewer parameters
    nodes <- c("beta0", "beta1", "beta2")
    expect_error(configureRJ(mConf, nodes, prior = c(0.5, 0.5)),
                 "configureRJ: Length of 'priorProb' vector must match 'targetNodes' length.")
    expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(fixedValue = c(0,1))),
                 "configureRJ: inconsistent length")
    expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(mean = c(0,1))),
                 "configureRJ: inconsistent length")
    expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(scale = c(2,1))),
                 "configureRJ: inconsistent length")
    #####################################
    ## priorProb not probabilities
    expect_error(configureRJ(mConf, nodes, prior = c(0.5, 2, 0.2)),
                 "configureRJ: elements in priorProb")
})
## configureRJ() must reject a multivariate (dmnorm) target node when only
## a priorProb is given: RJ sampling here supports univariate priors only.
test_that("Test configureRJ with multivariate node - no indicator", {
    ##############################
    ## Multivariate node
    code <- nimbleCode({
        beta0 ~ dnorm(0, sd = 100)
        mu[1:5] <- rep(0, 5)
        sigma[1:5] <- 1/rep(100, 5)
        ## NOTE(review): 'simgma.mat' looks like a typo for 'sigma_mat';
        ## the dmnorm below uses 'sigma_mat', supplied via inits -- confirm.
        simgma.mat[1:5, 1:5] <- diag(sigma[1:5])
        beta[1:5] ~ dmnorm(mu[1:5], sigma_mat[1:5, 1:5])
        for(i in 1:10) {
            Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5])
            Y[i] ~ dnorm(Ypred[i], sd = sigma.y)
        }
        sigma.y ~ dunif(0, 100)
    })
    ## simulate some data
    set.seed(1)
    X <- matrix(rnorm(10*5), 10, 5)
    betaTrue <- c(2, -2, 3, 0, 0)
    eps <- rnorm(10)
    Y <- as.vector(X%*%betaTrue + eps)
    data <- list(Y = Y, X = X)
    inits <- list(beta0 = 0, beta = rep(0, 5), sigma.y = sd(Y), sigma_mat = diag(rep(1/100, 5)), mu = rep(0, 5))
    m <- nimbleModel(code, data=data, inits=inits)
    mConf <- configureMCMC(m)
    ## test multivariate node
    expect_error(configureRJ(mConf, "beta", prior =0.5),
                 'is multivariate; only univariate priors can be used with reversible jump sampling.')
})
## configureRJ() must accept target nodes given as a whole variable name
## ("beta") or as explicit element/range expressions, with scalar or
## per-group priors, without raising errors.
test_that("Check passing node vector - no indicator", {
    #####################################
    ## Vector node
    code <- nimbleCode({
        beta0 ~ dnorm(0, sd = 100)
        for(i in 1:5){
            beta[i] ~ dnorm(0, sd = 100)
        }
        sigma ~ dunif(0, 100)
        for(i in 1:10) {
            Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5])
            Y[i] ~ dnorm(Ypred[i], sd = sigma)
        }
    })
    ## simulate some data
    set.seed(1)
    X <- matrix(rnorm(10*5), 10, 5)
    betaTrue <- c(2, -2, 3, 0, 0)
    eps <- rnorm(10)
    Y <- as.vector(X%*%betaTrue + eps)
    data <- list(Y = Y, X = X)
    inits <- list(beta0 = 0, beta = rep(0, 10), sigma = sd(Y))
    m <- nimbleModel(code, data=data, inits=inits)
    mConf <- configureMCMC(m)
    ## no error
    expect_no_error(configureRJ(mConf, c("beta"), prior = 0.5))
    mConf <- configureMCMC(m)
    expect_no_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), prior = 0.5))
    mConf <- configureMCMC(m)
    expect_no_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), prior = c(0.5, 0.2)))
})
## Behavioral checks of sampler_RJ (no indicator variables): a covariate not
## in the data-generating model (beta2) should mostly be switched off, a true
## covariate (beta1) mostly on with an estimate near lm()'s; also exercises
## the 'mean' and 'fixedValue' control options of the RJ proposal.
test_that("Check sampler_RJ behaviour - no indicator", {
    ## Linear regression with 2 covariates, one in the model
    code <- nimbleCode({
        beta0 ~ dnorm(0, sd = 100)
        beta1 ~ dnorm(0, sd = 100)
        beta2 ~ dnorm(0, sd = 100)
        sigma ~ dunif(0, 100)
        for(i in 1:50) {
            Ypred[i] <- beta0 + beta1 * x1[i] + beta2 * x2[i]
            Y[i] ~ dnorm(Ypred[i], sd = sigma)
        }
    })
    ## Data simulation
    set.seed(0)
    x1 <- runif(50, -1, 1)
    x2 <- runif(50, -1, 1)
    Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
    data <- list(Y = Y, x1 = x1, x2 = x2)
    ## check sampler behaviour
    m <- nimbleModel(code, data=data)
    cm <- compileNimble(m)
    mConf <- configureMCMC(m, monitors = c('beta1', 'beta2'))
    configureRJ(mConf, c('beta1', 'beta2'), prior = 0.5)
    mMCMC <- buildMCMC(mConf)
    cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
    output <- runMCMC(cMCMC, niter=1000, nburnin = 900, thin=1,
                      inits = list(beta0 = 1, beta1 = 1, beta2 = 1, sigma = sd(Y)), setSeed = 1)
    ## beta2 should be more likely to be 0
    expect_true(sum(output[, 'beta2'] == 0)/100 > 0.5)
    # expect_true(mean(output[which(output[, 'beta2'] != 0), 'beta2']) - coef(lm(Y ~ x1 + x2))[3] < 0.05) ## should check that beta2 is small when in the model
    ## beta1 should be less likely to be 0
    expect_true(sum(output[, 'beta1'] == 0)/100 < 0.5)
    ## beta1 estimate (comparison with lm estimate)
    expect_lt(abs(mean(output[which(output[, 'beta1'] != 0), 'beta1'])- as.numeric(coef(lm(Y ~ x1 + x2))[2])), 0.1)
    # ## beta1 should be in the model in last 100 iterations (chain has converged)
    # expect_false(any(output[, 'beta1'] == 0))
    #######
    ## change proposal mean for beta1 - still reasonable even if far
    ## dnorm(1.5, 3, 1) = 0.12
    m <- nimbleModel(code, data=data)
    cm <- compileNimble(m)
    mConf <- configureMCMC(m, monitors = c('beta1'))
    configureRJ(mConf, 'beta1', prior = 0.5, control = list(mean = 3))
    mMCMC <- buildMCMC(mConf)
    cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
    output <- runMCMC(cMCMC, niter=100, thin=1,
                      inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
    ## beta1 estimate (comparison with lm estimate)
    expect_lt(abs(mean(output[which(output[, 'beta1'] != 0), 'beta1']) - as.numeric(coef(lm(Y ~ x1 + x2))[2])), 0.1)
    #######
    ## fixed value on true beta1
    m <- nimbleModel(code, data=data)
    cm <- compileNimble(m)
    mConf <- configureMCMC(m, monitors = c('beta1'))
    configureRJ(mConf, 'beta1', prior = 0.5, control = list(fixedValue = 1.5))
    mMCMC <- buildMCMC(mConf)
    cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
    output <- runMCMC(cMCMC, niter=100, thin=1,
                      inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
    expect_lt(abs(mean(output[which(output[, 'beta1'] != 0), 'beta1'])- 1.5), 0.01)
    #######
    ## fixedValue on far value for beta2
    m <- nimbleModel(code, data=data)
    cm <- compileNimble(m)
    mConf <- configureMCMC(m, monitors = c('beta2'))
    configureRJ(mConf, 'beta2', prior = 0.5, control = list(fixedValue = 5))
    mMCMC <- buildMCMC(mConf)
    cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
    output <- runMCMC(cMCMC, niter=100, thin=1,
                      inits = list(beta0 = 1, beta1 = 1, beta2 = 1, sigma = sd(Y)), setSeed = 1)
    ## still beta2 is in the models but really small
    expect_lt(abs(mean(output[which(output[, 'beta2'] != 0), 'beta2'])), 0.1)
    if(.Platform$OS.type != "windows") {
        nimble:::clearCompiled(m)
    }
})
######################################
## Tests using indicator variables
######################################
## Argument-validation checks for configureRJ() with indicator variables:
## missing indicatorNodes/priorProb, mismatched indicatorNodes length, and
## mismatched control-vector lengths must all raise informative errors.
test_that("Test configureRJ with indicator variables", {
    ## Linear regression with 2 covariates, one in the model
    code <- nimbleCode({
        beta0 ~ dnorm(0, sd = 100)
        beta1 ~ dnorm(0, sd = 100)
        beta2 ~ dnorm(0, sd = 100)
        sigma ~ dunif(0, 100)
        z1 ~ dbern(psi) ## indicator variable for including beta1
        z2 ~ dbern(psi) ## indicator variable for including beta2
        psi ~ dbeta(1, 1)
        for(i in 1:50) {
            Ypred[i] <- beta0 + beta1 * z1 * x1[i] + beta2 * z2 * x2[i]
            Y[i] ~ dnorm(Ypred[i], sd = sigma)
        }
    })
    ## Data simulation
    set.seed(0)
    x1 <- runif(50, -1, 1)
    x2 <- runif(50, -1, 1)
    Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
    data <- list(Y = Y, x1 = x1, x2 = x2)
    inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
    m <- nimbleModel(code, data=data, inits=inits)
    mConf <- configureMCMC(m)
    ## One node
    nodes <- c("beta2")
    expect_error(configureRJ(mConf, nodes),
                 "configureRJ: Provide 'indicatorNodes' or 'priorProb' vector")
    expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2")),
                 "configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
    ## One node, multiple parameters
    expect_error(configureRJ(mConf, nodes, indicatorNodes = "z1", control = list(mean = c(0,1))),
                 'configureRJ: inconsistent length')
    expect_error(configureRJ(mConf, nodes, indicatorNodes = "z1", control = list(scale = c(2,1))),
                 'configureRJ: inconsistent length')
    ## Multiple nodes, fewer parameters
    nodes <- c("beta0", "beta1", "beta2")
    expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2")),
                 "configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
    expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2"), control = list(mean = c(0,1))),
                 'configureRJ: inconsistent length')
    expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2"), control = list(scale = c(2,1))),
                 'configureRJ: inconsistent length')
})
## configureRJ() must reject a multivariate (dmnorm) target node when used
## with indicator variables: only univariate nodes are supported.
test_that("Test configureRJ with multivariate node - indicator", {
    ##############################
    ## Multivariate node
    code <- nimbleCode({
        beta0 ~ dnorm(0, sd = 100)
        mu[1:5] <- rep(0, 5)
        sigma[1:5] <- 1/rep(100, 5)
        ## NOTE(review): 'simgma.mat' looks like a typo for 'sigma_mat';
        ## the dmnorm below uses 'sigma_mat', supplied via inits -- confirm.
        simgma.mat[1:5, 1:5] <- diag(sigma[1:5])
        beta[1:5] ~ dmnorm(mu[1:5], sigma_mat[1:5, 1:5])
        for(i in 1:5){
            ## indicator variables
            z[i] ~ dbern(0.5)
        }
        for(i in 1:10) {
            Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5]*z[1:5])
            Y[i] ~ dnorm(Ypred[i], sd = sigma.y)
        }
        sigma.y ~ dunif(0, 100)
    })
    ## simulate some data
    set.seed(1)
    X <- matrix(rnorm(10*5), 10, 5)
    betaTrue <- c(2, -2, 3, 0, 0)
    eps <- rnorm(10)
    Y <- as.vector(X%*%betaTrue + eps)
    data <- list(Y = Y, X = X)
    inits <- list(beta0 = 0, beta = rep(0, 5), sigma.y = sd(Y), sigma_mat = diag(rep(1/100, 5)), mu = rep(0, 5))
    m <- nimbleModel(code, data=data, inits=inits)
    mConf <- configureMCMC(m)
    ## test multivariate node
    expect_error(configureRJ(mConf, "beta", indicatorNodes = "z"),
                 'is multivariate; only univariate nodes can be used with reversible jump sampling.')
})
# End-to-end check of the RJ indicator sampler. Data are generated from
# Y ~ beta0 + 2*x1 (no x2 effect), so the sampler should tend to include
# beta1 and exclude beta2; structurally, each coefficient must be exactly
# zero whenever its indicator variable is zero.
test_that("Check sampler_RJ_indicator behaviour - indicator", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
z1 ~ dbern(psi) ## indicator variable for including beta1
z2 ~ dbern(psi) ## indicator variable for including beta2
psi ~ dbeta(1, 1)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * z1 * x1[i] + beta2 * z2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
## check sampler behaviour
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2', 'z1', 'z2'))
configureRJ(mConf, c('beta1', 'beta2'), indicator =c('z1', 'z2'))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE, resetFunctions = TRUE)
# 1000 iterations with 900 burn-in leaves 100 retained samples; the /100
# divisions below rely on exactly that count.
output <- runMCMC(cMCMC, niter=1000, nburnin = 900, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## beta2 should be more likely to be 0
expect_true(mean(output[, 'z2']) < 0.5)
## beta2 should be 0 when z2 is 0
expect_equal(sum(output[, 'beta2'] != 0)/100, mean(output[, 'z2']) )
## beta1 should be less likely to be 0
expect_true(mean(output[, 'z1']) > 0.5)
## beta1 should be 0 when z1 is 0
expect_equal(sum(output[, 'beta1'] != 0)/100, mean(output[, 'z1']) )
## check beta1 estimate
# Conditional on inclusion, the posterior mean of beta1 should be close to
# the least-squares estimate from the full linear model.
expect_lt(abs(mean(output[which(output[, 'z1'] != 0), 'beta1']) - as.numeric(coef(lm(Y ~ x1 + x2))[2])), 0.1)
## more challenging data
# Second run: both covariates truly in the model. Only the structural link
# between indicators and coefficients is checked here, not inclusion rates.
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 1 * x1 - 1 * x2, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2', 'z1', 'z2'))
configureRJ(mConf, c('beta1', 'beta2'), indicator =c('z1', 'z2'))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE, resetFunctions = TRUE)
output <- runMCMC(cMCMC, niter=100, nburnin = 0, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## check toggled_sampler
## when indicators are zero parameters are zero
expect_equal(which(output[, 'beta1'] == 0), which(output[, 'z1'] == 0))
expect_equal(which(output[, 'beta2'] == 0), which(output[, 'z2'] == 0))
if(.Platform$OS.type != "windows") {
nimble:::clearCompiled(m)
}
})
# Behaviour-preserving restyle of the "node vector" configureRJ test: passing
# whole vector nodes (or compatible sub-vectors) must succeed, while a length
# mismatch between targetNodes and indicatorNodes must raise an error.
test_that("Check passing node vector - indicator", {
  ## Model with a length-5 coefficient vector and one indicator per element.
  modelCode <- nimbleCode({
    beta0 ~ dnorm(0, sd = 100)
    for(i in 1:5){
      beta[i] ~ dnorm(0, sd = 100)
      z[i] ~ dbern(psi[i])
      psi[i] ~ dbeta(1, 1)
    }
    sigma ~ dunif(0, 100)
    for(i in 1:10) {
      Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5]*z[1:5])
      Y[i] ~ dnorm(Ypred[i], sd = sigma)
    }
  })
  ## Simulated data (same seed and draw order as before).
  set.seed(1)
  designMat <- matrix(rnorm(10*5), 10, 5)
  trueBeta <- c(2, -2, 3, 0, 0)
  noise <- rnorm(10)
  response <- as.vector(designMat %*% trueBeta + noise)
  modelData <- list(Y = response, X = designMat)
  modelInits <- list(beta0 = 0, beta = rep(0, 5), z = rep(0, 5),
                     psi = rep(0.5, 5), sigma = sd(response))
  model <- nimbleModel(modelCode, data = modelData, inits = modelInits)
  ## Whole vector node: accepted.
  conf <- configureMCMC(model)
  expect_no_error(configureRJ(conf, targetNodes = "beta", indicatorNodes = "z"))
  ## Matching sub-vectors: accepted.
  conf <- configureMCMC(model)
  expect_no_error(configureRJ(conf, c("beta[1]", "beta[2:4]"),
                              indicatorNodes = c("z[1]", "z[2:4]")))
  ## Mismatched lengths: rejected.
  conf <- configureMCMC(model)
  expect_error(configureRJ(conf, c("beta[1]", "beta[2:4]"), indicatorNodes = "z"),
               "configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
  # if(.Platform$OS.type != "windows") {
  #   nimble:::clearCompiled(model)
  # }
})
# configureRJ must refuse target nodes whose prior hyperparameters are
# stochastic (beta2: sd = sigma) or deterministic functions of stochastic
# nodes (beta3: sd = sigma^2); constant hyperparameters (beta1) are fine.
# The same rule is checked for both the indicatorNodes and priorProb
# interfaces, with a fresh MCMC configuration before every call.
test_that("Bails out for non-constant target node hyperparameters", {
code <- nimbleCode({
sigma ~ dunif(0, 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = sigma)
sigma2 <- sigma^2
beta3 ~ dnorm(0, sd = sigma2)
z ~ dbern(0.5)
Ypred <- beta0 + beta1 * z * x1 + beta2 * z * x2 + beta3 * z * x3
Y ~ dnorm(Ypred, sd = sigma)
})
Rmodel <- nimbleModel(code)
## indicatorNodes interface
conf <- configureMCMC(Rmodel)
expect_no_error(configureRJ(conf, targetNodes = 'beta1', indicatorNodes = 'z'))
conf <- configureMCMC(Rmodel)
expect_error(configureRJ(conf, targetNodes = 'beta2', indicatorNodes = 'z'))
conf <- configureMCMC(Rmodel)
expect_error(configureRJ(conf, targetNodes = 'beta3', indicatorNodes = 'z'))
## priorProb interface
conf <- configureMCMC(Rmodel)
expect_no_error(configureRJ(conf, targetNodes = 'beta1', priorProb = 0.5))
conf <- configureMCMC(Rmodel)
expect_error(configureRJ(conf, targetNodes = 'beta2', priorProb = 0.5))
conf <- configureMCMC(Rmodel)
expect_error(configureRJ(conf, targetNodes = 'beta3', priorProb = 0.5))
})
|
9ec8f76375558b41f8ccaa5b48dfe202720b84bf | 59ff127da27e4cad25c6c4a0e22eb936770105ec | /man/psvd.init.Rd | 6675074cb163436cd5c0e4e7d014d1eebd479348 | [] | no_license | lgl15/cnmtf | ca3445367057b782428104660a2de406914239a7 | 8e7cb76e2a2f08bd652fd59ba25354e7d3e6753d | refs/heads/master | 2020-03-10T13:43:45.447310 | 2019-03-21T18:54:36 | 2019-03-21T18:54:36 | 129,406,824 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,063 | rd | psvd.init.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fact_init.R
\name{psvd.init}
\alias{psvd.init}
\title{SVD Initialisation for NMF}
\usage{
psvd.init(A, k)
}
\arguments{
\item{A}{Nonnegative matrix A, \emph{n x n}}
\item{k}{Rank of the computed factors (number of clusters)}
}
\value{
\code{W} nonnegative matrix, \emph{n x k}
}
\description{
Function to initialize Non-negative Matrix Factorization algorithms
}
\references{
C. Boutsidis and E. Gallopoulos, SVD-based initialization: A head start for nonnegative matrix factorization, Pattern Recognition, Elsevier
}
\seealso{
Other Factorisation functions: \code{\link{clus.membership}},
\code{\link{cnmtf}}, \code{\link{consensus.clust}},
\code{\link{hierarchical.clust}},
\code{\link{initialise.UV}}, \code{\link{neg.constrain}},
\code{\link{parameters.cnmtf}},
\code{\link{plot.parameter}},
\code{\link{pos.constrain}},
\code{\link{regression.snps}}, \code{\link{score.cnmtf}},
\code{\link{synthetic.gwas}}
}
\author{
Luis G. Leal, \email{lgl15@imperial.ac.uk}
}
|
e41c585d33c2ff191c1781c0f23fde714464ebb6 | eadd73637926595f1466eb9e9310ab4a9a835f93 | /R/calcul.mat.r | 17ac68b579412777520db8d5f55bf1f8559c15d0 | [] | no_license | ThChauvin/MicrodensiteR | 87b81506e0ed335b64abcfa6dbb9d056c029bb14 | 2f4f12699e50833961bcf3fbb52bcb84bfcc366d | refs/heads/master | 2021-09-05T05:00:11.998327 | 2018-01-24T08:11:23 | 2018-01-24T08:11:23 | 118,461,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,220 | r | calcul.mat.r | #' Calcul.mat
#'
#' Compute ring-level micro-density variables from Windendro density profiles.
#'
#' Scans the global environment for profile objects whose names match
#' \code{ww} and, for every ring of every profile, computes ring widths and
#' summary densities for the whole ring, the earlywood (BI) and the latewood
#' (BF) segments. The result data frame is assigned to \code{matmdm} in the
#' global environment (side effect); each profile is also plotted with its
#' ring limits as a visual check.
#'
#' @return Invisibly, the result data frame (also assigned to \code{matmdm}).
#' @export
#' @param ww the pattern you want to recognize in the name of the files you want to used to calculate variables.
#' @param papa indicate the value of the interval of resolution in Windendro (mm per measurement point).
#' @param subst1 where to start in the substring of the file name to determined the identification code
#' @param subst2 where to stop in the substring of the file name to determined the identification code
#' @examples
calcul.mat <- function(ww = "dat", papa, subst1, subst2) {
  par(mfrow = c(1, 1))
  liste.profils <- objects(pattern = ww, pos = 1)
  matrix.resultats <- NULL       # result matrix
  rows <- list()                 # one data.frame row per ring, bound at the end
  pas <- papa                    # measurement step (mm per point)
  for (i in seq_along(liste.profils)) {
    print(i)
    nom <- liste.profils[i]
    print(nom)
    x <- get(nom, pos = 1)                       # one density profile object
    lim.cernes <- x$lim.cernes                   # ring limits (profile indices)
    lim.bibf1 <- x$lim.bibf                      # earlywood/latewood split (Nancy definition)
    ## Visual check: profile with ring limits, BI/BF limits, years and ring numbers.
    plot(x$profil, pch = ".")
    abline(v = lim.cernes)
    abline(v = lim.bibf1)
    text(lim.cernes - (c(lim.cernes, NA) - c(0, lim.cernes)) / 2,
         max(x$profil), as.character(x$millesimes), cex = 0.7)
    nn <- length(lim.cernes)
    num.cernes <- seq_len(nn)                    # seq_len() is safe when nn == 0
    text(lim.cernes - (c(lim.cernes, NA) - c(0, lim.cernes)) / 2,
         0.9 * max(x$profil), as.character(num.cernes), cex = 0.8)
    profil <- x$profil
    ## Ring-by-ring loop within the tree.
    toutes.lim.cernes <- c(1, lim.cernes)
    for (j in num.cernes) {
      lim1 <- toutes.lim.cernes[j]
      lim2 <- toutes.lim.cernes[j + 1]
      l.bibf1 <- lim.bibf1[j]
      Lo <- round(length(profil[lim1:lim2]) * pas, 4)        # ring width (mm)
      Li <- round(length(profil[lim1:l.bibf1]) * pas, 4)     # earlywood width (mm, Nancy)
      Lf <- round(Lo - Li, 4)                                # latewood width (mm)
      Do <- round(mean(profil[lim1:lim2], na.rm = TRUE), 3)       # mean ring density
      Di <- round(mean(profil[lim1:l.bibf1], na.rm = TRUE), 3)    # mean earlywood density
      Df <- round(mean(profil[l.bibf1:lim2], na.rm = TRUE), 3)    # mean latewood density
      Mi <- round(min(profil[lim1:lim2], na.rm = TRUE), 3)        # minimum ring density
      Ma <- round(max(profil[lim1:lim2], na.rm = TRUE), 3)        # maximum ring density
      ## NA handling is now consistent for the three standard deviations:
      ## the original dropped NAs for Eo only, so Ei/Ef could come out NA.
      Eo <- round(sd(profil[lim1:lim2], na.rm = TRUE), 3)         # within-ring density sd
      Ei <- round(sd(profil[lim1:l.bibf1], na.rm = TRUE), 3)      # within-earlywood sd
      Ef <- round(sd(profil[l.bibf1:lim2], na.rm = TRUE), 3)      # within-latewood sd
      Co <- Ma - Mi                                               # within-ring density contrast
      annee <- x$millesimes[j]                                    # ring year
      vec <- data.frame(substring(liste.profils[i], subst1, subst2),
                        annee, j, Lo, Li, Lf, Do, Di, Df, Mi, Ma, Eo, Ei, Ef, Co)
      colnames(vec) <- c("code", "ye", "ce", "Lo", "Li", "Lf", "Do", "Di",
                         "Df", "Mi", "Ma", "Eo", "Ei", "Ef", "Co")
      rows[[length(rows) + 1]] <- vec
    }
    ## Rebind and publish after every profile so partial results survive an
    ## error on a later profile (matches the original incremental behaviour,
    ## without the O(n^2) rbind-per-ring growth).
    matrix.resultats <- do.call(rbind, rows)
    assign("matmdm", matrix.resultats, pos = 1)
  }
  invisible(matrix.resultats)
}
|
98514ed64ba1b83b528626a408c33be9104810a9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lmSupport/examples/varReverse.Rd.R | 86a39629572359284b527b1a47b2e40f1b0d67b3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 214 | r | varReverse.Rd.R | library(lmSupport)
### Name: varReverse
### Title: Reverse score an ordinal or boolean scored item/variable
### Aliases: varReverse
### Keywords: manip
### ** Examples
##d$Item5r = varReverse(d$Item5, 1, 5)
|
5358f2f3df461ac0b7ba583f522535443b99658b | d74208b48e1595366435acfe90584036e89dd88e | /man/existsNlGasFlaresRds.Rd | 7b79efa60133d1f5fdcbb3a6061ed639a71d5700 | [] | no_license | mjdhasan/Rnightlights | 85feaac20d8ed20429d95a41b59fef59e23a4cfa | f34fd986a405b1ca51d9a807849d2274f8e22d22 | refs/heads/master | 2022-11-06T18:50:41.533156 | 2020-06-26T12:11:28 | 2020-06-26T12:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 405 | rd | existsNlGasFlaresRds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gasflares.R
\name{existsNlGasFlaresRds}
\alias{existsNlGasFlaresRds}
\title{Check if the world gasflare RDS exists}
\usage{
existsNlGasFlaresRds()
}
\value{
\code{logical} if the gasflare rds exists
}
\description{
Check if the world gasflare RDS exists
}
\examples{
\dontrun{
existsNlGasFlaresRds()
#returns TRUE/FALSE
}
}
|
8e13b1c508ee6a07502ea6ce2ea69370ad22e1de | 3351f5dbee74cca16ba99519e5871096cdf8f032 | /single_cell/models_list.R | 68bc13e456da2d3eded058ffe224ec3043d84056 | [] | no_license | BeyremKh/Droplasso-experiments | 8723298833a50c1d0cf4aceb72b84b5926390ad3 | e298e83776471fe9e60d6360dc479c411105b1d3 | refs/heads/master | 2020-03-27T09:05:23.619394 | 2020-01-14T07:20:37 | 2020-01-14T07:20:37 | 146,312,916 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,844 | r | models_list.R | #load models
## Load the fitted models: defines `models`, one element per dataset, each
## holding gene lists for four selection methods.
load("~/models_conquer.Rdata")

# Write one gene name per line to "<displayname>_<method>.txt", keeping only
# the part of each entry before the first "." (e.g. "GENE.1" -> "GENE").
write_gene_list <- function(entries, displayname, method) {
  genes <- vapply(strsplit(entries, ".", fixed = TRUE),
                  function(parts) parts[1], character(1))
  path <- paste(paste(displayname, method, sep = "_"), "txt", sep = ".")
  fileConn <- file(path)
  on.exit(close(fileConn), add = TRUE)   # connection is closed even on error
  writeLines(genes, fileConn)
}

# The original repeated the same five lines for each method; loop over a
# method -> list-field mapping instead.
methods <- c(lasso = "list_lasso", el = "list_el", dl = "list_dl", all = "list_all")
for (k in seq_along(models)) {
  for (method in names(methods)) {
    write_gene_list(models[[k]][[methods[[method]]]],
                    models[[k]]$dataset$displayname, method)
  }
}
# NOTE(review): the following lines reference objects (`leg`, `meanfeat`,
# `result`, `meanAUC`) that are never defined in this script, and there is no
# open plot for legend() to annotate -- they look like leftovers pasted from
# another analysis script and will error if this file is sourced as-is.
# TODO: confirm and either remove them or restore their missing setup code.
legend("bottomleft", legend=leg, col=1:5, pch=c(4,15:18), bty="n")
meanfeat_norm=sapply(1:nrow(meanfeat),function(i) meanfeat[i,]/ result[[i]]$ngenes)
#sparsity vs accuracy plot
matplot(meanAUC, 1- t(meanfeat_norm), ylab="Model sparsity",pch = c(4,15:18),bty="l")
|
a2c8a457979802367eeb1a7f6f7418ed8d0029f1 | 4f3f9dd1be4a18218c04771b7d00ef6c3ffd91e0 | /R/authorise.R | a79b2602399105d192c30611b1f265d81be49ef9 | [] | no_license | SmarshMELLOW/ubeR | e605f5cc62dcae8166c322d93707dc147d21a581 | 63749d52ef883584b336995182ffd84c5b364f5e | refs/heads/master | 2021-06-17T09:48:34.660163 | 2017-05-10T07:00:27 | 2017-05-10T07:00:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,351 | r | authorise.R | # SERVERID ------------------------------------------------------------------------------------------------------------
# Store the Uber server ID in the package's authentication cache.
# Server IDs issued by Uber are always exactly 40 characters long.
uber_set_serverid <- function(serverid) {
  if (nchar(serverid) != 40) {
    stop("Server ID should be 40 characters long.")
  }
  auth_cache$serverid <- serverid
}
# Retrieve the Uber server ID previously stored with uber_set_serverid().
#
# Errors when no server ID has been stored. Fixes two defects in the
# original: the error message pointed callers at a nonexistent
# set_serverid(), and the handler contained an unreachable return(NA)
# after stop().
uber_get_serverid <- function() {
  if (!exists("serverid", envir = auth_cache)) {
    stop("No server ID has been set. Please call uber_set_serverid().")
  }
  get("serverid", envir = auth_cache)
}
# OAUTH2 --------------------------------------------------------------------------------------------------------------
# It might be worthwhile considering saving a default token as is done for rdrop2 package.
#' Authenticate with the Uber API
#'
#' This function wraps the process of OAuth2.0 authentication with the Uber API. This must be done before further interactions can take place.
#'
#' If \code{cache = TRUE} then the authentication token will be stored in a \code{.httr-oauth} file.
#'
#' @param client_key The client key provided by Uber.
#' @param client_secret The client secreat provided by Uber.
#' @param cache Whether or not to cache authentication token.
#' @seealso \code{\link[httr]{oauth_app}}.
#' @examples
#' \dontrun{
#' # The key/secret combination below is not valid.
#' uber_oauth("ReTZRlEGNnzkitsn-A23MiXbnMNzdQf8",
#' "NpWYGY8W7cv63tfM2neciVpjZOAF_wx1GHRG94A2")
#' }
#' @import httr
#' @import httpuv
#' @export
uber_oauth <- function(client_key, client_secret, cache = TRUE) {
  # Uber's OAuth2.0 endpoints for the authorization-code flow.
  uber_endpoint <- httr::oauth_endpoint(
    authorize = "https://login.uber.com/oauth/v2/authorize",
    access = "https://login.uber.com/oauth/v2/token"
  )
  uber_app <- httr::oauth_app("uber", key = client_key, secret = client_secret)
  # Scopes requested from the user during authentication.
  requested_scopes <- c("profile", "history", "places", "request")
  token <- httr::oauth2.0_token(
    uber_endpoint,
    uber_app,
    scope = requested_scopes,
    cache = cache
  )
  # Keep the token in the package-level cache for later API calls.
  assign("oauth_token", token, envir = auth_cache)
}
# TRUE if an OAuth2.0 token has already been stored in the package cache
# by uber_oauth(), FALSE otherwise.
has_oauth_token <- function() {
exists("oauth_token", envir = auth_cache)
}
# Fetch the cached OAuth2.0 token, erroring if uber_oauth() has not been
# called in this session yet.
get_oauth_token <- function() {
if (!has_oauth_token()) {
stop("This session doesn't yet have OAuth2.0 authentication.")
}
return(get("oauth_token", envir = auth_cache))
}
|
4c6878fec5d711c15561b17fa311a1000bd076fb | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/octalene.R | 160217c6d1f33ea33e908768339cf3d0e5f14b67 | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 204 | r | octalene.R | library("knitr")
library("rgl")
# Earlier two-step pipeline (knit to .md, convert to HTML, print to PDF),
# kept commented out for reference:
#knit("octalene.Rmd")
#markdownToHTML('octalene.md', 'octalene.html', options=c("use_xhml"))
#system("pandoc -s octalene.html -o octalene.pdf")
# Render the octalene R Markdown report straight to HTML via knitr.
knit2html('octalene.Rmd')
|
920eff6f808e4862826ad2f3e174a0cb32362c38 | f2af3cfc5c0e85158690b5d677d1f2d67c0a6ad3 | /ribor/tests/testthat/test_coverage.R | 9b55b6c128597fda6d0a9cbbf46f592e97d7982f | [
"MIT"
] | permissive | ijhoskins/riboR_alpha | e6085cb1624f2a5755206ed262f7be171b48a1ed | 7ed91be7c56f4b2c5d065ea825ddfed197e110ca | refs/heads/master | 2020-06-12T14:39:42.852046 | 2019-06-28T18:24:47 | 2019-06-28T18:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,705 | r | test_coverage.R | context("coverage functions")
library(ribor)
# Shared fixture: small example ribo file used by all coverage tests below.
ribo.object <- ribo("sample.ribo")
# Coverage tables for three transcripts, read lengths 2-5, experiment Hela_1.
# Column 1 of each table is an identifier; the remaining columns hold counts.
cov_1 <- get_coverage(ribo.object,
name = "MYC",
range.lower = 2,
range.upper = 5,
experiments = "Hela_1")
cov_2 <- get_coverage(ribo.object,
name = "VEGFA",
range.lower = 2,
range.upper = 5,
experiments = "Hela_1")
cov_3 <- get_coverage(ribo.object,
name = "GAPDH",
range.lower = 2,
range.upper = 5,
experiments = c("Hela_1"))
# Total reads across all three transcripts (counts only; first column dropped).
actual <- sum(colSums(cov_1[, -1])) +
sum(colSums(cov_2[, -1])) +
sum(colSums(cov_3[, -1]))
expected <- 118
test_that("get_coverage- total reads of an experiment",
expect_equal(actual, expected))
# Single read length (2) for GAPDH in Hela_1.
cov_4 <- get_coverage(ribo.object,
name = "GAPDH",
range.lower = 2,
range.upper = 2,
experiments = c("Hela_1"))
actual <- rowSums(cov_4[, -1])
expected <- 14
test_that("get_coverage- test individual read length",
expect_equal(actual, expected))
# Single read length (3) for MYC in a different experiment, Hela_2.
cov_5 <- get_coverage(ribo.object,
name = "MYC",
range.lower = 3,
range.upper = 3,
experiments = c("Hela_2"))
actual <- rowSums(cov_5[, -1])
expected <- 3
test_that("get_coverage- test individual read length",
expect_equal(actual, expected))
|
f1c7b50ef7e0d3a376147018f12a42836f8c9442 | 29585dff702209dd446c0ab52ceea046c58e384e | /fbRanks/R/scrape_clubsoccer.R | 2536806d8274fac6d5dd64ca16c869fb0be28790 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,273 | r | scrape_clubsoccer.R | ###############################################
###############################################
# scrape scores from US Club Soccer
# url="http://clubsoccer.us/TTSchedules.aspx?tid=USCLUBG&year=2013&stid=USCLUBG&syear=2012&div=U12M01"
###############################################

#' Scrape match scores from a US Club Soccer schedule page and write them to
#' a CSV file with columns date, home.team, home.score, away.team, away.score
#' (plus any extra per-match or constant columns supplied through ...).
#'
#' @param url Schedule page to scrape.
#' @param file Output CSV file name; ".csv" is appended when missing.
#' @param url.date.format strptime format of the dates on the page.
#' @param date.format strptime format used for dates in the output file.
#' @param append Append to an existing file (TRUE) or start a new file with a
#'   header row (FALSE).
#' @param ... Extra columns to add; each must have length 1 or one value per
#'   match.
scrape.usclub = function(url, file="USClub", url.date.format="%A%m/%d/%Y", date.format="%Y-%m-%d", append=FALSE, ...){
  require(XML)
  ## Argument checks.
  if (!is.character(file) | length(file) != 1)
    stop("file must be a character vector.\n", call. = FALSE)
  if (!is.logical(append) | length(append) != 1)
    stop("append must be a TRUE/FALSE.\n", call. = FALSE)
  tb = readHTMLTable(url, as.data.frame = TRUE, stringsAsFactors = FALSE)
  ## The schedule tables sit after a known marker table; game tables alternate
  ## with spacer tables, hence the stride-2 indexing below.
  tb.start = which(names(tb) == "ctl00_ContentPlaceHolder1_TableBtn") + 3
  tb.end = tb.start + min(which(!(names(tb)[tb.start:length(tb)] == "NULL"))) - 2
  game.tbs = tb[seq(tb.start, tb.end, 2)]
  my.table = NULL
  for (i in seq_along(game.tbs)) {
    this.tb = game.tbs[[i]]
    this.tb = this.tb[!is.na(this.tb[, "V2"]), , drop = FALSE]
    ## Odd rows hold the date and home team; even rows hold the away team.
    the.dates = as.Date(this.tb[seq(1, dim(this.tb)[1], 2), "V2"], url.date.format)
    the.dates = format(the.dates, date.format)
    the.home.team = this.tb[seq(1, dim(this.tb)[1], 2), "V7"]
    the.home.score = this.tb[seq(1, dim(this.tb)[1], 2), "V5"]
    the.away.team = this.tb[seq(2, dim(this.tb)[1], 2), "V3"]
    the.away.score = this.tb[seq(2, dim(this.tb)[1], 2), "V1"]
    this.table = data.frame(date = the.dates, home.team = the.home.team,
                            home.score = the.home.score,
                            away.team = the.away.team,
                            away.score = the.away.score)
    my.table = if (is.null(my.table)) this.table else rbind(my.table, this.table)
  }
  # Set the column headings
  colnames(my.table) = c("date", "home.team", "home.score", "away.team", "away.score")
  # Replace missing scores with NaN (matches not yet played).
  my.table$home.score[my.table$home.score == ""] = NaN
  my.table$away.score[my.table$away.score == ""] = NaN
  ## Extra user-supplied columns.
  extra = list(...)
  for (i in names(extra)) {
    ## BUG FIX: the original tested length(extra[i]) -- always 1, because
    ## extra[i] is a one-element list -- so this validation never fired.
    if (!(length(extra[[i]]) == 1 | length(extra[[i]]) == dim(my.table)[1]))
      stop(paste("The number of values in", i, "must be 1 or equal to the number of matches."))
    my.table[i] = extra[i]
  }
  ## Save. Column names are written only when starting a fresh file.
  colsn = !append
  ## BUG FIX: the original used stringr::str_sub() without loading stringr;
  ## base substr() behaves the same here and drops the undeclared dependency.
  if (substr(file, nchar(file) - 3, nchar(file)) != ".csv") file = paste(file, ".csv", sep = "")
  write.table(my.table, file = file, row.names = FALSE, col.names = colsn,
              append = append, sep = ",", qmethod = "double")
}
f916a13697e06b89f17b1f5817cefc636857a1a2 | a895062711c7cd7d9672fca3ae348cb8a247b04c | /plot4.R | 7c958aee1c0d8443376394a3277bedd351f3c03b | [] | no_license | fimassuda/ExData_Plotting1 | 38ea6ea4a215edb6a6216ee1c3af0a9f029ff78f | a8b557b2e9834c44be875538918f290d0f04ab38 | refs/heads/master | 2020-12-28T22:54:09.039136 | 2015-06-07T08:29:30 | 2015-06-07T08:29:30 | 37,001,848 | 0 | 0 | null | 2015-06-07T02:28:03 | 2015-06-07T02:28:01 | null | UTF-8 | R | false | false | 1,233 | r | plot4.R | plot4 <- function(){
# Reproduce plot4.png: a 2x2 panel of Global Active Power, Voltage, the three
# energy sub-metering series (with legend), and Global Reactive Power for
# 2007-02-01 and 2007-02-02, written to a 480x480 transparent PNG.
power <- read.table("household_power_consumption.txt", header = TRUE, na.strings = "?", sep = ";")
# Keep only the two target days (dates are d/m/Y strings in the raw file).
power <- power[power["Date"] == "1/2/2007" | power["Date"] == "2/2/2007", ]
power$date.time <- strptime(paste(power$Date, power$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
png(filename = "plot4.png", width = 480, height = 480, units = "px", bg = "transparent")
par(mfrow = c(2, 2))
# Top-left: global active power.
plot(power$date.time, power$Global_active_power, xlab = "", ylab = "Global Active Power", type = "l")
# Top-right: voltage.
plot(power$date.time, power$Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
# Bottom-left: the three sub-metering series overlaid.
plot(power$date.time, power$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l", col = "black")
lines(power$date.time, power$Sub_metering_2, type = "l", col = "red")
lines(power$date.time, power$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1)
# Bottom-right: global reactive power.
plot(power$date.time, power$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l")
dev.off()
}
0a0226d86eeee697c0b1e350dcc21c87612a9d42 | ece76676ce0e36438b626e0a9394766e9d658ad2 | /data-raw/fontawesome-update.R | f4203da82cee04850147e74c8c0adce0367646d0 | [
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"MIT"
] | permissive | rstudio/fontawesome | 8a1b65937626b5228ff6c978a65ae502faca59f9 | b056ecaa9169b98e9d01eacbb66dd43e8b4e7cd7 | refs/heads/main | 2023-09-01T02:24:26.547607 | 2023-08-24T00:32:47 | 2023-08-24T00:32:47 | 135,504,089 | 277 | 45 | NOASSERTION | 2023-08-23T21:58:29 | 2018-05-30T22:31:54 | R | UTF-8 | R | false | false | 8,270 | r | fontawesome-update.R | # This script is used for extracting the raw SVG from the `icons.json`
# file that exists in the `Font-Awesome` repo (address below) and that
# also ships with the official releases.
#
# We assume that `icons.json` is relatively self-contained and stable.
# We cannot assume that any of SVG extracted from this file in this
# location are considered final
library(rprojroot)
library(pointblank)
library(dplyr)
library(purrr)
library(tibble)
library(withr)
version_tag <- "6.4.2"
base_url <-
file.path(
"https://raw.githubusercontent.com/FortAwesome/Font-Awesome", version_tag
)
# FA4 -> FA6 shims
shims <- yaml::read_yaml(file.path(base_url, "metadata/shims.yml"))
# All icon info
icons <- jsonlite::fromJSON(file.path(base_url, "metadata/icons.json"))
# Reported version (should match `version_tag`)
fa_version <-
jsonlite::fromJSON(
file.path(base_url, "js-packages/@fortawesome/fontawesome-free/package.json")
)$version
# Tidy the `icons` table: one row per (icon, style) combination, with the
# SVG path and viewBox geometry pulled out into plain columns.
fa_tbl <-
  icons %>%
  tibble::enframe("name", "info") %>%
  dplyr::mutate(
    label = purrr::map_chr(info, "label"),
    # One nested row per available style ("regular"/"solid"/"brands").
    svg = purrr::map(info, function(x) tibble::enframe(x$svg, "style", "svg_info"))
  ) %>%
  dplyr::select(-info) %>%
  tidyr::unnest(svg) %>%
  dplyr::mutate(
    # Consistency fix: qualify map_chr() with purrr:: like every other call
    # in this pipeline (it previously relied on purrr being attached).
    path = purrr::map_chr(svg_info, "path"),
    # viewBox is c(min-x, min-y, width, height).
    min_x = purrr::map_int(svg_info, ~as.integer(.x$viewBox[1])),
    min_y = purrr::map_int(svg_info, ~as.integer(.x$viewBox[2])),
    width = purrr::map_int(svg_info, ~as.integer(.x$viewBox[3])),
    height = purrr::map_int(svg_info, ~as.integer(.x$viewBox[4]))
  ) %>%
  dplyr::select(-svg_info) %>%
  # e.g. style "solid" + name "user" -> prefix "fas", full_name "fas fa-user".
  dplyr::mutate(full_name = paste0("fa", substr(style, 1, 1), " fa-", name)) %>%
  dplyr::mutate(prefix = paste0("fa", substr(style, 1, 1))) %>%
  dplyr::select(name, prefix, full_name, everything())
# Create a table of alias names (legacy FA4-era names -> current icon name).
# Build one tibble per icon that has aliases, then bind once at the end:
# growing a tibble with bind_rows() inside the loop is O(n^2) in copies.
alias_rows <- lapply(names(icons), function(ico) {
  alias_names <- icons[[ico]][["aliases"]][["names"]]
  if (is.null(alias_names)) return(NULL)   # icon has no aliases
  dplyr::tibble(alias = alias_names, name = ico)
})
# The empty prototype guarantees both columns exist even when no icon has
# aliases; bind_rows() drops the NULL entries from alias_rows.
alias_tbl <-
  dplyr::bind_rows(
    dplyr::tibble(alias = character(0), name = character(0)),
    alias_rows
  )
# Generate the `font_awesome_brands` vector for faster retrieval in `fa_i()`
font_awesome_brands <- unique(fa_tbl$name[grepl("fab ", fa_tbl$full_name)])
# ==============================================================================
# Perform validation testing before writing data
# ==============================================================================
# Expect that rows are distinct (with different
# groupings of columns)
expect_rows_distinct(fa_tbl)
# Expect that no values are missing
expect_col_vals_not_null(fa_tbl, "name")
expect_col_vals_not_null(fa_tbl, "style")
expect_col_vals_not_null(fa_tbl, "full_name")
expect_col_vals_not_null(fa_tbl, "path")
expect_col_vals_not_null(fa_tbl, "min_x")
expect_col_vals_not_null(fa_tbl, "min_y")
expect_col_vals_not_null(fa_tbl, "width")
expect_col_vals_not_null(fa_tbl, "height")
expect_col_vals_not_null(fa_tbl, "label")
# Expect that the `style` column contains 3 types of values
expect_col_vals_in_set(
fa_tbl,
columns = "style",
set = c("regular", "solid", "brands")
)
# Expect that the `name` column only has a certain character set
expect_col_vals_regex(
fa_tbl,
columns = "name",
regex = "^[a-z0-9-]*?$"
)
# Expect values in the `full_name` column to adhere to a specific pattern
expect_col_vals_regex(
fa_tbl,
columns = "full_name",
regex = "^fa[brs] fa-[a-z0-9-]*?$"
)
# Expect that the prefix of `full_name` corresponds to the `style` value
expect_col_vals_expr(fa_tbl, ~ case_when(
style == "regular" ~ grepl("^far", full_name),
style == "solid" ~ grepl("^fas", full_name),
style == "brands" ~ grepl("^fab", full_name)
))
# Expect that the `name` value is contained inside the `full_name` value
expect_col_vals_equal(
fa_tbl,
columns = "name",
value = vars(full_name),
preconditions = ~ . %>% mutate(full_name = gsub("fa[rsb] fa-", "", full_name))
)
# Expect there to be more than 2000 rows in the table
expect_col_vals_gt(
dplyr::count(fa_tbl),
columns = vars(n),
value = 2000
)
# Expect these column names in the table
expect_col_vals_make_set(
tibble(col_names = colnames(fa_tbl)),
columns = vars(col_names),
set = c(
"name", "prefix", "full_name", "label", "style",
"path", "min_x", "min_y", "width", "height"
)
)
# Expect that columns relating to the SVG
# viewBox have constant values
expect_col_vals_equal(fa_tbl, "min_x", 0)
expect_col_vals_equal(fa_tbl, "min_y", 0)
expect_col_vals_equal(fa_tbl, "height", 512)
# Expect that certain columns are of the integer class
expect_col_is_integer(fa_tbl, vars(min_x, min_y, width, height))
# ==============================================================================
# Save the icon and alias info to disk
# ==============================================================================
# Write the `fa_tbl` and `alias_tbl` tables to internal data ('R/sysdata.rda')
usethis::use_data(
fa_tbl,
alias_tbl,
overwrite = TRUE, internal = TRUE
)
# Write a CSV to the `data-raw` folder for other projects to consume
readr::write_csv(
fa_tbl,
rprojroot::find_package_root_file("data-raw/fa_tbl.csv")
)
# Write the `fa_version.R` and `fa_brands.R` files to the `R` dir
withr::with_dir(
rprojroot::find_package_root_file("R"), {
cat(
"# Generated by fontawesome-update.R: do not edit by hand\n\n",
"fa_version <- ",
paste(capture.output(dput(fa_version)), collapse = ""),
sep = "",
file = "fa_version.R"
)
cat(
"# Generated by fontawesome-update.R: do not edit by hand\n\n",
"font_awesome_brands <- ",
paste(capture.output(dput(font_awesome_brands)), collapse = ""),
sep = "",
file = "fa_brands.R"
)
}
)
# ==============================================================================
# Copy over 'css' and 'webfonts' assets
# ==============================================================================
zip_file <- file.path(tempdir(), paste0("font-awesome-", fa_version, ".zip"))
url <-
paste0(
"https://github.com/FortAwesome/Font-Awesome/releases/download/",
fa_version, "/fontawesome-free-", fa_version, "-web.zip"
)
download.file(url, zip_file)
unzip(zip_file, exdir = tempdir())
source_dir <- file.path(tempdir(), paste0("fontawesome-free-", fa_version, "-web"))
dest_dir <- rprojroot::find_package_root_file("inst/fontawesome")
unlink(dest_dir, recursive = TRUE)
# Copy `filenames` (paths relative to `srcdir`) into the same relative
# locations under `destdir`, creating destination subdirectories as needed.
#
# @param srcdir    Source root directory.
# @param destdir   Destination root directory.
# @param filenames Character vector of relative file paths to copy.
# @return Invisibly, a logical vector: TRUE where the copy succeeded.
#   Failures are reported via message() rather than an error.
copy_files <- function(srcdir, destdir, filenames) {
  # Create needed directories (quietly tolerate ones that already exist).
  dest_subdirs <- file.path(destdir, unique(dirname(filenames)))
  for (dest_subdir in dest_subdirs) {
    dir.create(dest_subdir, recursive = TRUE, showWarnings = FALSE)
  }
  # file.path() instead of paste0(..., "/") builds the paths portably.
  res <- file.copy(
    from = file.path(srcdir, filenames),
    to = file.path(destdir, filenames)
  )
  if (!all(res)) {
    message("Problem copying ", sum(!res), " files: \n ",
            paste0(filenames[!res], collapse = "\n ")
    )
  }
  invisible(res)
}
# Assets to vendor: the four stylesheet variants plus every font file
# shipped in the release's `webfonts` directory.
filenames <-
  c(
    "css/all.css",
    "css/all.min.css",
    "css/v4-shims.css",
    "css/v4-shims.min.css",
    file.path("webfonts", dir(file.path(source_dir, "webfonts")))
  )
# Copy the complete set of CSS and font files to `inst/fontawesome`
copy_files(source_dir, dest_dir, filenames)
# Remove some font files that won't be supported in this package
# Note: v6+ discontinues support for .woff in favor of .woff2, ttf is retained
withr::with_dir(dest_dir, {
  # Patch the `all.css` file to remove entries for all but `woff2`
  # and `ttf` icon files
  # Each @font-face `src:` list is rewritten in place so it references only
  # the .woff2 and .ttf files; \\1 captures the font file-name stem.
  readr::read_file(file = "css/all.css") %>%
    gsub(
      "src: url\\(.../webfonts/fa-([^.]+).*?}",
      'src: url("../webfonts/fa-\\1.woff2") format("woff2"), url("../webfonts/fa-\\1.ttf") format("truetype"); }',
      .
    ) %>%
    readr::write_file(file = "css/all.css")
  # Patch the `all.min.css` file to remove entries for all but `woff2`
  # and `ttf` icon files
  # Same rewrite as above, matching the minified spelling ("src:url(").
  readr::read_file(file = "css/all.min.css") %>%
    gsub(
      "src:url\\(../webfonts/fa-([^.]+).*?}",
      'src: url("../webfonts/fa-\\1.woff2") format("woff2"), url("../webfonts/fa-\\1.ttf") format("truetype"); }',
      .
    ) %>%
    readr::write_file(file = "css/all.min.css")
})
|
a49770f843dc6a33e84fec89b5f5dd0404d0e3bb | de9e9f3f810c27afb632a9e0292d9ea9baa06f39 | /R/mna_tick.R | 46b77cf16ea421a7d014a52cc2d8100e36614529 | [] | no_license | JohnRFoster/fosteR | ad19ce2cedecacfaa00013b9dc3d3354ebf2c661 | 849e9f10a8a1920e47c23ce2d41fbf982d01ec0f | refs/heads/master | 2020-03-28T22:01:39.690248 | 2019-02-22T19:31:42 | 2019-02-22T19:31:42 | 149,199,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,487 | r | mna_tick.R | ##---------------------------------------------------------------------------------##
## Mouse MNA closest to tick sampling ##
##---------------------------------------------------------------------------------##
#' Mouse MNA closest to tick sampling
#'
#' Function that finds the minimum number alive of mice nearest in time to
#' tick sampling date, returns a vector of MNA
#' @param sites character vector of Cary Institute site names to process;
#'   defaults to the three control grids
#' @export
#' @examples mna_tick_current()
# Build a sites x days matrix of mouse minimum-number-alive (MNA) values.
# For every day in each site's tick sampling window, the entry is the MNA
# from the nearest preceding mouse sampling occasion. Rows follow `sites`;
# rows for sites with shorter tick windows are right-padded with NA.
mna_tick_current <- function(sites = c("Green Control","Henry Control","Tea Control")){
  # Smallest strictly positive value; used to locate the closest mouse
  # sampling date that falls on or before a given day.
  minpositive <- function(x) min(x[x > 0])
  mna <- vector("list", length(sites))
  for (i in seq_along(sites)) {
    tick <- tick_cary(sites[i], "individual")
    ch <- suppressWarnings(ch_cary(sites[i]))
    ks <- known_states(ch)
    tick.dates <- as.Date(colnames(tick))
    # Daily sequence spanning the whole tick sampling period for this site.
    tick.seq <- seq.Date(tick.dates[1], tick.dates[length(tick.dates)], 1)
    mouse.dates <- as.Date(colnames(ks))
    # MNA per mouse sampling occasion = column sums of the known-states matrix.
    mna.obs <- apply(ks, 2, sum)
    mna.x <- numeric(length(tick.seq))
    for (j in seq_along(tick.seq)) {
      close.date <- which(tick.seq[j] - mouse.dates == minpositive(tick.seq[j] - mouse.dates))
      mna.x[j] <- mna.obs[close.date]
    }
    mna[[i]] <- mna.x
  }
  # BUG FIX: the output matrix previously hard-coded 3 rows, which broke for
  # any `sites` vector of a different length.
  out <- matrix(NA, length(sites), max(lengths(mna)))
  for (i in seq_along(sites)) {
    out[i, seq_along(mna[[i]])] <- mna[[i]]
  }
  return(out)
}
|
0730b613bf0aa066ce8b16fb99111fe0cae9e055 | a0daa41f8222203a1795c0d847b4743de0650db3 | /base.r | cdfb1b9338155517175530fb269bb7f48c887c10 | [] | no_license | headisbagent/WIRED | f0162fba11aafaa33cd21d6113224e6e140feae6 | 26abc0ff56de2dfbe2c5cce1095f2ed20574c6a0 | refs/heads/master | 2022-12-13T17:56:07.497247 | 2020-09-16T17:33:35 | 2020-09-16T17:33:35 | 296,101,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 535 | r | base.r | library(data.table)
library(reshape2)
library(gdxtools)
library(ggplot2)
library(rgdal)
library(ggmap)
library(placement)
library(sf)
library(lubridate)
library(ggnewscale)
library(RColorBrewer)
library(gganimate)
library(transformr)
library(e1071)
library(dplyr)
library(bit64)
library(cowplot)
library(patchwork)
igdx(gams.executable.location)
base.dir <- getwd()
# Print the solver-status and model-status lines from a GAMS listing
# (.lst) file, in that order, one grep result per print() call.
print.lst.status <- function(file) {
	listing_lines <- readLines(file)
	for (status_pattern in c('SOLVER STATUS', 'MODEL STATUS')) {
		print(grep(status_pattern, listing_lines, value = TRUE))
	}
}
07f3fff78fea7ab5ea3f438ae51d735ef8103d25 | 17273704bee98aa89397673a900a91bf6819e8ac | /R/MGPS_uni_opt.R | 68e87793118e12640d5e4ac199116cf836f72b61 | [] | no_license | sidiwang/hgzips | cbc8246c3dea76d61036755a4aae1bacebe7240a | dd1a43b5efb2a3d3b4b7ba8b5b436d5793c29cbb | refs/heads/master | 2023-02-15T07:55:43.956668 | 2021-01-13T08:30:07 | 2021-01-13T08:30:07 | 287,367,518 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,513 | r | MGPS_uni_opt.R | #' HGZIPS - MGPS (uniroot-optimization)
#'
#' Fits the two-component MGPS (Multi-item Gamma Poisson Shrinker) mixture
#' model by an EM algorithm, with uniroot/optimize-based M-steps.
#' @name MGPS_uni_opt
#' @aliases MGPS_uni_opt
#' @import stats
#'
#' @param alpha.par initial shape parameter vector of the two gamma distributions for implementing the EM algprithm
#' @param beta.par initial rate parameter vector of the two gamma distributions for implementing the EM algprithm
#' @param pi.par initial mixing-proportion vector for the two mixture components
#' @param N squashed N_ij data (vector). This data can be generated by the rawProcessing function in this package.
#' @param E squashed E_ij data (vector). This data can be generated by the rawProcessing function in this package.
#' @param weight set weight = rep(1, length(N)) if N and E are not squashed data, or input the weight vector corresponding to the squashed Nij vector.
#' @param iterations number of EM algorithm iterations to run
#' @param Loglik whether to return the loglikelihood of each iteration or not (TRUE or FALSE)
#' @seealso
#'
###########################################################
## MGPS, Expectation Maximization (optimization, uniroot)
###########################################################
# input N = Nij$frequency, E = Eij$baseline
# Profile log-likelihood in alpha for one mixture component.
# For a candidate shape `alpha`, the rate `beta` is profiled out by solving
# the weighted score equation in beta with uniroot(); the weighted,
# Tij-weighted negative-binomial log-likelihood at that beta is returned.
#
# alpha  - candidate shape parameter
# Tij    - per-cell weights (the posterior component probabilities when
#          called from MGPSParUpdate)
# weight - squashing weights for the (N, E) records
# N, E   - observed counts and baseline expected counts
ProfileLogLik <- function(alpha, Tij, weight, N, E) {
  N = as.matrix(N)
  E = as.matrix(E)
  # Score function in beta, vectorized over candidate values x so uniroot()
  # can evaluate it anywhere in the search interval.
  fff <- function(x) {
    ## Think of x as beta that you want to find
    tau <- length(x)
    ans <- x
    for(k in 1:tau) {
      ans[k] <- (alpha/x[k])*sum(Tij*weight) - sum((Tij*weight*(N + alpha))/(x[k] + E))
    }
    return(ans)
  }
  # extendInt = "yes" lets uniroot widen (0, 1000) if the root lies outside.
  beta.val <- uniroot(fff, interval = c(0, 1000), extendInt = "yes")$root
  # Negative-binomial success probability implied by the profiled beta.
  probs <- beta.val/(E + beta.val)
  ans <- sum(Tij*weight*dnbinom(N, size = alpha, prob = probs, log = TRUE))
  return(ans)
}
# One EM iteration for the two-component MGPS mixture.
# E-step: posterior probability of component 1 for each cell, computed from
# the log Bayes factor on the log scale for numerical stability.
# M-step: update the mixing proportion; then, for each component, maximize
# over alpha with optimize() (beta profiled out inside ProfileLogLik) and
# recover beta as the root of the score equation at that alpha.
#
# alpha.vec, beta.vec - current shape/rate parameters (length 2)
# pi.vec              - current mixing proportions (length 2)
# N, E                - observed counts and baseline expected counts
# weight              - squashing weights for the (N, E) records
MGPSParUpdate <- function(alpha.vec, beta.vec, pi.vec, N, E, weight) {
  N = as.matrix(N)
  E = as.matrix(E)
  I <- nrow(N)
  J <- ncol(N)
  LT1ij <- LT2ij <- post.probs <- rep(0, I)
  # Score equation in beta for a fixed alpha; vectorized over candidates x.
  ff <- function(x, Tij, alpha, weight) {
    tau <- length(x)
    ans <- x
    for(k in 1:tau) {
      ans[k] <- (alpha/x[k])*sum(Tij*weight) - sum((Tij*weight*(N + alpha))/(x[k] + E))
    }
    return(ans)
  }
  # Per-cell log-densities under each component at the current parameters.
  LT1ij <- dnbinom(N, size = alpha.vec[1], prob=beta.vec[1]/(E + beta.vec[1]), log=TRUE)
  LT2ij <- dnbinom(N, size = alpha.vec[2], prob=beta.vec[2]/(E + beta.vec[2]), log=TRUE)
  # Log Bayes factor of component 2 vs component 1, including prior odds.
  logBF <- LT2ij - LT1ij + log(pi.vec[2]) - log(pi.vec[1])
  # post.probs = 1 / (1 + exp(logBF)), evaluated on whichever branch keeps
  # the exp() argument non-positive to avoid overflow.
  post.probs[logBF < 0] <- exp(-log1p(exp(logBF[logBF < 0])))
  post.probs[logBF >= 0] <- exp(-logBF[logBF >= 0] - log1p(exp(-logBF[logBF>=0])))
  # Weighted update of the mixing proportions.
  pi.vec <- c(sum(post.probs*weight)/sum(weight), 1 - sum(post.probs*weight)/sum(weight))
  # M-step per component: alpha by profile-likelihood maximization, then
  # beta as the root of the score equation at that alpha.
  alpha.vec[1] <- optimize(ProfileLogLik, c(0, 1000), maximum = TRUE, Tij = post.probs, N = N, E = E, weight = weight)$maximum
  beta.vec[1] <- uniroot(ff, interval=c(0, 1000), Tij = post.probs, alpha = alpha.vec[1], weight = weight, extendInt = "yes")$root
  alpha.vec[2] <- optimize(ProfileLogLik, c(0, 1000), maximum = TRUE, Tij = 1 - post.probs, N = N, E = E, weight = weight)$maximum
  beta.vec[2] <- uniroot(ff, interval = c(0, 1000), Tij = 1 - post.probs, alpha = alpha.vec[2], weight = weight, extendInt = "yes")$root
  par.list <- list("alpha1" = alpha.vec[1], "beta1" = beta.vec[1], "alpha2" = alpha.vec[2], "beta2" = beta.vec[2], "pi" = pi.vec[1])
  return(par.list)
}
# Weighted marginal log-likelihood of the two-component NB mixture at
# parameters `theta` (a list with alpha1, beta1, alpha2, beta2, pi),
# summed over all (N, E) cells with squashing weights `weight`.
LogLikMGPS <- function(theta, N, E, weight) {
  N = as.matrix(N)
  E = as.matrix(E)
  I <- nrow(N)
  J <- ncol(N)
  # Pre-allocated, then overwritten wholesale by the vectorized dnbinom
  # calls below.
  D1 <- D2 <- matrix(0, nrow=I, ncol=J)
  D1 <- dnbinom(N, size = theta$alpha1, prob=theta$beta1/(E + theta$beta1), log=TRUE)
  D2 <- dnbinom(N, size = theta$alpha2, prob=theta$beta2/(E + theta$beta2), log=TRUE)
  ## Use LogSumExp trick
  # Combine the two mixture components on the log scale, anchoring each sum
  # on its larger term so exp() never overflows/underflows catastrophically.
  D1.vec <- log(theta$pi) + c(D1)
  D2.vec <- log(1 - theta$pi) + c(D2)
  log.dens <- rep(0, length(D1.vec))
  log.dens[D1.vec < D2.vec] <- D2.vec[D1.vec < D2.vec] + log(1 + exp(D1.vec[D1.vec < D2.vec] - D2.vec[D1.vec < D2.vec]))
  log.dens[D1.vec >= D2.vec] <- D1.vec[D1.vec >= D2.vec] + log(1 + exp(D2.vec[D1.vec >= D2.vec] - D1.vec[D1.vec >= D2.vec]))
  loglik <- sum(log.dens*weight)
  return(loglik)
}
# input: initial alpha, beta, pi, N = Nij$frequency, E = Eij$baseline, iterations
# output: estimated parameters of each iteration, and loglikelihood of each iteration
#' @rdname MGPS_uni_opt
#' @return a list of estimated parameters and their corresponding loglikelihood
#' @export
#' MGPS_uni_opt
MGPS_uni_opt = function(alpha.par, beta.par, pi.par, N, E, weight, iterations, Loglik){
  niter <- iterations
  # Parameter traces: column k holds the estimates entering iteration k.
  # BUG FIX: the supplied starting values alpha.par / beta.par / pi.par are
  # now actually used; previously they were silently overwritten by the
  # hard-coded constants c(0.2, 2), c(0.1, 4) and c(1/3, 2/3).
  alpha.trace <- matrix(NA_real_, 2, niter + 1)
  alpha.trace[, 1] <- alpha.par
  beta.trace <- matrix(NA_real_, 2, niter + 1)
  beta.trace[, 1] <- beta.par
  pi.trace <- matrix(NA_real_, 2, niter + 1)
  pi.trace[, 1] <- pi.par
  theta0 <- list(
    alpha1 = alpha.trace[1, 1],
    alpha2 = alpha.trace[2, 1],
    beta1 = beta.trace[1, 1],
    beta2 = beta.trace[2, 1],
    pi = pi.trace[1, 1]
  )
  # Log-likelihood at the starting values, then one entry per EM step.
  ell <- rep(0, niter + 1)
  ell[1] <- LogLikMGPS(theta0, N = N, E = E, weight = weight)
  # seq_len() guards against iterations = 0 (1:0 would iterate twice).
  for (i in seq_len(niter)) {
    print(i)  # progress indicator
    theta_EM <- MGPSParUpdate(alpha.vec = alpha.trace[, i],
                              beta.vec = beta.trace[, i],
                              pi.vec = pi.trace[, i],
                              N = N, E = E, weight = weight)
    alpha.trace[, i + 1] <- c(theta_EM$alpha1, theta_EM$alpha2)
    beta.trace[, i + 1] <- c(theta_EM$beta1, theta_EM$beta2)
    pi.trace[, i + 1] <- c(theta_EM$pi, 1 - theta_EM$pi)
    if (isTRUE(Loglik)) {
      ell[i + 1] <- LogLikMGPS(theta_EM, N = N, E = E, weight = weight)
    }
  }
  if (!isTRUE(Loglik)) {
    # Preserve the historical contract: loglik is NA when not requested.
    ell <- NA
  }
  result <- list("alpha" = alpha.trace, "beta" = beta.trace,
                 "pi" = pi.trace, "loglik" = ell)
  return(result)
}
|
f663e9510ac34e9acc019632bd8001905fc60094 | 8e709b564da4e64ea5ad4adf58de1046a09f7289 | /figure/plot2.R | c5fdfbd374fa092adfc3faa68b12626786dc497a | [] | no_license | VS94/ExData_Plotting1 | b96a5ed7c36e14ee41bc82ac316a8b65fe7129ca | ad9120f9c2852c035967daa95b7f75f69bee2839 | refs/heads/master | 2021-01-17T21:38:11.972076 | 2014-06-07T21:08:41 | 2014-06-07T21:08:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 487 | r | plot2.R | #loading data
# Read the full power-consumption file; '?' marks missing values, fields are
# semicolon-separated.
d<-read.table("../household_power_consumption.txt", header=T, na.strings="?", sep=";" )
# Combine date and time into one string, then parse both columns.
d$Time<- paste(d$Date, d$Time, sep=" ")
d$Date<-as.Date(strptime(d$Date, format="%d/%m/%Y"))
d$Time<-strptime(d$Time, format="%d/%m/%Y %H:%M:%S")
# Keep only the two target days (1-2 Feb 2007).
d.sub<-d[which(d$Date=="2007-02-01" | d$Date=="2007-02-02"),]
#Plot n°2
# Line plot of global active power over time, written to a 480x480 PNG.
# NOTE(review): the y-axis label misspells "Global" as "Globale"; left as-is
# here because changing it would alter the rendered plot.
png("plot2.png", width=480, height=480)
plot(d.sub$Time,d.sub$Global_active_power, type='l', main="", xlab="", ylab="Globale Active Power (kilowatts)")
dev.off()
|
3fdc1a1c8c2eaaf3c80d808f4db4af4f358bfd3a | caaac3151e690edc027f715e6eabf67fc4952d14 | /R/usair_combine.R | df6a3c386b7e271028a516c9976d350f747074bb | [
"CC0-1.0"
] | permissive | nemochina2008/USAir | 7dd5383a03895b832d1c65059e7b1bf6a6052fe4 | 379ab6c4c33cff77e5fcc9ccf670b2e95c3ce226 | refs/heads/master | 2021-06-01T07:09:40.132974 | 2016-06-12T19:13:51 | 2016-06-12T19:13:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,656 | r | usair_combine.R | #' Traverse from one or more selected edges toward
#' adjacent outward nodes
#' @description From a graph object of class
#' \code{dgr_graph} move to adjacent nodes from a
#' selection of one or more selected edges where the
#' edges are outward edges to those nodes. This creates
#' a selection of nodes. An optional filter by node
#' attribute can limit the set of nodes traversed to.
#' @param usair_dataset the name of the dataset to
#' use; options are \code{co} (carbon monoxide),
#' \code{no2} (nitrogen dioxide), \code{so2} (sulfur
#' dioxide), \code{ozone} (ground-level ozone),
#' \code{pm10} (particulate matter up to 10 microns in
#' diameter), \code{pm25_frm} (particulate matter up
#' to 2.5 microns in diameter, using a Federal
#' Reference Method (FRM) instrument or its equivalent
#' (FEM)), \code{pm25_nfrm} (same as \code{pm25_frm}
#' but not using an FRM or FEM instrument ), and
#' \code{met} (meteorology).
#' @param years a vector of years that represents the
#' starting and ending years to include in the combined
#' dataset.
#' @return a dplyr \code{tb_df} data frame with
#' combined data for multiple years.
#' @examples
#' \dontrun{
#' # Get carbon monoxide (CO) data for the years 1992,
#' # 1993, and 1994 in a single data frame
#' co_1992_1994 <- usair_combine("co", c(1992, 1994))
#'
#' # Get a count of records for this dataset
#' nrow(co_1992_1994)
#' #> [1] 12584901
#' }
#' @import dplyr
#' @export usair_combine
usair_combine <- function(usair_dataset,
                          years) {
  # Validate the dataset keyword before touching the file system.
  if (!(usair_dataset %in%
        c("co", "no2", "so2", "ozone",
          "pm10", "pm25_frm", "pm25_nfrm",
          "met"))){
    stop("The specified dataset does not exist.")
  }
  # Objects inside each .rdata file are expected to be named
  # `<dataset>_hourly_<year>` (see the get() call below).
  dataset_object_name <- paste0(usair_dataset, "_hourly")
  years_expanded <- seq(years[1], years[2], 1)
  # The two PM2.5 datasets use longer file-name prefixes than their
  # keywords; every other dataset's files use the keyword itself.
  file_prefix <- switch(
    usair_dataset,
    pm25_frm = "pm25_frm_fem",
    pm25_nfrm = "pm25_non_frm_fem",
    usair_dataset
  )
  filenames <- paste0(file_prefix, "_hourly_", years_expanded, ".rdata")
  file_locations <- system.file(paste0("data/", filenames), package = "USAir")
  # system.file() drops missing files and can yield "" (or nothing) when
  # none of the requested years exist.
  if (length(file_locations) == 0 ||
      (length(file_locations) == 1 && file_locations == "")) {
    stop("The selected years do not have data.")
  }
  # Load each yearly file, collect its data object, and bind all years at
  # once (avoids the quadratic cost of bind_rows() inside the loop).
  data_list <- vector("list", length(file_locations))
  for (i in seq_along(file_locations)) {
    load(file_locations[i])
    year <- gsub(".*_([0-9][0-9][0-9][0-9]).rdata$", "\\1", file_locations[i])
    data_list[[i]] <- get(paste0(dataset_object_name, "_", year))
  }
  data <- bind_rows(data_list)
  return(data)
}
|
5a5e2ac9f549fd14477179ba96164cc8508b9212 | 6ad95fc7067d6ab32bee6f1a6ad730b984d12579 | /ashiq/R codes/chapter 1/example1_sec1_3.R | fd981d852a3f62cb57ea16c99a60937fd9cffb30 | [] | no_license | sahridhaya/BitPlease | d881f95c404ae9a433c79a8f6519ea7eaba9950a | 04e1c38944f0ee24eb1377ced87e6550253fbb2e | refs/heads/master | 2020-04-21T11:36:10.914922 | 2020-04-06T09:02:47 | 2020-04-06T09:02:47 | 169,531,342 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 288 | r | example1_sec1_3.R | #Example 1, section 1.3, page 22
#Find the Dot product of U and V :
# Dot (inner) product of two equal-length numeric vectors:
# returns sum over i of U[i] * V[i].
DOT <- function(U, V) {
  # Guard against silent recycling: the dot product is only defined for
  # vectors of the same length.
  if (length(U) != length(V)) {
    stop("U and V must have the same length")
  }
  # Vectorized form replaces the original element-by-element accumulation.
  sum(U * V)
}
U<- c(1,-2,3,4)
V<- c(2,3,-2,1)
DOT(U,V)
|
f9b6da4fa04155d34fc14a57a692edda2280345e | e6a93ae315d99a92a2577b29788b7437ce1a5ca8 | /NBJimport.R | c5e329bc9b3e5a8e28d8307825c8337b7254ac81 | [] | no_license | jkmunk/GraphicalLab | 7691774c07290ae9ae09df3f5d8f74d584e69239 | 5913628281404c67669d3c9f2b2b3161a5e0f88d | refs/heads/master | 2023-01-01T17:16:09.213883 | 2020-10-26T10:34:42 | 2020-10-26T10:34:42 | 306,059,550 | 0 | 0 | null | null | null | null | ISO-8859-15 | R | false | false | 1,467 | r | NBJimport.R | # Import from SP as delivered by Nils Bruun Jørgensen
# Data is in one column in Excel file
# Read in as a simple vector at first
setwd("H:/Ongoing projects/Graphical Lab/v0.1/")
library(readxl)
library(dplyr)
#RawIn <- read_excel("Ongoing projects/Graphical Lab/v0.1/Prøvesvar fra NBJ.xlsx",
# col_names = FALSE)
RawIn <- unlist(read_xlsx("Prøvesvar fra NBJ.xlsx",
col_names = FALSE))
# Collect time stamps
Times <- !apply(as.matrix(as.numeric(RawIn)),
FUN = is.na,
MARGIN = 1)
# Collect time stamp line numbers
TimesIndexes <- which(Times)
Times <- RawIn[which(Times)] #%>% unique()
Times <- as.POSIXct(as.Date(as.numeric(Times),
origin = "1899-12-30"))
# Make a list of analyses carried out
counter1 <- counter2 <- counter3 <- 0
Names <- ""
NameVal <- list()
for (e in RawIn) {
counter1 <- counter1 + 1
if (counter1 %in% TimesIndexes) {
counter3 <- counter3 + 1
counter4 <- 0
NameVal[counter3] <- Times[counter3]
}
else if (is.na(e)) {
}
else {
counter2 <- counter2 + 1
Names[counter2] <- strsplit(e,":")[[1]][1]
counter4 <- counter4 + 1
NameVal[[counter3]][counter4] <- list(strsplit(e,":")[[1]])
}
}
names(NameVal) <- Times
Names <- Names %>%
unique()
# Find possible duplicate times
DupTimes <- names(table(Times)[which(as.logical(table(Times) - 1))])
# Merge lists with identical times
|
e6d104a3ffbddacb6b804c029f6b2b9cceb106bc | 782027c0c0d58ceeba379b6f4b4783ed37202521 | /code/07_functional-programming.R | adcb0293a6dce73ab848f21720b56466cebbd10f | [] | no_license | Boukos/PS811-computing | a18964cea191303848ac46add206c18d9fbbf030 | 1f77575e6f8e14967599f72e0c14cc2458bbb208 | refs/heads/master | 2022-06-21T02:38:20.530644 | 2020-05-12T19:05:24 | 2020-05-12T19:05:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,820 | r | 07_functional-programming.R | # ----------------------------------------------------
# PS 811: Statistical Computing
# Spring 2020
# "Functions you can pull up to"
# Loops, functional programming, mapping
# ----------------------------------------------------
# The purpose of this lesson is to teach you about
# VECTORIZED operations.
# These are routines that will
# - make your code run faster
# - make you a more creative problem solver
# - let you do push around A LOT of data EFFICIENTLY and ELEGANTLY
# we cover:
# - loops
# - apply functions
# - lists, list indexing
# - "scoped" dplyr verbs (if, at, all)
# - anonymous functions, quosure/lambda/purrr functions
# - NESTING and MAPPING (!!!)
# ---- packages and data -----------------------
library("here")
library("tidyverse")
library("broom")
# US House elections data
house <-
read_csv(here("data", "house-elections.csv")) %>%
filter(election_year >= 1952) %>%
print()
# looking at the relationship between
# ideology scores (y)
# and district voting for president (x)
# We'd imagine that House members are more LIBERAL
# if their districts vote more heavily for DEMOCRATS for president
ggplot(house) +
aes(x = rep_share_pres, y = nominate_dim1) +
geom_point() +
geom_smooth(method = "lm", aes(color = party))
# ---- LOOPS -----------------------
# For every value i in {1, 2, ..., N}, do some f(i)
# Simple loop to demonstrate main idea
for (j in 1:10) {
print(j)
}
# The routine LOOPS through the vector 1:10
# - call the element "j"
# - do an operation that involves j
# - repeat until we exhaust the vector
# ---- loops in R...HORRIBLE -----------------------
# you probably don't NEED them
# which is good, because they're famously slow and cumbersome
# "for each row, save the number of columns"
# looping requires me to make a container ahead of time
columns_loop <- numeric()
for (row in 1:nrow(house)) {
columns_loop[row] <- length(house[row, ])
}
# ---- VECTORIZE your routine -----------------------
# if your routine is just the SAME FUNCTION
# iterated for every instance
# this will go way faster by applying a "vectorized" function
# apply function "length" for every row (margin = 1) in house
# super speedy
columns <- apply(X = house, MARGIN = 1, FUN = length)
# ---- the "apply functions" -----------------------
# a family of functions for vectorized operations
# I want you to be familiar with the IDEA of these functions.
# Of these functions, lapply() is maybe the only one
# I still find useful (and only sometimes)
# apply(): apply a function over rows or columns of a grid/array.
# Pro: VERY useful for matrix-oriented methods
# where every element will be the same data type!
# Con: Unless you're writing statistical algorithms,
# it isn't common to work DIRECTLY with matrices
?apply
# lapply(): apply a function over the elements of a list.
# LISTS!!! They're great but weird.
# Pro: can contain elements of arbitary type.
# Con: weird to work with sometimes?
# data frames are lists! Just repackaged
house
house_list <- as.list(house)
# aaah!!
house_list
# preview every element in the list
lapply(house_list, head)
list(a = c(1, 2, 3),
b = "hello!!!!!",
d = list)
# the weird part: indexing
str(house_list)
str(house_list[1]) # what's the data type of the first element of this list?
# lol guess again
str(house_list[[1]]) # what's the data type of the data WITHIN the
# first element of the list?
# Gotta do this weird double indexing thing
# https://twitter.com/hadleywickham/status/643381054758363136?lang=en
# tapply(): apply a function(s) to groups of data.
# Common in "dinosaur-style" R.
# We HATE tapply. It gives you a stupid array with named rows/cols named array
# horrible...
tapply(
X = house$nominate_dim1,
INDEX = list(house$party, house$election_year),
FUN = mean,
na.rm = TRUE
)
# what do we do instead? SO much easier
house %>%
group_by(election_year, party) %>%
summarize(
mean_nom = mean(nominate_dim1, na.rm = TRUE)
)
# ---- enter... functional programming with dplyr/purrr -------------
# vectorization: good
# R's built in vectorizing functions: not consistent w/ our workflow
# two big things we want to learn.
# 1) verb_at, verb_if, verb_all
# 2) group_by() %>% nest() %>% map()
# ---- if, at, all verbs -----------------------
# verbs: mutate, rename, select, filter, transmute, count, ...
# "scoped verbs"
# - if: apply function(s) to variables that match a logical condition
# - at: apply function(s) to a selection of variables
# - all: apply function(s) to every variable
# verb_if()
# example: mutate character variables to factors
# predictate is a function that results in a logical: TRUE or FALSE
house %>%
mutate_if(
.predicate = is.character,
.funs = as.factor
)
# .predicate and .funs arguments take objects of type "function"
is.character
class(is.character)
is.character("a")
as.factor("a")
# functions that take function names as arguments like this
# are called FUNCTIONALS
# hence the term "functional programming"
# other examples
select_if(house, is.numeric)
# verb_at()
# example: convert percentage variables to proportions
house %>%
mutate_at(
.vars = vars(starts_with("rep_share")),
.funs = function(x) x / 100
) %>%
select(starts_with("rep_share"))
# you can DEFINE YOUR OWN FUNCTIONS on the fly
# this is the same basic idea as saving a custom function,
# only difference being whether you save the function with a name.
make_into_proportion <- function(x) {
return(x / 100)
}
make_into_proportion(88)
# if you DON'T save the function w/ a name,
# and instead you just use it as a one-off routine,
# this is called an ANONYMOUS FUNCTION
# The other way to do build custom functions on the fly is by doing
# what is sometimes called a "lambda function"
# or a "quosure-style" function
# or a "purrr-style" function
# Advanced R book
# it works a lot like an anonymous function
# example: select variables that contain NA values
select_if(
house,
.predicate = ~ sum(is.na(.)) > 0
)
# function(z) sum(is.na(z)) > 0
# instead of saying function(z), you say `~`
# and then instead of calling z, you call `.`
house %>%
mutate_at(
.vars = vars(rep_share_house, rep_share_pres),
.funs = ~ . / 100
)
# verb_all()
# this example shows how you can apply multiple functions
# by (1) supplying functions as a list and (2) optionally naming each fx
summarize_all(
house,
.funs = list(uniques = ~ n_distinct(.),
obj_type = class)
)
# ---- nesting and mapping -----------------------
# OMG this is the moment I've been waiting for
# This is seriously INSANE.
# One huge benefit of having a data-frame based workflow
# is the way that it can ORGANIZE your work.
# We're about to take that one level of abstraction up
# with NESTED DATA FRAMES
# group_by() %>% nest()
# collapses data within a grouping variable
house %>%
group_by(election_year) %>%
nest()
# This is a NESTED data frame.
# It is a data frame that contains data frames (whoa dude...)
# the `data` column is of type LIST. We call this a list-column.
# It's a column that IS a list! Every element in this list is a data frame,
# but we could have list columns that contain different stuff.
# Why do this?
# We've already seen that we can create new variables with grouped scope
# summarize variables with grouped scope, etc.
# But these operations have some type restrictions.
# by working with nested data frames, I can create outputs
# of essentially ARBITRARY TYPE
# and the results STAY ORGANIZED in the data frame.
# For example, we want to know the relationship between
# ideology (y) and district voting (x),
# BUT it may vary by party and over time.
# So maybe let's estimate a model within each party x year.
# How would we have done this before? Loop over party within year? LAME
nested_house <- house %>%
group_by(party, election_year) %>%
nest() %>%
print()
# make a new column containing model results.
# using the purrr::map() function. (purrr is part of tidyverse)
# map() can be used in a nested data context to map a function
# over a list column.
# how it works:
# mutate(new_variable = map(.x = data_column, .f = function))
# This example: call lm() using "quosure style" function.
nested_models <- nested_house %>%
mutate(
model = map(
.x = data,
.f = ~ lm(nominate_dim1 ~ rep_share_pres, data = .x)
)
) %>%
print()
# Can just call function names though.
# Extra function arguments after function name
nested_tidy <- nested_models %>%
mutate(coefs = map(model, tidy, conf.int = TRUE)) %>%
print()
# unnest required columns when you're done
coefs <- nested_tidy %>%
unnest(coefs) %>%
print()
# can't unnest non-data-frame objects but I can pull them out
nested_tidy %>% unnest(model) # hits error
nested_tidy %>% pull(model) # extracts vector from a data frame
nested_tidy$model # works the same as this
# unnest directly into plot!
coefs %>%
filter(term == "rep_share_pres") %>%
ggplot() +
aes(x = election_year, y = estimate, color = party) +
geom_pointrange(
aes(ymin = conf.low, ymax = conf.high),
position = position_dodge(width = 1)
)
# other ways you can use nesting and mapping
# reading in a list of data frames...
# install.packages("rio")
tibble(filename = list.files(here("data"))) %>%
filter(str_sub(filename, -3L, -1L) %in% c("csv", "dta", "rds", "xls")) %>%
mutate(
data = map(
.x = filename,
.f = ~ {
here("data", .x) %>% # file path
rio::import() %>% # import data
as_tibble() # convert to tidyverse-style DF
}
)
)
# ---- learn more about this stuff -----------------------
# - map functions that return specific output formats (not a list column):
# map_int, map_dbl, map_chr, map_df
# - map a function with TWO input data columns, map2(), or over
# an arbitrary number of input columns, pmap()
# - R 4 Data Science:
# https://r4ds.had.co.nz/iteration.html
# https://r4ds.had.co.nz/many-models.html
# - Purrr cheat sheet:
# https://github.com/rstudio/cheatsheets/blob/master/purrr.pdf
# - other helpful blog posts
# https://www.hvitfeldt.me/blog/purrr-tips-and-tricks/
# https://jennybc.github.io/purrr-tutorial/index.html
# - I have some blog posts where I use map() to do helpful things
# applying different treatment randomizations to nested datasets:
# https://mikedecr.github.io/post/randomization-inference-purrr/
# applying different model specifications to nested datasets:
# https://mikedecr.github.io/post/viz-partials/
|
275fe482a7f81ce0784ce2566c4624c092b79109 | da62ddcc454c064c388665e87b842017054acd7b | /man/to_ndjson.Rd | f39a37b732fa0ab6e77c0a07a087a4847d276901 | [
"MIT"
] | permissive | SymbolixAU/jsonify | d34ae4ced6b389d19c3207b1136257e794cc7710 | 94e34bf9682f7bc29444546b6a9edc7891aa200f | refs/heads/master | 2023-01-22T09:04:19.057360 | 2023-01-11T03:43:29 | 2023-01-11T03:43:29 | 150,085,264 | 67 | 12 | null | 2020-05-30T05:24:13 | 2018-09-24T10:17:37 | C++ | UTF-8 | R | false | true | 1,975 | rd | to_ndjson.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/to_json.R
\name{to_ndjson}
\alias{to_ndjson}
\title{To ndjson}
\usage{
to_ndjson(
x,
unbox = FALSE,
digits = NULL,
numeric_dates = TRUE,
factors_as_string = TRUE,
by = "row"
)
}
\arguments{
\item{x}{object to convert to JSON}
\item{unbox}{logical indicating if single-value arrays should be 'unboxed',
that is, not contained inside an array.}
\item{digits}{integer specifying the number of decimal places to round numerics.
Default is \code{NULL} - no rounding}
\item{numeric_dates}{logical indicating if dates should be treated as numerics.
Defaults to TRUE for speed. If FALSE, the dates will be coerced to character in UTC time zone}
\item{factors_as_string}{logical indicating if factors should be treated as strings. Defaults to TRUE.}
\item{by}{either "row" or "column" indicating if data.frames and matrices should be processed
row-wise or column-wise. Defaults to "row"}
}
\description{
Converts R objects to ndjson
}
\details{
Lists are converted to ndjson non-recursively. That is, each of the objects
in the list at the top level are converted to a new-line JSON object. Any nested
sub-elements are then contained within that JSON object. See examples
}
\examples{
to_ndjson( 1:5 )
to_ndjson( letters )
mat <- matrix(1:6, ncol = 2)
to_ndjson( x = mat )
to_ndjson( x = mat, by = "col" )
df <- data.frame(
x = 1:5
, y = letters[1:5]
, z = as.Date(seq(18262, 18262 + 4, by = 1 ), origin = "1970-01-01" )
)
to_ndjson( x = df )
to_ndjson( x = df, numeric_dates = FALSE )
to_ndjson( x = df, factors_as_string = FALSE )
to_ndjson( x = df, by = "column" )
to_ndjson( x = df, by = "column", numeric_dates = FALSE )
## Lists are non-recursive; only elements `x` and `y` are converted to ndjson
lst <- list(
x = 1:5
, y = list(
a = letters[1:5]
, b = data.frame(i = 10:15, j = 20:25)
)
)
to_ndjson( x = lst )
to_ndjson( x = lst, by = "column")
}
|
d686041d6342d4a810395d32c63d0af0e6574132 | 554764510bf0b0244919ebcc612c20a020ba846a | /man/adjust_year_effects.Rd | 4eb4f9cbeead7ae215b9818b9abd949d10c31214 | [] | no_license | guytuori/simScores | a42c4091adb55d14ec2fa6606841bb2f05165737 | 33e435930d3a9478f9320d5b2451812430d78d52 | refs/heads/master | 2021-01-10T11:00:16.482569 | 2015-11-10T18:53:54 | 2015-11-10T18:53:54 | 45,933,103 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,619 | rd | adjust_year_effects.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/clean-data.R
\name{adjust_year_effects}
\alias{adjust_year_effects}
\title{Adjust for Changes in Run-Scoring Environment}
\usage{
adjust_year_effects(stats, yfs, type = c("bat", "pit"))
}
\arguments{
\item{stats}{data.frame of player statistics. Obtained from
\code{\link{adjust_park_factors}}.}
\item{yfs}{data.frame of multipliers for year effects. Must be in proper
form.}
\item{type}{character. Whether these are batting or pitching data. Defaults
to batting}
}
\value{
\code{tbl_df} of statistics that have been adjusted for year effects.
}
\description{
Uses year multipliers to adjust statistics to account for changes in run
scoring environment. This is particularly useful because a 0.300 OBP in 2006
is much different from a 0.300 OBP in 2014.
}
\details{
\code{stats} are left joined with \code{yfs}. If any year factors
are missing, they are set to average (which is 100). The statistics are
turned into long format using \code{\link{gather}} from the
\code{tidyr} package. Statistics are then adjusted using the
formula: \deqn{adjusted = Count * 100 / YF} Extraneous columns are
discarded then the data are returned to a wide format using
\code{\link{spread}} from the \code{tidyr} package.
}
\examples{
curr_wd <- getwd()
setwd("N:/Apps/simScoresApp/data")
stats <- read.csv("2-park-adjusted/bat-pf-adjust.csv", header = T, stringsAsFactors = F)
yfs <- read.csv("manual-info/Year-Factors.csv", header = T, stringsAsFactors = F)
x <- adjust_year_effects(stats, yfs, type = "bat")
setwd(curr_wd)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.