blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0acc4b6b12f949f456b71e933fc804499c9f5585
|
f59eb4a66aefa4da49184f241d22011f5e74baee
|
/0-Implementation/R Files/Decision Trees/Season - ELO/ELO11-12.R
|
d1006f4c014eb67f926f75cf2f907492f4f06ef8
|
[] |
no_license
|
Chanter08/Thesis
|
c89c0fa4173a7139ca4a588133e75032ccd54160
|
37e43db4eaa8c119c1ce99d55bedd06270f2ff73
|
refs/heads/master
| 2020-12-02T16:47:56.214945
| 2020-01-02T23:07:52
| 2020-01-02T23:07:52
| 143,121,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,610
|
r
|
ELO11-12.R
|
library("caret")
library(corrplot)
library(C50)
library(dummies)
library(gmodels)
library(Metrics)
library(neuralnet)
library(plyr)
library(rpart)
library(tree)
library(e1071)
library(rpart.plot)
library(fastDummies)
################################## Load Files #############################################
x <-
  read.csv(
    "C:\\Users\\User\\Documents\\Thesis\\Data\\Models\\ELO\\ELO11-12.csv",
    stringsAsFactors = FALSE
  )
################################# Clean Data ##############################################
# Bookmaker odds columns (home/draw/away for each bookmaker).  Coerce them all
# to numeric in one pass instead of 24 near-identical assignments.
odds_cols <- c(
  "B365H", "B365D", "B365A", "BWH", "BWD", "BWA", "GBH", "GBD", "GBA",
  "IWH", "IWD", "IWA", "LBH", "LBD", "LBA", "SBH", "SBD", "SBA",
  "WHH", "WHD", "WHA", "VCH", "VCD", "VCA"
)
x[odds_cols] <- lapply(x[odds_cols], as.numeric)
# Drop rows containing NA (including those introduced by coercing
# non-numeric odds entries above).
x <- na.exclude(x)
################################## Rename Columns #########################################
colnames(x)[1] <- "Season"
################################ Create Dummy Vars ########################################
# One-hot encode the home and away team names.
x <- cbind.data.frame(x, dummy(x$Home))
x <- cbind.data.frame(x, dummy(x$Away))
########################### Remove Cols After Dummy Vars ##################################
# The raw categorical/date columns are no longer needed once encoded.
x$Home <- NULL
x$Away <- NULL
x$Season <- NULL
x$date <- NULL
##################################### Modeling helper #####################################
# Fit a boosted C5.0 decision tree on a 60/40 train/test split of `dat` and
# print a confusion CrossTable of actual vs. predicted full-time results (FTR).
# The first column of `dat` is excluded from the predictors, matching the
# original per-section code.  Returns the fitted model invisibly.
#
# BUG FIX: the original nine copy-pasted sections all passed `trails = 100`;
# the C5.0 argument is `trials`, and the misspelled name was silently
# swallowed by `...`, so boosting was never actually applied.  Corrected to
# `trials = 100`.
run_c50 <- function(dat) {
  set.seed(123)
  dat$FTR <- as.factor(dat$FTR)
  n.rows <- nrow(dat)
  train.idx <- sample(n.rows, n.rows * 0.6)
  train <- dat[train.idx, ]
  test <- dat[-train.idx, ]
  model <- C5.0(train[, -1], train$FTR, trials = 100)
  plot(model)
  # summary() auto-printed at top level in the original; print explicitly here.
  print(summary(model))
  pred <- predict(model, test[, -1])
  CrossTable(
    test$FTR,
    pred,
    prop.c = FALSE,
    prop.r = FALSE,
    prop.chisq = FALSE
  )
  invisible(model)
}
##################################### All Bookies #########################################
y1.model <- run_c50(x)
##################################### Bet 365 Only ########################################
y2.model <- run_c50(x[-c(7:27)])
##########################################################################################################
y3.model <- run_c50(x[-c(4:6, 10:27)])
###########################################################################################################
y4.model <- run_c50(x[-c(4:9, 13:27)])
###########################################################################################################
y5.model <- run_c50(x[-c(4:12, 16:27)])
############################################################################################################
y6.model <- run_c50(x[-c(4:15, 19:27)])
##############################################################################################################
y7.model <- run_c50(x[-c(4:18, 22:27)])
#####################################################################################
y8.model <- run_c50(x[-c(4:21, 25:27)])
###############################################################################################################
y9.model <- run_c50(x[-c(4:24)])
#################################################################################################################
|
4d83ed92157123055ac356bf4f6cd9babfccae71
|
31be2f912b67317696c3c361ee0c6c32ce14a6f2
|
/rscripts/03_agency_level.R
|
c3cf9ed3dae1356fa2444f7a4c56b4232a9fe4cf
|
[] |
no_license
|
mayerantoine/QuarterlyProgress
|
de2548972398244cd319655dcd28fee3c2155cee
|
7a86ca11da7c05360a4a15856c20638f1d96aeac
|
refs/heads/master
| 2021-09-06T17:16:33.769634
| 2018-02-08T22:02:08
| 2018-02-08T22:02:08
| 111,563,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,054
|
r
|
03_agency_level.R
|
######################################################################################
## Quarterly Progress Report
## APR17
## Mayer Antoine , CDC Haiti
## Purpose : Create OU Level Report Table and Analysis by Agency
## Updated : 12/9/2017
## https://github.com/mayerantoine/QuarterlyProgress
######################################################################################
library(tidyverse)
library(formattable)
library(knitr)
library(scales)
# NOTE(review): removed `rm(list = ls())` — wiping the caller's global
# environment is a side effect a sourced script should not have; run the
# script in a fresh R session instead.
## IMPORT STE By IM Factview ----------------------------------------------------------
source("./rscripts/00_import_factview_data.R")
site_im <- import_factview_site_im()
caption <- c("Data source:ICPI FactView SitexIM")
# Non-cumulative indicators: annual result is the sum of the four quarters.
key_indicators <- c("HTS_TST","HTS_TST_POS","TX_NEW","PMTCT_ART","PMTCT_EID","PMTCT_STAT",
                    "PMTCT_STAT_POS","TB_ART","TB_STAT","TB_STAT_POS","PMTCT_EID_POS")
#key_indicators <- c("HTS_TST","HTS_TST_POS","TX_NEW","KP_PREV","PMTCT_ART","PMTCT_EID","PMTCT_STAT",
#                    "PMTCT_STAT_POS","TB_ART","TB_STAT","TB_STAT_POS","PMTCT_EID_POS",
#                    "PMTCT_EID_Less_Equal_Two_Months",
#                    "PMTCT_EID_Two_Twelve_Months","PMTCT_STAT_KnownatEntry_POSITIVE",
#                    "PMTCT_STAT_NewlyIdentified_POSITIVE")
# Cumulative (snapshot) indicators: annual result is the latest reported quarter.
key_cummulative_indicator <- c("TX_CURR", "OVC_SERV","TX_PVLS","TX_RET","OVC_HIVSTAT")
## OU Level Results --------------------------------------------------------------------
# We do not use fy2017apr because the code needs to work for any quarter.
# OU Level Results for non-cumulative indicators: sum the quarterly values.
ou_level_non_cummulative <- site_im %>%
  #filter(snu1 != "_Military Haiti") %>%
  filter(indicator %in% key_indicators) %>%
  filter(fundingagency == "HHS/CDC") %>%
  filter(disaggregate == "Total Numerator") %>%
  #filter(indicatortype == "DSD") %>%
  filter(numeratordenom == "N") %>%
  select(indicator, fy2015apr, fy2016apr, fy2017q1, fy2017q2, fy2017q3, fy2017q4, fy2017_targets) %>%
  group_by(indicator) %>%
  summarise(fy2015apr = sum(fy2015apr, na.rm = TRUE),
            fy2016apr = sum(fy2016apr, na.rm = TRUE),
            fy2017q1 = sum(fy2017q1, na.rm = TRUE),
            fy2017q2 = sum(fy2017q2, na.rm = TRUE),
            fy2017q3 = sum(fy2017q3, na.rm = TRUE),
            fy2017q4 = sum(fy2017q4, na.rm = TRUE),
            fy2017_targets = sum(fy2017_targets, na.rm = TRUE)) %>%
  mutate(fy2017Cum = fy2017q1 + fy2017q2 + fy2017q3 + fy2017q4,
         fy2017Perf = ifelse(fy2017_targets > 0, round((fy2017Cum / fy2017_targets) * 100, 1), 0))
# OU Level Results for cumulative (snapshot) indicators: the annual value is
# q4, falling back to q3 when q4 is zero (not yet reported).
ou_level_cummulative <- site_im %>%
  filter(snu1 != "_Military Haiti") %>%
  filter(indicator %in% key_cummulative_indicator) %>%
  filter(fundingagency == "HHS/CDC") %>%
  filter(disaggregate == "Total Numerator") %>%
  # filter(indicatortype == "DSD") %>%
  filter(numeratordenom == "N") %>%
  select(indicator, fy2015apr, fy2016apr, fy2017q1, fy2017q2, fy2017q3, fy2017q4, fy2017_targets) %>%
  group_by(indicator) %>%
  summarise(fy2015apr = sum(fy2015apr, na.rm = TRUE),
            fy2016apr = sum(fy2016apr, na.rm = TRUE),
            fy2017q1 = sum(fy2017q1, na.rm = TRUE),
            fy2017q2 = sum(fy2017q2, na.rm = TRUE),
            fy2017q3 = sum(fy2017q3, na.rm = TRUE),
            fy2017q4 = sum(fy2017q4, na.rm = TRUE),
            fy2017_targets = sum(fy2017_targets, na.rm = TRUE)) %>%
  mutate(fy2017Cum = ifelse(fy2017q4 == 0, fy2017q3, fy2017q4),
         fy2017Perf = ifelse(fy2017_targets > 0, round((fy2017Cum / fy2017_targets) * 100, 1), 0))
ou_level <- rbind(ou_level_cummulative, ou_level_non_cummulative)
# Summarise TX_CURR so we can derive TX_NET_NEW (change in patients on treatment).
tx_curr <- site_im %>%
  # filter(snu1 != "_Military Haiti") %>%
  filter(indicator == "TX_CURR") %>%
  filter(disaggregate == "Total Numerator") %>%
  filter(fundingagency == "HHS/CDC") %>%
  # filter(indicatortype == "DSD") %>%
  filter(numeratordenom == "N") %>%
  select(indicator, fy2015apr, fy2016apr, fy2017q1, fy2017q2, fy2017q3, fy2017q4, fy2017_targets) %>%
  group_by(indicator) %>%
  summarise(fy2015apr = sum(fy2015apr, na.rm = TRUE),
            fy2016apr = sum(fy2016apr, na.rm = TRUE),
            fy2017q1 = sum(fy2017q1, na.rm = TRUE),
            fy2017q2 = sum(fy2017q2, na.rm = TRUE),
            fy2017q3 = sum(fy2017q3, na.rm = TRUE),
            fy2017q4 = sum(fy2017q4, na.rm = TRUE),
            fy2017_targets = sum(fy2017_targets, na.rm = TRUE))
# Calculate net new on treatment by quarter (difference of successive TX_CURR
# snapshots).  `data_frame()` is deprecated in tibble; `tibble()` is the
# supported equivalent and, like data_frame(), evaluates arguments
# sequentially so later columns may reference earlier ones.
tx_net_new <- tibble(indicator = c("TX_NET_NEW"),
                     fy2015apr = c(0),
                     fy2016apr = c(tx_curr$fy2016apr - tx_curr$fy2015apr),
                     fy2017q1 = c(tx_curr$fy2017q1 - tx_curr$fy2016apr),
                     fy2017q2 = c(tx_curr$fy2017q2 - tx_curr$fy2017q1),
                     fy2017q3 = c(tx_curr$fy2017q3 - tx_curr$fy2017q2),
                     fy2017q4 = c(tx_curr$fy2017q4 - tx_curr$fy2017q3),
                     fy2017Cum = c(fy2017q1 + fy2017q2 + fy2017q3 + fy2017q4),
                     fy2017_targets = c(tx_curr$fy2017_targets - tx_curr$fy2016apr),
                     fy2017Perf = ifelse(fy2017_targets > 0, round((fy2017Cum / fy2017_targets) * 100, 1), 0))
# OU Level Results: bind non-cumulative, cumulative and TX_NET_NEW rows.
ou_level <- rbind(ou_level, tx_net_new)
ou_level
#write_csv(ou_level,"processed_data/ou_level_cdc.csv")
#write_csv(ou_level,"processed_data/ou_level_cdc.csv")
##################################################################################################
# APR treatment-cascade results by funding agency (DSD only), one column per agency.
tx_indicator <- c("HTS_TST","HTS_TST_POS","TX_NEW","TX_CURR","TX_PVLS","TX_RET")
ou_agency <- site_im %>%
  filter(snu1 != "_Military Haiti") %>%
  filter(indicator %in% tx_indicator) %>%
  filter(disaggregate == "Total Numerator") %>%
  filter(indicatortype == "DSD") %>%
  filter(numeratordenom == "N") %>%
  select(indicator, fundingagency, fy2017apr) %>%
  group_by(indicator, fundingagency) %>%
  summarise(fy2017apr = sum(fy2017apr, na.rm = TRUE)) %>%
  spread(fundingagency, fy2017apr) %>%
  select(indicator, `HHS/CDC`, USAID) %>%
  as.data.frame()
# Summarise TX_CURR by agency to derive TX_NET_NEW (APR-over-APR change).
tx_curr <- site_im %>%
  filter(snu1 != "_Military Haiti") %>%
  filter(indicator == "TX_CURR") %>%
  filter(disaggregate == "Total Numerator") %>%
  filter(indicatortype == "DSD") %>%
  filter(numeratordenom == "N") %>%
  filter(!(fundingagency %in% c("Dedup"))) %>%
  select(indicator, fundingagency, fy2016apr, fy2017apr) %>%
  group_by(indicator, fundingagency) %>%
  summarise(fy2016apr = sum(fy2016apr, na.rm = TRUE),
            fy2017apr = sum(fy2017apr, na.rm = TRUE)) %>%
  mutate(tx_net_new = fy2017apr - fy2016apr) %>%
  select(indicator, fundingagency, tx_net_new) %>%
  spread(fundingagency, tx_net_new) %>%
  as.data.frame()
# Relabel the derived row so it binds as TX_NET_NEW.
tx_curr$indicator <- c("TX_NET_NEW")
# Agency-level results: bind the cascade indicators with the TX_NET_NEW row.
ou_agency <- rbind(ou_agency, tx_curr)
#write_csv(ou_agency,"processed_data/ou_agency.csv")
#####
# Third-90 detail (retention / viral suppression) by partner, agency and site.
partner_3_90 <- site_im %>%
  filter(snu1 != "_Military Haiti") %>%
  filter(indicator %in% c("TX_RET","TX_PVLS")) %>%
  filter(standardizeddisaggregate %in% c("Total Numerator","Total Denominator")) %>%
  filter(indicatortype == "DSD") %>%
  #filter(numeratordenom == "N") %>%
  select(implementingmechanismname, fundingagency, psnu, facility, community,
         indicator, standardizeddisaggregate, fy2017q4, fy2017_targets) %>%
  group_by(implementingmechanismname, fundingagency, psnu, facility, community,
           indicator, standardizeddisaggregate) %>%
  summarise(fy2017q4 = sum(fy2017q4, na.rm = TRUE),
            fy2017_targets = sum(fy2017_targets, na.rm = TRUE))
write_csv(partner_3_90,"processed_data/partner_3_90.csv")
|
054c63b88b670203f7fbf86bb1404ae7b492225e
|
99e681afa2d7fead8d48034b9536d1360bff04af
|
/Vaja2/vaja2.r
|
0071eb5d057a5aa8c2837530bb672ca2f0e5299e
|
[] |
no_license
|
janpristovnik/Financni_praktikum
|
9def6419494c9e1fa5dd8b17f13bf415bad537c8
|
dbed1ea09412b7eb39111ec73d6e779d3fc2a679
|
refs/heads/master
| 2021-05-15T09:08:00.548115
| 2017-12-22T15:43:51
| 2017-12-22T15:43:51
| 108,002,774
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,142
|
r
|
vaja2.r
|
library(actuar)
# 1.a Import the data and draw a histogram.
vzorec <- scan("Vaja2/Podatki/vzorec1.txt")
# 1.b The Weibull distribution is chosen: with parameters lambda = 1, k = 1.5
# its density most closely resembles the histogram obtained from the sample.
# Fit by minimum-distance estimation (Cramer-von Mises measure).
Vektor1 <- mde(vzorec, pweibull, start = list(shape = 1, scale = 1), measure = "CvM")
shape <- Vektor1$estimate[1]
scale <- Vektor1$estimate[2]
# 1.c Histogram with the fitted Weibull density, and the empirical CDF.
histogram <- hist(vzorec, probability = TRUE, xlab = "Visina odskodnine", ylab = "Frequency")
curve(dweibull(x, shape, scale), add = TRUE, from = 0, to = 9)
histogram2 <- plot(ecdf(vzorec), main = " Porazdelitvena funkcija odskodnin ", ylab = "Porazdelitvena funkcija", xlab = "Visina odskodnine")
# 1.d The number of claims is modelled as Poisson(lambda = 15).
lambda <- 15
# E[Y] of a Weibull(shape, scale) severity.
Upanje_Y <- scale * gamma(1 + 1 / shape)
# E[S] = lambda * E[Y] (compound Poisson).
Upanje_S <- lambda * Upanje_Y
Varianca_Y <- (scale)^2 * (gamma(1 + 2 / shape) - (gamma(1 + 1 / shape))^2)
# Var[S] = lambda * Var[Y] + lambda * E[Y]^2 (compound Poisson).
Varianca_S <- lambda * Varianca_Y + (Upanje_Y)^2 * lambda
# Exercise 2
h <- 0.5
n <- 19
# 2.b Discretize the Weibull CDF (rounding method) on a short and a wide grid.
diskretna_y <- discretize(1 - exp(-(x / scale)^shape), from = 0, to = h * n, step = h, method = "rounding")
diskretna_y1 <- discretize(1 - exp(-(x / scale)^shape), from = 0, to = 1000, step = h, method = "rounding")
vektor_x <- seq(0, 9, 0.5)
graf <- plot(stepfun(vektor_x, diffinv(diskretna_y)))
curve(pweibull(x, shape, scale), add = TRUE, from = 0, to = 9, col = "red", lwd = 2)
# 2.c Panjer's recursive algorithm for the CDF of the aggregate loss S.
porazdelitvena <- aggregateDist(method = "recursive",
                                model.freq = "poisson",
                                model.sev = diskretna_y1,
                                x.scale = h,
                                lambda = 15,
                                tol = 0.002,
                                convolve = 0
)
plot(porazdelitvena)
# 2.d Mean and variance of the aggregate loss:
# Var(S) = Var(Y) * E(N) + E(Y)^2 * Var(N).
vrednosti <- knots(porazdelitvena)
verjetnosti <- diff(porazdelitvena)
Upanje_S_diskretno <- vrednosti %*% verjetnosti
Var_S <- (vrednosti * vrednosti) %*% verjetnosti - Upanje_S_diskretno^2 # Var(S) = E[S^2] - E[S]^2
# 2.e Value-at-risk and conditional tail expectation.
odst_995 <- VaR(porazdelitvena, 0.995)
izpad_005 <- CTE(porazdelitvena, 0.005)
# Exercise 3
# 3.a Monte Carlo simulation of the compound Poisson aggregate loss:
# for each simulated claim count, draw that many Weibull severities and sum.
vektor_N <- rpois(10000, 15)
vektor_S <- vapply(vektor_N, function(n_claims) sum(rweibull(n_claims, shape, scale)), numeric(1))
#vektor_S
# 3.b Simulated mean and variance (very close to the theoretical values).
Upanje_simulacija <- mean(vektor_S)
Variacija_simulacija <- var(vektor_S)
# 3.c 99.5% empirical quantile of the simulated aggregate loss.
# BUG FIX: this file contained unresolved git merge conflict markers
# (<<<<<<< HEAD ... ======= ... >>>>>>>), which make the script unparseable.
# Resolved in favour of the branch that keeps the quantile estimate and
# overlays both CDFs with a legend.
tvegana_vrednost_ <- sort(vektor_S)[9950]
# 3.d Compare Panjer's algorithm against the Monte Carlo simulation.
plot(porazdelitvena)
plot(ecdf(vektor_S),
     col = 'green',
     add = TRUE,
     lwd = 2)
legend('bottomright',
       legend = c('Panjerjev algoritem', 'Monte Carlo simulacija'),
       col = c('black', 'green'),
       lty = 1:1,
       bty = "n",
       lwd = 2)
|
2b77eacc7a3db0563be0e13c5b65e921d44f84f9
|
7372f0f88b855f3377745206ebe9629a1064f185
|
/seawaveQ/man/pesticideTrendCalcs.Rd
|
49dbd3cc0c53c325d7d04db29df2326ccc4d45d9
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0"
] |
permissive
|
kryberg-USGS/seawaveQ
|
eae47969876e6b3977bd4b3eafa449525854a505
|
1ca36d280d4bd4a62baf0fe02b805b4193a430cd
|
refs/heads/master
| 2021-04-09T11:44:21.420868
| 2020-12-13T21:20:42
| 2020-12-13T21:20:42
| 125,564,524
| 0
| 2
| null | 2020-12-14T22:22:10
| 2018-03-16T20:05:54
|
R
|
UTF-8
|
R
| false
| true
| 4,221
|
rd
|
pesticideTrendCalcs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pesticideTrendCalcs.R
\name{pesticideTrendCalcs}
\alias{pesticideTrendCalcs}
\title{Summarize linear trends}
\format{
The data frame returned has one row for each chemical analyzed
and the number of columns are defined as follows: \cr
\tabular{lll}{
pname \tab character \tab parameter analyzed \cr
mclass \tab numeric \tab a value of 1 or 2\cr
alpha \tab numeric \tab a significance level \cr
ctndPpor \tab numeric \tab the concentration trend in percent over the period of record \cr
cuciPpor \tab numeric \tab the concentration upper confidence interval for the trend in\cr
\tab \tab percent over the period of record \cr
clciPpor \tab numeric \tab the concentration lower confidence interval for the trend in\cr
\tab \tab percent over the period of record \cr
baseConc \tab numeric \tab the base concentration, median concentration or midpoint of\cr
\tab \tab trend line, for first year of trend period \cr
ctndOrigPORPercentBase \tab numeric \tab the concentration trend in original units over\cr
\tab \tab the period of record\cr
\tab \tab (calculation based on percent per year and base concentration)\cr
cuciOrigPORPercentBase \tab numeric \tab the concentration trend upper confidence interval\cr
\tab \tab for the trend in original units over the period of record\cr
\tab \tab (calculation based on percent per year and base concentration)\cr
clciOrigPORPercentBase \tab numeric \tab the concentration trend lower confidence interval\cr
\tab \tab for the trend in original units over the period of record\cr
\tab \tab (calculation based on percent per year and base concentration)\cr
ctndlklhd \tab numeric \tab is the concentration trend likelihood \cr
}
}
\usage{
pesticideTrendCalcs(
tndbeg,
tndend,
ctnd,
pval,
alpha,
setnd,
scl,
baseConc,
mclass
)
}
\arguments{
\item{tndbeg}{is the beginning (in whole or decimal years) of the
trend period. Zero means the begin date will be the beginning of the
concentration data, cdat.}
\item{tndend}{is the end of the trend (treated as December 31
of that year). Zero means the end date will be the end of the
concentration data, cdat.}
\item{ctnd}{is the concentration trend, the coefficient on the time variable.}
\item{pval}{is the p-value for the linear trend component.}
\item{alpha}{is the significance level or alpha value for statistical
significance and confidence intervals.}
\item{setnd}{is the standard error for the linear trend component.}
\item{scl}{is the scale factor from the \code{survreg.object}.}
\item{baseConc}{is the base concentration, the median concentration
(midpoint of the trend line) for the first year of the trend analysis.}
\item{mclass}{indicates the class of model to use.
A class 1 model is the traditional SEAWAVE-Q model that has a
linear time trend. A class 2 model is a newer option for longer
trend periods that uses a set of restricted cubic splines on the
time variable to provide a more flexible model.}
}
\value{
The data frame returned has one row for each chemical analyzed and
summaries of the trend.
}
\description{
Internal function to summarize the trend results.
}
\details{
pesticideTrendCalcs is called from within \link{fitswavecav}
}
\note{
Based on trend calculations used to display and summarize pesticide
trends here \url{https://nawqatrends.wim.usgs.gov/swtrends/}.
A likelihood value that is the functional equivalent of the two-sided
p-value associated with the significance level of the trend was
determined as follows:
Likelihood = (1 - (p-value / 2)), where p-value is the
p-value for the trend coefficient (Oelsner and others, 2017).
}
\references{
Oelsner, G.P., Sprague, L.A., Murphy, J.C., Zuellig, R.E., Johnson, H.M.,
Ryberg, K.R., Falcone, J.A., Stets, E.G., Vecchia, A.V., Riskin, M.L.,
De Cicco, L.A., Mills, T.J., and Farmer, W.H., 2017, Water-quality trends in
the Nation's rivers and streams, 1972--2012---Data preparation, statistical
methods, and trend results (ver. 2.0, October 2017): U.S. Geological Survey
Scientific Investigations Report 2017--5006, 136 p.,
\url{https://doi.org/10.3133/sir20175006}.
}
\author{
Karen R. Ryberg
}
|
669f4b859e9eb19a03fed381beb27b604b65f522
|
280c2378eaeca0585e1656fd0b7530123933b805
|
/shiny-server/rsloan/data/ashd.R
|
5d2c5a65ee2ef3561640d6bbd5295617b6c143bd
|
[
"MIT"
] |
permissive
|
kancheng/rsloan
|
a62c025fc40aadcc19468a41a8fe1cdffbbad719
|
2b2c658ca951f83615fa20711fade428f8bfa143
|
refs/heads/master
| 2020-05-23T10:11:56.172790
| 2017-10-23T04:00:39
| 2017-10-23T04:00:39
| 80,401,210
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 316
|
r
|
ashd.R
|
# data handle
# Coerce a value to factor, then return its character labels.
chfa.as <- function(x) {
  as_factor <- as.factor(x)
  as.character(as_factor)
}
# Numeric view of a value routed through factor-to-character conversion.
ncf.as <- function(x) {
  as.numeric(chfa.as(x))
}
# Convert a factor (or factor-like value) to numeric via its character labels,
# avoiding the factor-level-integer trap of a direct as.numeric().
nuch.as <- function(x) {
  chars <- as.character(x)
  as.numeric(chars)
}
# data head list
# Print the first `hrol` rows of every element of a list of tabular objects.
#
# BUG FIX: the original iterated `for (frl in 1:lethl)`, which on an empty
# list yields 1:0 and errors with a subscript-out-of-bounds; seq_along()
# handles length zero correctly.
hdlis <- function(hliobj, hrol = 5) {
  for (frl in seq_along(hliobj)) {
    print(head(hliobj[[frl]], hrol))
  }
}
|
71eb32723db74ce275be666853171c42b2c654f3
|
7255f071174e76a76399ec3cca5cd674cf397afd
|
/man/gender_vector.Rd
|
b1292acf5789cf2de4dab7663a3003332713e0a1
|
[
"MIT"
] |
permissive
|
bmschmidt/gender
|
628fa527b599aa24e826ac8ba240b996b7a6c09e
|
3ca8f9eb721c1eb22c5f6506f1cf8e3e93f969fb
|
refs/heads/master
| 2021-01-18T17:59:42.170732
| 2016-09-15T19:21:25
| 2016-09-15T19:21:53
| 20,813,775
| 1
| 0
| null | 2016-09-08T22:02:45
| 2014-06-13T18:28:21
|
R
|
UTF-8
|
R
| false
| true
| 2,358
|
rd
|
gender_vector.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gender_vector.R
\name{gender_vector}
\alias{gender_vector}
\title{Predict gender on each element of vector, with automatic defaults}
\usage{
gender_vector(names, years = 2000, year_ends = years,
countries = "United States", threshold = 0.5)
}
\arguments{
\item{names}{First names as a character vector. Names are case insensitive.}
\item{years}{The birth year of the name whose gender is to be predicted.
Default is 2000.}
\item{year_ends}{Optionally, a second set of years to make a range prediction.
This is useful if you are uncertain about a birth year, or if a name is rare and you
want to search a range of years. Defaults to the equivalent value of years.}
\item{countries}{The countries for which datasets are being used. Default is
"United States"
will call the "ipums" method between 1789 and 1930, and "ssa" between 1931 and 2011.
"Canada", "United Kingdom", "Denmark", "Iceland", "Norway", and "Sweden"
will call the 'napp' method between 1758 and 1910.}
\item{threshold}{Certainty required before a name is reported as 'male' or 'female.'
If .8, for example, 80% of occurrences of a name must be female before the name
returns female; otherwise the value will be NA.}
}
\value{
A vector of the same length as the longest passed argument. Each
value will be 'male', 'female', or NA.
}
\description{
This function selects sane defaults for the years and/or countries
supplied. It calls `gender_df` under the hood, so it is efficient at
eliminating duplicate calls. The vectorized results are suitable to
use in a dplyr `mutate` call.
}
\details{
All passed arguments should be of the same length, or else of length
1, in which case they will be recycled. For example, if all the
names are from the United States, just pass "United States" to countries:
but if half are Norwegian and half are Swedish, pass a vector of the
same length as the names.
}
\examples{
# Two men and a woman:
gender_vector(c("Peter","Paul","Mary"))
# One of these names is not like the others.
gender_vector(c("John","Paul","George","Ringo"))
# This one is slow--I'm not sure why, but clearly
# something isn't optimized for the same name on many
# years.
gender_vector("Leslie",years = 1850:1980)
gender_vector(c("Jean"),years = 1900, countries =
c("United States", "Sweden"))
}
|
01404c6859f635efcf69917fc488f5f79c23ca78
|
61c19d82e358cdd3fc7466c748953670ee30e09f
|
/man/id.to.rs.Rd
|
603d7fe49dfad0d2163b2b3ba492df60a5f6b746
|
[] |
no_license
|
cran/humarray
|
7d0352fc54528e30085935922b849f07b26b4cad
|
7d9a894a6ee7ce3a67bdc521bdad9313f567e175
|
refs/heads/master
| 2020-04-06T21:25:50.701686
| 2017-11-19T23:12:32
| 2017-11-19T23:12:32
| 54,423,012
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,097
|
rd
|
id.to.rs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/humarray.R
\name{id.to.rs}
\alias{id.to.rs}
\title{Convert from chip ID labels to dbSNP rs-ids}
\usage{
id.to.rs(ids)
}
\arguments{
\item{ids}{character, meant to be a list of chip ids, but if rs-ids are present they will not be altered.}
}
\value{
A character vector of SNP rs-ids, where the input was chip ids, rs-ids or a mixture, any text
other than this will result in NA values being returned in the character vector output.
}
\description{
Most SNPs will have an 'rs-id' from dbSNP/HapMap, and these are often the standard for reporting or
annotation lookup. These can differ from the IDs used on the chip. This function looks at the current
snp support (ChipInfo object) and returns rs-ids in place of chip IDs. Currently rs-ids are always
from build37.
}
\examples{
id.to.rs(c("imm_11_2138800","rs9467354","vh_1_1108138")) # middle one is already a rs.id
}
\seealso{
\code{\link{rs.to.id}}, \code{\link{GENE.to.ENS}}, \code{\link{ENS.to.GENE}}
}
\author{
Nicholas Cooper \email{nick.cooper@cimr.cam.ac.uk}
}
|
267f56fde8fb9fdeaeb2aa38d48e29cff5f4f39d
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.customer.engagement/man/pinpointemail_create_dedicated_ip_pool.Rd
|
748824289d20b3f9eb100df513d0fc0802d4c978
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,077
|
rd
|
pinpointemail_create_dedicated_ip_pool.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pinpointemail_operations.R
\name{pinpointemail_create_dedicated_ip_pool}
\alias{pinpointemail_create_dedicated_ip_pool}
\title{Create a new pool of dedicated IP addresses}
\usage{
pinpointemail_create_dedicated_ip_pool(PoolName, Tags)
}
\arguments{
\item{PoolName}{[required] The name of the dedicated IP pool.}
\item{Tags}{An object that defines the tags (keys and values) that you want to
associate with the pool.}
}
\value{
An empty list.
}
\description{
Create a new pool of dedicated IP addresses. A pool can include one or
more dedicated IP addresses that are associated with your Amazon
Pinpoint account. You can associate a pool with a configuration set.
When you send an email that uses that configuration set, Amazon Pinpoint
sends it using only the IP addresses in the associated pool.
}
\section{Request syntax}{
\preformatted{svc$create_dedicated_ip_pool(
PoolName = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
69467bb78ca49b55cc42f6e30a1831518d8c454a
|
13fd537c59bf51ebc44b384d2b5a5d4d8b4e41da
|
/R/tests/testdir_autoGen/runit_simpleFilterTest_syn_fp_prostate_35.R
|
cc033c942ced9ea78f1a24390aefc49a2a22d33f
|
[
"Apache-2.0"
] |
permissive
|
hardikk/h2o
|
8bd76994a77a27a84eb222a29fd2c1d1c3f37735
|
10810480518d43dd720690e729d2f3b9a0f8eba7
|
refs/heads/master
| 2020-12-25T23:56:29.463807
| 2013-11-28T19:14:17
| 2013-11-28T19:14:17
| 14,797,021
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,760
|
r
|
runit_simpleFilterTest_syn_fp_prostate_35.R
|
##
# Author: Autogenerated on 2013-11-27 18:13:58
# gitHash: c4ad841105ba82f4a3979e4cf1ae7e20a5905e59
# SEED: 4663640625336856642
##
source('./findNSourceUtils.R')
Log.info("======================== Begin Test ===========================")
## Munge-task unit test on the syn_fp_prostate data set: exercises row
## filtering with `<` (and `$`-style column access), plus filter-with-column-
## subset.  Originally autogenerated as one copy-pasted statement per case;
## rewritten data-driven so each case is a (column, threshold) pair.
## Interface unchanged: takes an H2O connection, returns nothing useful
## (each filter result overwrites the last; only the h2o calls matter).
simpleFilterTest_syn_fp_prostate_35 <- function(conn) {
  Log.info("A munge-task R unit test on data <syn_fp_prostate> testing the functional unit <<> ")
  Log.info("Uploading syn_fp_prostate")
  hex <- h2o.uploadFile(conn, locate("../../smalldata/syn_fp_prostate.csv"), "rsyn_fp_prostate.hex")

  all.cols <- c("GLEASON","DPROS","PSA","DCAPS","VOL","CAPSULE","RACE","ID","AGE")

  ## Thresholds are kept as the generator's literal strings so that both the
  ## logged message text and the parsed numeric (as.numeric of the same
  ## decimal string yields the same double) match the original exactly.
  simple.cases <- list(
    c("DPROS",   "-8.40623871554e+58"),
    c("VOL",     "-4.26570480587e+58"),
    c("ID",      "2.21369506579"),
    c("ID",      "1.89225495688"),
    c("CAPSULE", "-7.26771279424e+57"),
    c("PSA",     "3.13998336331e+58"),
    c("ID",      "0.756638586847"),
    c("PSA",     "9.05966309706e+57"),
    c("PSA",     "-6.3548450841e+58"),
    c("VOL",     "6.66519337013e+58"),
    c("GLEASON", "0.128130278031"),
    c("ID",      "6.06938825463"),
    c("CAPSULE", "6.30128472593e+58"),
    c("CAPSULE", "-5.14837717669e+58"))
  for (case in simple.cases) {
    col <- case[1L]
    val <- case[2L]
    thresh <- as.numeric(val)
    Log.info(paste0("Filtering out rows by < from dataset syn_fp_prostate and column \"",
                    col, "\" using value ", val))
    filterHex <- hex[hex[, c(col)] < thresh, ]
    Log.info("Perform filtering with the '$' sign also")
    ## do.call("$", ...) dispatches the same `$` method the generated
    ## hex$"COL" form did, but with a dynamic column name.
    filterHex <- hex[do.call("$", list(hex, col)) < thresh, ]
  }

  subset.cases <- list(
    c("VOL",     "-9.09089642098e+58"),
    c("RACE",    "-4.29361821104e+58"),
    c("VOL",     "6.13165081905e+58"),
    c("RACE",    "-4.81427960884e+58"),
    c("VOL",     "1.86520545362e+58"),
    c("PSA",     "-7.94296573001e+58"),
    c("DPROS",   "4.24589173053e+58"),
    c("CAPSULE", "4.11146405088e+58"),
    c("DCAPS",   "6.29582725618e+58"),
    c("AGE",     "-7.99559970811e+57"),
    c("CAPSULE", "4.05043364946e+58"),
    c("PSA",     "-4.13286067396e+58"),
    c("RACE",    "-1.48494835937e+58"),
    c("AGE",     "-7.26573421896e+58"))
  for (case in subset.cases) {
    col <- case[1L]
    val <- case[2L]
    thresh <- as.numeric(val)
    Log.info(paste0("Filtering out rows by < from dataset syn_fp_prostate and column \"",
                    col, "\" using value ", val, ", and also subsetting columns."))
    filterHex <- hex[hex[, c(col)] < thresh, c(col)]
    Log.info("Now do the same filter & subset, but select complement of columns.")
    ## NOTE(review): despite the log message, the generator selected ALL
    ## columns here, not the complement; preserved as-is to match behavior.
    filterHex <- hex[hex[, c(col)] < thresh, all.cols]
  }
}
## Open a connection to the local H2O cloud (myIP/myPort come from the
## sourced test utilities) and run the filter test, mapping any warning or
## error to the harness's WARN()/FAIL() result; PASS() if it completes.
conn <- new("H2OClient", ip=myIP, port=myPort)
tryCatch(test_that("simpleFilterTest_ on data syn_fp_prostate", simpleFilterTest_syn_fp_prostate_35(conn)), warning = function(w) WARN(w), error = function(e) FAIL(e))
PASS()
|
a0647fd4b056cbecb11f03aec2d764566aa1c1d2
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5972_9/rinput.R
|
f09b7110a787d71c8aeb17551c64a47ca5ef901b
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
## Read the phylogenetic tree for family 5972_9, strip its root, and write
## the unrooted topology back out in Newick format for codeml.
library(ape)
phy <- read.tree("5972_9.txt")
write.tree(unroot(phy), file = "5972_9_unrooted.txt")
|
c69394a900d1876acd537102d15f1d6bcc81fa9a
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/tools/R/dynamicHelp.R
|
4a1cdeb9578f47d973ed4a4326866ec350447e4a
|
[
"Artistic-2.0",
"GPL-2.0-or-later",
"LGPL-2.0-or-later",
"Artistic-1.0",
"CECILL-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"GPL-2.0-only",
"BSL-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 28,560
|
r
|
dynamicHelp.R
|
# File src/library/tools/R/dynamicHelp.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2016 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
## This may be asked for
## R.css, favicon.ico
## searches with path = "/doc/html/Search"
## documentation with path = "/doc/....", possibly updated under tempdir()/.R
## demos with path "/demo/*"
## Running demos, using path "/Demo/*"
## html help, either by topic, /library/<pkg>/help/<topic> (pkg=NULL means any)
## or by file, /library/<pkg>/html/<file>.html
httpd <- function(path, query, ...)
{
## Render an HTML page listing the (non-hidden) files of directory 'dir'.
## 'base' is the URL prefix prepended to each file link; 'up' is passed
## through to HTMLheader() as the parent-page link.
## Returns list(payload = <complete html page as one string>).
.HTMLdirListing <- function(dir, base, up)
{
files <- list.files(dir) # note, no hidden files are listed
out <- HTMLheader(paste0("Listing of directory<br/>", dir),
headerTitle = paste("R:", dir), logo=FALSE,
up = up)
if(!length(files))
out <- c(out, gettext("No files in this directory"))
else {
## one anchor per file; names converted to UTF-8 before embedding
urls <- paste0('<a href="', base, '/', files, '">', files, '</a>')
out <- c(out, "<dl>",
paste0("<dd>", mono(iconv(urls, "", "UTF-8")), "</dd>"),
"</dl>")
}
out <- c(out, "<hr/>\n</body></html>")
list(payload = paste(out, collapse="\n"))
}
.HTMLusermanuals <- function()
{
pkgs <- unlist(.get_standard_package_names())
out <- HTMLheader("R User Manuals")
for (pkg in pkgs) {
vinfo <- getVignetteInfo(pkg)
if (nrow(vinfo))
out <- c(out, paste0('<h2>Manuals in package', sQuote(pkg),'</h2>'),
makeVignetteTable(cbind(Package=pkg, vinfo[,c("File", "Title", "PDF", "R"), drop = FALSE])))
}
out <- c(out, "<hr/>\n</body></html>")
list(payload = paste(out, collapse="\n"))
}
.HTMLsearch <- function(query)
{
bool <- function(x) as.logical(as.numeric(x))
res <- if(identical(names(query), "category")) {
utils::help.search(keyword = query, verbose = 1L, use_UTF8 = TRUE)
} else if(identical(names(query), "results")) {
utils:::.hsearch_results()
} else {
fields <- types <- NULL
args <- list(pattern = ".")
for (i in seq_along(query))
switch(names(query)[i],
pattern = args$pattern <- query[i],
fields.alias =
if(bool(query[i]))
fields <- c(fields, "alias"),
fields.title =
if(bool(query[i]))
fields <- c(fields, "title"),
fields.concept =
if(bool(query[i]))
fields <- c(fields, "concept"),
fields.keyword =
if(bool(query[i]))
fields <- c(fields, "keyword"),
ignore.case =
args$ignore.case <- bool(query[i]),
agrep =
args$agrep <- bool(query[i]),
types.help =
if(bool(query[i]))
types <- c(types, "help"),
types.vignette =
if(bool(query[i]))
types <- c(types, "vignette"),
types.demo =
if(bool(query[i]))
types <- c(types, "demo"),
## Possibly passed from utils:::printhsearchInternal().
package = args$package <- strsplit(query[i], ";")[[1L]],
lib.loc = args$lib.loc <- strsplit(query[i], ";")[[1L]],
warning("Unrecognized search field: ", names(query)[i],
domain = NA)
)
args$fields <- fields
args$use_UTF8 <- TRUE
args$types <- types
do.call(utils::help.search, args)
}
types <- res$types
res <- res$matches
title <- "Search Results"
out <- c(HTMLheader(title),
if ("pattern" %in% names(query) && nchar(query["pattern"]))
paste0('The search string was <b>"', query["pattern"], '"</b>'),
'<hr/>\n')
if(!NROW(res))
out <- c(out, gettext("No results found"))
else {
vigfile0 <- ""
vigDB <- NULL
for (type in types) {
if(NROW(temp <- res[res[,"Type"] == type, , drop=FALSE]) > 0) {
temp <- temp[!duplicated(temp[, "ID"]), , drop = FALSE]
switch(type,
vignette = {
out <- c(out, paste0("<h3>", gettext("Vignettes:"), "</h3>"), "<dl>")
n <- NROW(temp)
vignettes <- matrix("", n, 5L)
colnames(vignettes) <-
c("Package", "File", "Title", "PDF", "R")
for (i in seq_len(NROW(temp))) {
topic <- temp[i, "Topic"]
pkg <- temp[i, "Package"]
vigfile <- file.path(temp[i, "LibPath"], "Meta", "vignette.rds")
if (!identical(vigfile, vigfile0)) {
vigDB <- readRDS(vigfile)
vigfile0 <- vigfile
}
vignette <- vigDB[topic == file_path_sans_ext(vigDB$PDF),]
# There should be exactly one row in the result, but
# bad packages might have more, e.g. vig.Snw and vig.Rnw
vignettes[i,] <- c(pkg, unlist(vignette[1,c("File", "Title", "PDF", "R")]))
}
out <- c(out, makeVignetteTable(vignettes))
},
demo = {
out <- c(out, paste0("<h3>", gettext("Code demonstrations:"), "</h3>"))
out <- c(out, makeDemoTable(temp))
},
help = {
out <- c(out, paste0("<h3>", gettext("Help pages:"), "</h3>"))
out <- c(out, makeHelpTable(temp))
})
}
}
}
out <- c(out, "<hr/>\n</body></html>")
list(payload = paste(out, collapse="\n"))
}
.HTML_hsearch_db_concepts <- function() {
concepts <- utils::hsearch_db_concepts()
s <- concepts$Concept
out <-
c(HTMLheader("Help search concepts"),
c("",
"<table>",
"<tr><th style=\"text-align: left\">Concept</th><th>Frequency</th><th>Packages</th><tr>",
paste0("<tr><td>",
"<a href=\"/doc/html/Search?pattern=",
vapply(reQuote(s), utils::URLencode, "", reserved = TRUE),
"&fields.concept=1&agrep=0\">",
shtmlify(substring(s, 1, 80)),
"</a>",
"</td><td style=\"text-align: right\">",
concepts$Frequency,
"</td><td style=\"text-align: right\">",
concepts$Packages,
"</td></tr>"),
"</table>",
"</body>",
"</html>"))
list(payload = paste(out, collapse = "\n"))
}
.HTML_hsearch_db_keywords <- function() {
keywords <- utils::hsearch_db_keywords()
out <-
c(HTMLheader("Help search keywords"),
c("",
"<table>",
"<tr><th style=\"text-align: left\">Keyword</th><th style=\"text-align: left\">Concept</th><th>Frequency</th><th>Packages</th><tr>",
paste0("<tr><td>",
"<a href=\"/doc/html/Search?category=",
keywords$Keyword,
"\">",
keywords$Keyword,
"</a>",
"</td><td>",
shtmlify(substring(keywords$Concept, 1, 80)),
"</td><td style=\"text-align: right\">",
keywords$Frequency,
"</td><td style=\"text-align: right\">",
keywords$Packages,
"</td></tr>"),
"</table>",
"</body>",
"</html>"))
list(payload = paste(out, collapse = "\n"))
}
## Undo the absolute-path link rewriting that fixup.package.URLs (R < 2.10.0)
## applied to a package's static HTML pages, turning the links back into
## relative ones so the dynamic-help httpd can serve them.
## Returns list(payload = rewritten page) when the package has a
## 'fixedHTMLlinks' marker file, otherwise list(file = file) unchanged.
unfix <- function(file)
{
## we need to re-fix links altered by fixup.package.URLs
## in R < 2.10.0
fixedfile <- sub("/html/.*", "/fixedHTMLlinks", file)
if(file.exists(fixedfile)) {
## 'top' is the absolute prefix that was substituted into the links
top <- readLines(fixedfile)
lines <- readLines(file)
lines <- gsub(paste0(top, "/library"),
"../../", lines, fixed = TRUE)
lines <- gsub(paste0(top, "/doc/"),
"../../../doc/", lines, fixed = TRUE)
return(list(payload=paste(lines, collapse="\n")))
}
list(file = file)
}
## Guess the MIME content type from a path's file extension; unknown or
## missing extensions fall through to "text/plain".
## NOTE(review): when strsplit yields character(0) (path with no "." pieces),
## the `else ""` arm does NOT assign, so 'ext' stays character(0) and
## switch() would error -- callers appear to always pass real file names.
mime_type <- function(path)
{
ext <- strsplit(path, ".", fixed = TRUE)[[1L]]
## take the piece after the last dot, e.g. "a.tar.gz" -> "gz"
if(n <- length(ext)) ext <- ext[n] else ""
switch(ext,
"css" = "text/css",
"gif" = "image/gif", # in R2HTML
"jpg" = "image/jpeg",
"png" = "image/png",
"svg" = "image/svg+xml",
"html" = "text/html",
"pdf" = "application/pdf",
"eps" =,
"ps" = "application/postscript", # in GLMMGibbs, mclust
"sgml" = "text/sgml", # in RGtk2
"xml" = "text/xml", # in RCurl
"text/plain")
}
## Build the "; charset=<enc>" suffix for a content-type header from the
## package's declared Encoding field in its DESCRIPTION file; returns the
## empty string when no Encoding is declared.
charsetSetting <- function(pkg) {
desc <- system.file("DESCRIPTION", package = pkg)
enc <- read.dcf(desc, "Encoding")
if (is.na(enc)) "" else paste0("; charset=", enc)
}
## Local sQuote: wrap text in typographic single quotes (shadows
## base::sQuote inside httpd so the quote characters are fixed).
sQuote <- function(text)
paste0("‘", text, "’")
## Render text in the monospace "samp" span style used by the help pages.
mono <- function(text)
paste0('<span class="samp">', text, "</span>")
## Build a minimal HTML error page wrapped as an httpd payload list.
error_page <- function(msg)
list(payload =
paste0(HTMLheader("httpd error"), msg, "\n</body></html>"))
cssRegexp <- "^/library/([^/]*)/html/R.css$"
if (grepl("R\\.css$", path) && !grepl(cssRegexp, path))
return(list(file = file.path(R.home("doc"), "html", "R.css"),
"content-type" = "text/css"))
else if(path == "/favicon.ico")
return(list(file = file.path(R.home("doc"), "html", "favicon.ico")))
else if(path == "/NEWS")
return(list(file = file.path(R.home("doc"), "html", "NEWS.html")))
else if(grepl("^/NEWS[.][[:digit:]]$", path))
return(list(file = file.path(R.home("doc"), sub("/", "", path)),
"content-type" = "text/plain; encoding=utf-8"))
else if(!grepl("^/(doc|library|session)/", path))
return(error_page(paste("Only NEWS and URLs under", mono("/doc"),
"and", mono("/library"), "are allowed")))
else if(path == "/doc/html/UserManuals.html")
return(.HTMLusermanuals())
else if(path == "/doc/html/hsearch_db_concepts.html")
return(.HTML_hsearch_db_concepts())
else if(path == "/doc/html/hsearch_db_keywords.html")
return(.HTML_hsearch_db_keywords())
## ----------------------- per-package documentation ---------------------
## seems we got ../..//<pkg> in the past
fileRegexp <- "^/library/+([^/]*)/html/([^/]*)\\.html$"
topicRegexp <- "^/library/+([^/]*)/help/([^/]*)$"
docRegexp <- "^/library/([^/]*)/doc(.*)"
demoRegexp <- "^/library/([^/]*)/demo$"
demosRegexp <- "^/library/([^/]*)/demo/([^/]*)$"
DemoRegexp <- "^/library/([^/]*)/Demo/([^/]*)$"
newsRegexp <- "^/library/([^/]*)/NEWS$"
figureRegexp <- "^/library/([^/]*)/(help|html)/figures/([^/]*)$"
sessionRegexp <- "^/session/"
file <- NULL
if (grepl(topicRegexp, path)) {
## ----------------------- package help by topic ---------------------
pkg <- sub(topicRegexp, "\\1", path)
if (pkg == "NULL") pkg <- NULL # There were multiple hits in the console
topic <- sub(topicRegexp, "\\2", path)
## if a package is specified, look there first, then everywhere
if (!is.null(pkg)) # () avoids deparse here
file <- utils::help(topic, package = (pkg), help_type = "text")
if (!length(file))
file <- utils::help(topic, help_type = "text", try.all.packages = TRUE)
if (!length(file)) {
msg <- gettextf("No help found for topic %s in any package.",
mono(topic))
return(list(payload = error_page(msg)))
} else if (length(file) == 1L) {
path <- dirname(dirname(file))
file <- paste0('../../', basename(path), '/html/',
basename(file), '.html')
## cat("redirect to", file, "\n")
## We need to do this because there are static HTML pages
## with links to "<file>.html" for topics in the same
## package, and if we served one of such a page as a link from
## a different package those links on the page would not work.
return(list(payload = paste0('Redirect to <a href="', file, '">"',
basename(file), '"</a>'),
"content-type" = 'text/html',
header = paste0('Location: ', file),
"status code" = 302L)) # temporary redirect
} else if (length(file) > 1L) {
paths <- dirname(dirname(file))
fp <- file.path(paths, "Meta", "Rd.rds")
tp <- basename(file)
titles <- tp
for (i in seq_along(fp)) {
tmp <- try(readRDS(fp[i]))
titles[i] <- if(inherits(tmp, "try-error"))
"unknown title" else
tmp[file_path_sans_ext(tmp$File) == tp[i], "Title"]
}
packages <- paste0('<dt><a href="../../',
basename(paths), '/html/',
basename(file), '.html">', titles,
'</a></dt><dd> (in package <a href="../../',
basename(paths),
'/html/00Index.html">', basename(paths),
'</a> in library ', dirname(paths), ")</dd>",
collapse = "\n")
return(list(payload =
paste0("<p>",
## for languages with multiple plurals ....
sprintf(ngettext(length(paths),
"Help on topic '%s' was found in the following package:",
"Help on topic '%s' was found in the following packages:"
), topic),
"</p><dl>\n",
packages, "</dl>",
collapse = "\n")
))
}
} else if (grepl(fileRegexp, path)) {
## ----------------------- package help by file ---------------------
pkg <- sub(fileRegexp, "\\1", path)
helpdoc <- sub(fileRegexp, "\\2", path)
if (helpdoc == "00Index") {
## ------------------- package listing ---------------------
file <- system.file("html", "00Index.html", package = pkg)
if(!nzchar(file) || !file.exists(file)) {
msg <- if(nzchar(system.file(package = pkg)))
gettextf("No package index found for package %s",
mono(pkg))
else
gettextf("No package named %s could be found",
mono(pkg))
return(error_page(msg))
} else {
if(.Platform$OS.type == "windows") return(unfix(file))
return(list(file = file))
}
}
## ----------------------- package help file ---------------------
path <- system.file("help", package = pkg)
if (!nzchar(path)) {
msg <- if(nzchar(system.file(package = pkg)))
gettextf("No help found for package %s", mono(pkg) )
else
gettextf("No package named %s could be found", mono(pkg))
return(error_page(msg))
}
## if 'topic' is not a help doc, try it as an alias in the package
contents <- readRDS(sub("/help", "/Meta/Rd.rds", path, fixed = TRUE))
files <- sub("\\.[Rr]d$", "", contents$File)
if(helpdoc %notin% files) {
## or call help()
aliases <- contents$Aliases
lens <- lengths(aliases)
aliases <- structure(rep.int(contents$File, lens),
names = unlist(aliases))
tmp <- sub("\\.[Rr]d$", "", aliases[helpdoc])
if(is.na(tmp)) {
msg <- gettextf("Link %s in package %s could not be located",
mono(helpdoc), mono(pkg))
files <- utils::help(helpdoc, help_type = "text",
try.all.packages = TRUE)
if (length(files)) {
path <- dirname(dirname(files))
files <- paste0('/library/', basename(path), '/html/',
basename(files), '.html')
msg <- c(msg, "<br/>",
"However, you might be looking for one of",
"<p></p>",
paste0('<p><a href="', files, '">',
mono(files), "</a></p>")
)
}
return(error_page(paste(msg, collapse = "\n")))
}
helpdoc <- tmp
}
## Now we know which document we want in which package
dirpath <- dirname(path)
## pkgname <- basename(dirpath)
## RdDB <- file.path(path, pkgname)
outfile <- tempfile("Rhttpd")
Rd2HTML(utils:::.getHelpFile(file.path(path, helpdoc)),
out = outfile, package = dirpath,
dynamic = TRUE)
on.exit(unlink(outfile))
return(list(payload = paste(readLines(outfile), collapse = "\n")))
} else if (grepl(docRegexp, path)) {
## ----------------------- package doc directory ---------------------
pkg <- sub(docRegexp, "\\1", path)
rest <- sub(docRegexp, "\\2", path)
docdir <- system.file("doc", package = pkg)
up <- paste0("/library/", pkg, "/html/00Index.html")
if(!nzchar(docdir))
return(error_page(gettextf("No docs found for package %s",
mono(pkg))))
if(nzchar(rest) && rest != "/") {
file <- paste0(docdir, rest)
exists <- file.exists(file)
if (!exists && rest == "/index.html") {
rest <- ""
file <- docdir
}
if(dir.exists(file))
return(.HTMLdirListing(file,
paste0("/library/", pkg, "/doc", rest),
up))
else if (exists)
return(list(file = file, "content-type" = mime_type(rest)))
else
return(error_page(gettextf("URL %s was not found", mono(path))))
} else {
## request to list <pkg>/doc
return(.HTMLdirListing(docdir,
paste("/library", pkg, "doc", sep="/"),
up))
}
} else if (grepl(demoRegexp, path)) {
pkg <- sub(demoRegexp, "\\1", path)
url <- paste0("http://127.0.0.1:", httpdPort(),
"/doc/html/Search?package=",
pkg, "&agrep=0&types.demo=1&pattern=")
return(list(payload = paste0('Redirect to <a href="', url,
'">help.search()</a>'),
"content-type" = 'text/html',
header = paste0('Location: ', url),
"status code" = 302L)) # temporary redirect
} else if (grepl(demosRegexp, path)) {
pkg <- sub(demosRegexp, "\\1", path)
demo <- sub(demosRegexp, "\\2", path)
file <- system.file(file.path("demo", demo), package=pkg)
return(list(file = file, "content-type" = mime_type(demo)))
} else if (grepl(DemoRegexp, path)) {
pkg <- sub(DemoRegexp, "\\1", path)
demo <- sub(DemoRegexp, "\\2", path)
demo(demo, package=pkg, character.only=TRUE, ask=FALSE)
return( list(payload = paste0("Demo '", pkg, "::", demo,
"' was run in the console.",
" To repeat, type 'demo(",
pkg, "::", demo,
")' in the console.")) )
} else if (grepl(newsRegexp, path)) {
pkg <- sub(newsRegexp, "\\1", path)
if (!is.null(query) && !is.na(subset <- query["subset"])) {
# See utils:::print.news_db for the encoding of the subset
rle <- strsplit(subset, "_")[[1]]
rle <- structure(list(lengths = as.numeric(rle),
values = rep(c(TRUE, FALSE), length.out = length(rle))),
class = "rle")
news <- news(inverse.rle(rle)[-1], package = pkg)
} else
news <- news(package = pkg)
formatted <- toHTML(news,
title=paste("NEWS in package", sQuote(pkg)),
up="html/00Index.html")
if (length(formatted))
return( list(payload = paste(formatted, collapse="\n")) )
else
return( list(file = system.file("NEWS", package = pkg),
"content-type" = paste0("text/plain", charsetSetting(pkg) ) ) )
} else if (grepl(figureRegexp, path)) {
pkg <- sub(figureRegexp, "\\1", path)
fig <- sub(figureRegexp, "\\3", path)
file <- system.file("help", "figures", fig, package=pkg)
return( list(file=file, "content-type" = mime_type(fig)) )
} else if (grepl(sessionRegexp, path)) {
tail <- sub(sessionRegexp, "", path)
file <- file.path(tempdir(), tail)
return( list(file=file, "content-type" = mime_type(tail)) )
} else if (grepl(cssRegexp, path)) {
pkg <- sub(cssRegexp, "\\1", path)
return( list(file = system.file("html", "R.css", package = pkg),
"content-type" = "text/css") )
} else if (startsWith(path, "/library/")) {
descRegexp <- "^/library/+([^/]+)/+DESCRIPTION$"
if(grepl(descRegexp, path)) {
pkg <- sub(descRegexp, "\\1", path)
file <- system.file("DESCRIPTION", package = pkg)
return(list(file = file, "content-type" = paste0("text/plain", charsetSetting(pkg))))
} else
return(error_page(gettextf("Only help files, %s, %s and files under %s and %s in a package can be viewed", mono("NEWS"),
mono("DESCRIPTION"), mono("doc/"), mono("demo/"))))
}
## ----------------------- R docs ---------------------
if(path == "/doc/html/Search.html") {
## redirect to the page that has search enabled
list(file = file.path(R.home("doc"), "html/SearchOn.html"))
} else if(path == "/doc/html/Search") {
.HTMLsearch(query)
} else if(path == "/doc/html/packages.html") {
## remake as needed
utils::make.packages.html(temp = TRUE)
list(file = file.path(tempdir(), ".R", path))
} else if(path == "/doc/html/rw-FAQ.html") {
file <- file.path(R.home("doc"), sub("^/doc", "", path))
if(file.exists(file))
list(file = file, "content-type" = mime_type(path))
else {
url <- "https://cran.r-project.org/bin/windows/base/rw-FAQ.html"
return(list(payload = paste0('Redirect to <a href="', url, '">"',
url, '"</a>'),
"content-type" = 'text/html',
header = paste0('Location: ', url),
"status code" = 302L)) # temporary redirect
}
} else if(grepl("doc/html/.*html$" , path) &&
file.exists(tmp <- file.path(tempdir(), ".R", path))) {
## use updated version, e.g. of packages.html
list(file = tmp)
} else if(grepl("doc/manual/.*html$" , path)) {
file <- file.path(R.home("doc"), sub("^/doc", "", path))
if(file.exists(file))
list(file = file, "content-type" = mime_type(path))
else if(file.exists(file <- sub("/manual/", "/html/", file))) {
## tarball has pre-built version of R-admin.html
list(file = file, "content-type" = mime_type(path))
} else {
## url <- "https://cran.r-project.org/manuals.html"
version <-
if(grepl("unstable", R.version$status)) "r-devel" else "r-patched"
url <- file.path("https://cran.r-project.org/doc/manuals",
version, basename(path))
return(list(payload = paste0('Redirect to <a href="', url, '">"',
url, '"</a>'),
"content-type" = 'text/html',
header = paste0('Location: ', url),
"status code" = 302L)) # temporary redirect
}
} else {
if(startsWith(path, "/doc/")) {
## /doc/AUTHORS and so on.
file <- file.path(R.home("doc"), sub("^/doc", "", path))
} else return(error_page(gettextf("unsupported URL %s", mono(path))))
if(!file.exists(file))
error_page(gettextf("URL %s was not found", mono(path)))
else
list(file = file, "content-type" = mime_type(path))
}
}
## Accessor for the dynamic-help server port, kept in a private closure
## variable. Call with no argument to read the current value; pass a value
## to record it. Encoding: 0 = untried, < 0 = failed to start, > 0 = the
## actual port the httpd server is listening on.
httpdPort <- local({
    .port <- 0L
    function(new) {
        if (missing(new)) return(.port)
        .port <<- new
    }
})
## Start (or stop) the HTTP server that backs R's dynamic HTML help.
##
## start = TRUE  -> start the server (error if already running/failed);
## start = FALSE -> stop a running server (error if none);
## start = NA    -> start only if not already running, never error.
##
## Returns the port invisibly (see httpdPort() for the 0 / <0 / >0 encoding).
## Honours the R_DISABLE_HTTPD environment variable and the "help.ports"
## option; otherwise tries up to 10 pseudo-random ports in [10000, 32000].
startDynamicHelp <- function(start = TRUE)
{
    # Hard kill-switch: record failure state and bail out.
    if(nzchar(Sys.getenv("R_DISABLE_HTTPD"))) {
        httpdPort(-1L)
        warning("httpd server disabled by R_DISABLE_HTTPD", immediate. = TRUE)
        utils::flush.console()
        return(invisible(httpdPort()))
    }
    port <- httpdPort()
    # NA means "ensure running": recurse with TRUE only when needed.
    if (is.na(start)) {
        if(port <= 0L) return(startDynamicHelp(TRUE))
        return(invisible(port))
    }
    if (start && port) {
        if(port > 0L) stop("server already running")
        else stop("server could not be started on an earlier attempt")
    }
    if(!start && (port <= 0L))
        stop("no running server to stop")
    if (start) {
        utils::flush.console()
        OK <- FALSE
        ports <- getOption("help.ports")
        if (is.null(ports)) {
            ## Choose 10 random port numbers between 10000 and 32000.
            ## The random seed might match
            ## on multiple instances, so add the time as well.  But the
            ## time may only be accurate to seconds, so rescale it to
            ## 5 minute units.
            ports <- 10000 + 22000*((stats::runif(10) + unclass(Sys.time())/300) %% 1)
        }
        ports <- as.integer(ports)
        # An all-zero "help.ports" option means "do not start a server".
        if (all(ports == 0))
            return(invisible(0))
        message("starting httpd help server ...", appendLF = FALSE)
        for(i in seq_along(ports)) {
            ## the next can throw an R-level error,
            ## so do not assign port unless it succeeds.
            status <- .Call(C_startHTTPD, "127.0.0.1", ports[i])
            if (status == 0L) {
                OK <- TRUE
                httpdPort(ports[i])
                break
            }
            if (status != -2L) break
            ## so status was -2, which means port in use
        }
        if (OK) {
            message(" done")
            utils::flush.console()
            ## FIXME: actually test the server
        } else {
            warning("failed to start the httpd server", immediate. = TRUE)
            utils::flush.console()
            httpdPort(-1L)
        }
    } else {
        ## Not really tested
        .Call(C_stopHTTPD)
        httpdPort(0L)
    }
    invisible(httpdPort())
}
## Build the loopback URL at which the dynamic-help httpd serves `path`.
## `port` defaults to the currently recorded server port.
dynamicHelpURL <- function(path, port = httpdPort()) {
    paste0("http://127.0.0.1:", port, path)
}
## environment holding potential custom httpd handlers
.httpd.handlers.env <- new.env()
|
857db95d6f03168d5ff7af5e92e33d787ddf9865
|
a5b1cc470773889f88d19070dca627f7bcf952ad
|
/run_analysis.R
|
9042a2a8d74c4c5f4e24d4343fa73586579eb73a
|
[] |
no_license
|
sir-real/coursera_getting_cleaning_data
|
313b41d0b9aa5498d3b3934cf5e278bda6db86e1
|
1ba6a5b5669ee48341b7c727f862ea0f53aaa45c
|
refs/heads/master
| 2021-04-26T23:52:51.642849
| 2018-03-05T07:07:01
| 2018-03-05T07:07:01
| 123,874,073
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,172
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test
# partitions, keep the mean/std measurements, give them readable labels,
# and write a tidy per-activity / per-volunteer summary.

# Package includes
library(dplyr)

# Step 1 - load and merge the train/test partitions -------------------------
har_dir <- "./UCI HAR Dataset"

feature_names <- read.table(file.path(har_dir, "features.txt"))
feature_names <- as.character(feature_names[, 2])

x_train <- read.table(file.path(har_dir, "train", "X_train.txt"))
y_train <- read.table(file.path(har_dir, "train", "y_train.txt"))
subject_train <- read.table(file.path(har_dir, "train", "subject_train.txt"))
colnames(y_train) <- "Activity"
colnames(subject_train) <- "Volunteer"
colnames(x_train) <- feature_names

x_test <- read.table(file.path(har_dir, "test", "X_test.txt"))
y_test <- read.table(file.path(har_dir, "test", "y_test.txt"))
subject_test <- read.table(file.path(har_dir, "test", "subject_test.txt"))
colnames(y_test) <- "Activity"
colnames(subject_test) <- "Volunteer"
colnames(x_test) <- feature_names

# check.names = FALSE keeps the raw feature names (they contain (), -, etc.)
alldata <- rbind(
  data.frame(subject_train, y_train, x_train, check.names = FALSE),
  data.frame(subject_test, y_test, x_test, check.names = FALSE)
)

# Step 2 - keep only the id, activity, mean and std columns ------------------
newdata <- alldata[grep("volunteer|activity|mean|std", tolower(colnames(alldata)))]

# Steps 3 & 4 - descriptive activity names, readable variable names ----------
activity_lookup <- read.table(file.path(har_dir, "activity_labels.txt"))
colnames(activity_lookup) <- c("LabelId", "Activity")
# Activity codes are 1-based, so they index straight into the label vector.
newdata$Activity <- as.character(activity_lookup$Activity)[newdata$Activity]

colnames(newdata) <- gsub("fBody", "Body", colnames(newdata))
colnames(newdata) <- gsub("tBody", "Body", colnames(newdata))
colnames(newdata) <- gsub("\\.", " ", colnames(newdata))
colnames(newdata) <- gsub("\\ ", " ", colnames(newdata))

# Step 5 - average of every variable per activity and volunteer --------------
tidydata <- aggregate(. ~ Activity + Volunteer, newdata, mean)
write.table(tidydata, "tidydata.txt", row.names = FALSE)

# Read the tidy file back and show the first rows as a sanity check.
readtidydata <- read.table("./tidydata.txt")
print(head(readtidydata))
|
38638778d45bad068bfd0d4ff72539ee51e1cb63
|
ab7d15d06ed92cd51cc383dc9e98ae2a8fa41eaa
|
/R/zzz.R
|
e55da2823a952cf79c036bfafb8550760306d742
|
[
"MIT"
] |
permissive
|
rich-iannone/DiagrammeR
|
14c46eb994eb8de90c50166a5d2d7e0668d3f7c5
|
218705d52d445c5d158a04abf8107b425ea40ce1
|
refs/heads/main
| 2023-08-18T10:32:30.784039
| 2023-05-19T16:33:47
| 2023-05-19T16:33:47
| 28,556,914
| 1,750
| 293
|
NOASSERTION
| 2023-07-10T20:46:28
| 2014-12-28T08:01:15
|
R
|
UTF-8
|
R
| false
| false
| 1,266
|
r
|
zzz.R
|
#nocov start
# Register the column/variable names this package uses via non-standard
# evaluation (dplyr/tidyr pipelines, ggplot2 aesthetics, etc.) so that
# `R CMD check` does not report them as undefined global variables.
# This is purely a static-analysis hint; it has no runtime effect.
utils::globalVariables(
  c(
    ".",
    "action_index",
    "action_name",
    "attr",
    "attr_type",
    "deg",
    "degree",
    "df_id",
    "df_id__",
    "display",
    "duration",
    "edge",
    "edges",
    "edge_from_to",
    "e_id",
    "fa_uri",
    "fillcolor",
    "from",
    "from.y",
    "from_label",
    "from_to",
    "from_type",
    "function_used",
    "hex",
    "id",
    "id_external",
    "id.x",
    "id.y",
    "id__",
    "indeg",
    "indegree",
    "indegree_dist",
    "indegree_hist",
    "index",
    "index__",
    "image",
    "label",
    "label_col",
    "n",
    "new_fillcolor",
    "new_node_id",
    "node",
    "nodes",
    "node_edge__",
    "node_id",
    "outdeg",
    "outdegree",
    "outdegree_dist",
    "outdegree_hist",
    "rel",
    "step_created_edges",
    "step_created_nodes",
    "step_deleted_edges",
    "step_deleted_nodes",
    "step_init_with_edges",
    "step_init_with_nodes",
    "string",
    "time_modified",
    "to",
    "to.y",
    "to_label",
    "to_type",
    "total_degree",
    "total_degree_dist",
    "total_degree_hist",
    "type",
    "V1",
    "V2",
    "value",
    "value.x",
    "value_x",
    "value.y",
    "version_id",
    "x",
    "y"
  )
)
#nocov end
|
4aed18a0b289e4c77310cebf73d79d510d837cb0
|
7b74f00cd80694634e6925067aaeb6572b09aef8
|
/2018/Assignment/FE8828-LiangXuesen/Assignment1/app.R
|
a5fcd7b527cef9da45a80cb8a91c40a9c56f27ed
|
[] |
no_license
|
leafyoung/fe8828
|
64c3c52f1587a8e55ef404e8cedacbb28dd10f3f
|
ccd569c1caed8baae8680731d4ff89699405b0f9
|
refs/heads/master
| 2023-01-13T00:08:13.213027
| 2020-11-08T14:08:10
| 2020-11-08T14:08:10
| 107,782,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,542
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram
# Shiny UI: a static brochure-style site with three top-level sections
# ("About us", "Services", "Contact us"). All content is hard-coded;
# there is no reactive server logic.
ui <- fluidPage(
  # Application title
  titlePanel("CrystallBall Energy Trading and Risk Management Inc."),
  hr(),
  navbarPage(
    title="CrystallBall",
    # Landing tab: founder bio (sidebar) + photo (main) + company history.
    tabPanel("About us",
      hr(),
      titlePanel(h1("Founder")),
      sidebarLayout(position="left",
        sidebarPanel(
          h3("Personal Statement"),
          hr(),
          h4("Proven FinTech professional with 13 years+ of experience in front
office trading and risk management system implementation and support.
Sound financial market knowledge and hands-on front office user facing experience
especially in energy trading and risk management industry. Strong risk management and
trading analysis modelling expertise (VBA & SQL) in leading commodity trading houses.
Familiar with Java, C#, SQL, VBA, and energy trading and risk management systems (ETRM).")
        ),
        # NOTE(review): image files (myPhoto.JPG etc.) must live in www/ —
        # confirm they ship with the app.
        mainPanel(tags$img(src = "myPhoto.JPG",align="center"))
      ),
      hr(),
      titlePanel(h1("Company History")),
      wellPanel(h3("Comany established in 2018. To become a true market leader in CTRM system, and
gain reputation in the market with capabilities and features other systems do not have. Through
combining deep understanding of commodity/energy trading with technology Innovation, be courageous
in adopting new innovative approaches."))
    ),
    # Services tab: three sub-tabs describing the product offering.
    tabPanel("Services",
      hr(),
      tabsetPanel(
        tabPanel(h4("Risk Management"),
          wellPanel(
            #point list
            tags$ul(h1("Energy Risk Mangement"),
              tags$li(h4("Forward curve management")),
              tags$li(h4("Price risk management")),
              tags$li(h4("Position management - Futures, Options, Physical Cargos")),
              tags$li(h4("Logistic - Shipment & Inventory mangement"))
            ),
            #add link
            a(h4("Check the latest Oil prices"), href="http://www.oil-price.net/")
          )
        ),
        tabPanel(h4("Trading Analytics"),
          fluidRow(
            column(4, h3("Refinery Capacity"), tags$img(src = "Refinery.JPG", height = 450, width = 400,align="center")),
            column(4, h3("Demand & Supply Forecast"), tags$img(src = "DemandSupply.JPG", height = 450, width = 400,align="center")),
            column(4, h3("Ship Tracking"), tags$img(src = "Ship.JPG", height = 450, width = 400,align="center"))
          )
        ),
        tabPanel(h4("Trading Optimization"), tags$img(src = "Optimization.JPG",align="center"))
      )),
    # Contact menu: address / phone / email pages.
    navbarMenu(title="Contact us",
      hr(),
      tabPanel("Address",
        titlePanel(h1("Address:\n\n")),
        mainPanel(
          h2("#50-01, Suntec City Tower 2, Singapore\n\n\n\n"),
          tags$img(src = "AddressPic.JPG", height = 450, width = 600,align="center")
        )
      ),
      tabPanel("Phone",
        navlistPanel(
          "Contact us",
          tabPanel(h4("Office"), h1("+65-6666 8888")),
          tabPanel(h4("Mobile"), h1("+65-9070 9731"))
        )
      ),
      tabPanel("Email",
        titlePanel(h1("Email address: liangxuesen0907@hotmail.com")),
        a(h2("Email us now"), href="mailto:liangxuesen0907@hotmail.com"),
        tags$img(src="Steve.PNG",align="center")
      )
    )
  )
)
# Define server logic required to draw a histogram
# (Intentionally empty: the app is purely static content.)
server <- function(input, output) {
}
# Run the application
shinyApp(ui = ui, server = server)
|
4c0ece0db60a1742e53900189cad37f678298c16
|
6b2ccc8032c85e93cc67971c5a6dabf1bf6aac20
|
/download-and-unzip-data.r
|
bda473a22e64b7c4c549e72f49ee0031bc896b56
|
[] |
no_license
|
anhnguyendepocen/r-code-examples
|
851adbbff4f51b8a75a8b71a1dfd303669e5de44
|
13fbab181f874de1db2b0899b10efafc825fa625
|
refs/heads/master
| 2022-01-28T21:19:54.726404
| 2019-04-02T14:35:27
| 2019-04-02T14:35:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
download-and-unzip-data.r
|
# Download And Unzip Data ---------------------------------------------------
# NTIA National Broadband Map data for Alaska, December 2012.
bb_url <- "http://www2.ntia.doc.gov/files/broadband-data/AK-NBM-CSV-Dec-2012.zip"
bb_zip <- "AK-NBM-CSV-Dec-2012.zip"

# Fetch the archive into the working directory, then unpack it.
download.file(bb_url, destfile = bb_zip)
unzip(bb_zip)

# The archive contains a nested zip with the wireless data; unpack that too.
unzip("AK-NBM-WIRELESS-CSV-Dec-2012.zip")

# Read the pipe-separated wireless table into R.
data <- read.delim("AK-NBM-Wireless-CSV-DEC-2012.TXT", sep = "|")
|
67e107371d63a44a6c76d72915de937d5eb2710e
|
0cc8920c857ada69f8d15d734663062c1337f109
|
/R/spaceopt/scoring/space_allocation_loop.R
|
9b020f97d52f7b65d7cf1029a0fc9be9c12283a2
|
[] |
no_license
|
kpushkar/scalene
|
783f08a13ade0da14454f3b19b71b1c1cc92cdae
|
d38412597dcb8b3081b6e0b9567fd2c4e93a91df
|
refs/heads/master
| 2021-01-12T02:27:28.296528
| 2017-01-26T10:36:02
| 2017-01-26T10:36:02
| 77,958,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,664
|
r
|
space_allocation_loop.R
|
# version 2 of space allocation
# this time using a completely simplistic approach of space allocation
# and then compare with the non-linear optimisation answer
# logic is as follows
# while space left to allocate
#   allocate space to product with greatest incremental sales
#   update next incremental sales value for the product
# end
# Pulls in space_curve_value() and other helpers used below.
source("R/utils/scalene_utilities.R")
# SPACE_ELASTICITY_VALUES_DB <- read.csv("data/spaceopt/output/Space_Elasticity_Values_DB.csv",
#                                        stringsAsFactors = F)
# Per-product fitted curve parameters (ALPHA..DELTA), keyed by PRODUCT_ID.
SPACE_ELASTICITY_PARAMS_DB <- read.csv("data/spaceopt/output/Space_Elasticity_Params_DB.csv",
                                       stringsAsFactors = F)
# lets have only few product ids at the moment
# SPACE_ELASTICITY_PARAMS_DB <- SPACE_ELASTICITY_PARAMS_DB[5:7,]
# SPACE_ELASTICITY_VALUES_DB <- SPACE_ELASTICITY_VALUES_DB[5:7,]
#
# NOTE(review): TOTAL_SPACE is used below but only defined here as a
# commented-out example — it must be set by the caller before sourcing.
# TOTAL_SPACE = 10
# Create an empty allocation table with one row per unit of shelf space.
#
# total_space: number of space units to allocate (non-negative integer).
# Returns a data.frame with columns SPACE_UNIT (1..total_space) and
# PRODUCT_ID / INCREMENTAL_KPI, both NA until the greedy loop fills them.
#
# Uses seq_len() + rep() so that total_space = 0 yields an empty (0-row)
# frame instead of the error raised by seq(1, 0, 1).
initialize_space_allocation <- function(total_space)
{
  data.frame(SPACE_UNIT = seq_len(total_space),
             PRODUCT_ID = rep(NA, total_space),
             INCREMENTAL_KPI = rep(NA, total_space))
}
# function to check if all space is allocated
# returns TRUE if all space is allocated
# else FALSE
# Reads the global ALLOCATED_SPACE table: space is fully allocated once
# no PRODUCT_ID entry is NA. anyNA() replaces the original
# length(which(is.na(...))) > 0 construction (same result, clearer/faster).
all_space_allocated <- function()
{
  !anyNA(ALLOCATED_SPACE$PRODUCT_ID)
}
# Incremental KPI gained by giving `product_id` one more block of space
# (`increment`, default 1) on top of `allocated_space` units.
# Curve parameters (ALPHA..DELTA) come from the global
# SPACE_ELASTICITY_PARAMS_DB; the curve itself is evaluated by
# space_curve_value() (sourced from scalene_utilities.R).
get_incremental_kpi <- function(product_id, allocated_space, increment = 1)
{
  # Look up this product's fitted curve parameters.
  row_idx <- which(SPACE_ELASTICITY_PARAMS_DB$PRODUCT_ID == product_id)
  params <- c(SPACE_ELASTICITY_PARAMS_DB$ALPHA[row_idx],
              SPACE_ELASTICITY_PARAMS_DB$BETA[row_idx],
              SPACE_ELASTICITY_PARAMS_DB$GAMMA[row_idx],
              SPACE_ELASTICITY_PARAMS_DB$DELTA[row_idx])

  # Treat "no allocation yet" (NA) the same as zero units.
  if (is.na(allocated_space)) allocated_space <- 0

  # The fitted curve is non-zero at x = 0, so force the baseline KPI to 0
  # when nothing has been allocated yet.
  current_kpi <- if (allocated_space == 0) 0
                 else space_curve_value(allocated_space, params)
  next_kpi <- space_curve_value(allocated_space + increment, params)

  # Clamp at 0 so a (mis-fitted) decreasing curve never yields a negative
  # incremental value.
  max(next_kpi - current_kpi, 0)
}
# Build the per-product greedy state table: PRODUCT_ID, the number of
# units allocated so far (all zero) and the incremental KPI of each
# product's *first* unit of space.
# Returns the data.frame directly (the original assigned to a temporary
# first); vapply() replaces sapply() so the KPI column is guaranteed to be
# a numeric vector even for degenerate parameter tables.
initialize_incremental_kpi <- function()
{
  products <- SPACE_ELASTICITY_PARAMS_DB$PRODUCT_ID
  data.frame(PRODUCT_ID = products,
             ALLOCATED_SPACE_COUNT = rep(0, length(products)),
             INCREMENTAL_KPI = vapply(products, get_incremental_kpi,
                                      numeric(1), allocated_space = 0),
             stringsAsFactors = FALSE)
}
# initialize the incremental KPI dataframe
# Greedy marginal allocation: one space unit at a time goes to whichever
# product currently offers the largest incremental KPI; that product's
# marginal value is then re-computed for its next unit.
# Requires TOTAL_SPACE to be defined before this point (see prelude note).
ALLOCATED_SPACE <- initialize_space_allocation(TOTAL_SPACE)
INCREMENTAL_KPI <- initialize_incremental_kpi()
# lets start to allocate
while(!all_space_allocated())
{
  # Next unallocated unit = first NA row of the allocation table.
  space_to_allocate <- min(which(is.na(ALLOCATED_SPACE$PRODUCT_ID)))
  # print(sprintf("We are allocating space unit %d",space_to_allocate))
  # find the product_id with maximum incremental sales
  max_incremental_kpi <- max(INCREMENTAL_KPI$INCREMENTAL_KPI)
  # if more than 1 match, then pick the first one
  max_row <- which(INCREMENTAL_KPI$INCREMENTAL_KPI==max_incremental_kpi)[1]
  # TODO: need some error checking
  max_pid <- INCREMENTAL_KPI$PRODUCT_ID[max_row]
  # allocate space to this product id
  ALLOCATED_SPACE$PRODUCT_ID[space_to_allocate] = max_pid
  ALLOCATED_SPACE$INCREMENTAL_KPI[space_to_allocate] = max_incremental_kpi
  # update the incremental KPI for this product
  current_space_allocation <- INCREMENTAL_KPI$ALLOCATED_SPACE_COUNT[max_row]+1
  next_incremental_kpi <- get_incremental_kpi(max_pid,current_space_allocation)
  INCREMENTAL_KPI$ALLOCATED_SPACE_COUNT[max_row] = current_space_allocation
  INCREMENTAL_KPI$INCREMENTAL_KPI[max_row] = next_incremental_kpi
}
# Total KPI is the sum of every unit's marginal contribution.
print(sprintf("Maximum possible KPI is $ %s from %d space",
              format(sum(ALLOCATED_SPACE$INCREMENTAL_KPI),big.mark = ",",scientific = F),TOTAL_SPACE))
# print("Space allocation is as follows")
# print(table(ALLOCATED_SPACE$PRODUCT_ID))
### cleanups
rm(current_space_allocation)
rm(max_incremental_kpi)
rm(max_pid)
rm(max_row)
rm(next_incremental_kpi)
rm(space_to_allocate)
|
eb9c4e5600f01e228459473a809f2f7b71fb2553
|
703ba5f02aaeddbfd44489ee75b38b7bddc68b1d
|
/stuff.R
|
65feb62db79f11b7710e5a04f70a6161763d5c5d
|
[] |
no_license
|
phil8192/kaggle-lmgpip
|
237f21ab62aa2e841e4962f06fcdde646cee2ad9
|
56c1d16fd67a98cea2fcea7c4247263de66ff007
|
refs/heads/master
| 2016-09-05T17:02:06.954235
| 2015-08-29T17:32:14
| 2015-08-29T17:32:14
| 41,601,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,351
|
r
|
stuff.R
|
# NOTE(review): clearing the whole global environment at the top of a
# script is an anti-pattern (it silently destroys the caller's workspace);
# kept as-is because the rest of this script assumes a clean environment.
rm(list=ls())
# kaggle's gini function (ripped verbatim from https://www.kaggle.com/wiki/RCodeForGini)
#"NormalizedGini" is the other half of the metric. This function does most of the work, though
#
# Unnormalized Gini: area between the model's Lorentz curve (cases ordered
# by predicted `submission`, most confident first) and the diagonal of a
# random ordering. Two dead bare-`df` statements and the explicit return()
# from the original have been removed; the computation is unchanged.
SumModelGini <- function(solution, submission) {
  df <- data.frame(solution = solution, submission = submission)
  # Rank cases from most- to least-confident prediction.
  df <- df[order(df$submission, decreasing = TRUE), ]
  # Cumulative share of cases seen so far (the "random" baseline).
  df$random <- seq_len(nrow(df)) / nrow(df)
  totalPos <- sum(df$solution)
  df$cumPosFound <- cumsum(df$solution) # cumulative positives found ("Model Lorentz" numerator)
  df$Lorentz <- df$cumPosFound / totalPos # cumulative proportion of positives found
  df$Gini <- df$Lorentz - df$random # Lorentz minus random, summed below
  sum(df$Gini)
}
# Gini of the model's ranking scaled by the Gini of a perfect ranking,
# so a perfect ordering scores 1.
NormalizedGini <- function(solution, submission) {
  model_gini <- SumModelGini(solution, submission)
  perfect_gini <- SumModelGini(solution, solution)
  model_gini / perfect_gini
}
# my auxiliary: linearly rescale `v` from [minv, maxv] onto [from, to].
# e.g. all(range(norml.range(1:69, 0.1, 0.9)) == c(0.1, 0.9))
norml.range <- function(v, from, to, minv = min(v), maxv = max(v)) {
  unit <- (v - minv) / (maxv - minv)
  from + unit * (to - from)
}
# Inverse of norml.range: map `v` from [from, to] back onto [minv, maxv],
# so norml.range.rev(norml.range(x, a, b, lo, hi), lo, hi, a, b) == x.
norml.range.rev <- function(v, minv, maxv, from = min(v), to = max(v)) {
  unit <- (v - from) / (to - from)
  minv + unit * (maxv - minv)
}
# Centre and scale `v` to mean 0 / sd 1 (z-scores); `m` and `s` default to
# the sample mean and standard deviation of `v`.
standardise <- function(v, m = mean(v), s = sd(v)) {
  (v - m) / s
}
# Root-mean-squared error between desired (`des`) and actual (`out`) values.
rmse <- function(des, out) {
  sqrt(mean((des - out)^2))
}
# Mean absolute error between desired (`des`) and actual (`out`) values.
mae <- function(des, out) {
  mean(abs(des - out))
}
# conf.
# Fraction of training rows held out for validation (0 disables the split).
valid.ratio <- 0
### prep data.
# training
train <- read.csv("data/train.csv", header=T)
# testing (used at end for final predictions)
# add dummy Hazard column as to match training data dim.
test.data <- read.csv("data/test.csv", header=T)
test.data <- cbind(Id=test.data[, 1], Hazard=-1, test.data[, -1])
# temp: add testing data to training data so that min,max values etc are
# the same in training and in the final prediction for normalisation.
# NOTE(review): this deliberately uses test-set ranges for normalisation —
# fine for min/max scaling here, but it is a leakage risk if extended.
train <- rbind(test.data, train)
# remove columns of apparent no information value (see random forest graph)
# T2_V10, T2_V7, T1_V13, T1_V10
train <- train[, -which(colnames(train) %in% c("T2_V10", "T2_V7", "T1_V13", "T1_V10"))]
# Each predictor below is mapped to [-3, 3]; factor columns are first
# coerced to their integer codes via as.integer().
# T1_V1: uniformish. 1:19. cant be a count of something: would expect more at 1, less at 19.
# could be randomised (hense uniform appearance). normalise for now. could try seperating into
# boolean variables or re-ordering by sum (based on assumption that data has been randomised.)
train$T1_V1 <- norml.range(train$T1_V1, -3, 3)
# T1_V2: 1:24, same as ^ ;although seems to be a bias towards 7 and 18.
train$T1_V2 <- norml.range(train$T1_V2, -3, 3)
# T1_V3: 1:9, distribution is apparent: exponential? decay from 1:9.
train$T1_V3 <- norml.range(train$T1_V3, -3, 3)
# T1_V4: 8 factors: B C E G H N S W
# N is most frequent. N E S W, north, east, south, west?
# re-ordering by frequency or creating new boolean features may help..
# normalise for now.
train$T1_V4 <- norml.range(as.integer(train$T1_V4), -3, 3)
# T1_V5: 10 factors: A B C D E H I J K L
# normalise for now.
train$T1_V5 <- norml.range(as.integer(train$T1_V5), -3, 3)
# T1_V6: 2 factors: Y N
# yes or no.
# no slightly more popular.
# normalise.
train$T1_V6 <- norml.range(as.integer(train$T1_V6), -3, 3)
# T1_V7: 4 factors: A B C D
# normalise
train$T1_V7 <- norml.range(as.integer(train$T1_V7), -3, 3)
# T1_V8: 4 factors: A B C D
# normalise
train$T1_V8 <- norml.range(as.integer(train$T1_V8), -3, 3)
# T1_V9: 6 factors: B C D E F G
# normalise
train$T1_V9 <- norml.range(as.integer(train$T1_V9), -3, 3)
# T1_V10: 2:12.
# missing 4,5,6 and 9,10,11
# normalise.
#train$T1_V10 <- norml.range(train$T1_V10, 0.1, 0.9)
# T1_V11: 12 factors:
#     A     B     D     E     F     H     I     J     K     L     M     N
#  1556 17047   258   450   544 15381  1364  6197   239  7003   541   419
# normalise
train$T1_V11 <- norml.range(as.integer(train$T1_V11), -3, 3)
# T1_V12, 4 factors:
#     A     B     C     D
#  1130 46900  1395  1574
train$T1_V12 <- norml.range(as.integer(train$T1_V12), -3, 3)
# T1_V13. 5,10,15 or 20.
#train$T1_V13 <- norml.range(train$T1_V13, 0.1, 0.9)
# T1_V14, 0,1,2,3,4 (hardly any 0s)
train$T1_V14 <- norml.range(train$T1_V14, -3, 3)
# T1_V15:
#     A     C     D     F     H     N     S     W
# 45680  1652   758    85   524  1879   191   230
train$T1_V15 <- norml.range(as.integer(train$T1_V15), -3, 3)
# T1_V16:
#    A    B    C    D    E    F    G    H    I    J    K    L    M    N    O    P    Q    R
# 2705 8933  808 1397 2599  187  484  408 9331 2410 8159  729 1264 2277  152  459  358 8339
train$T1_V16 <- norml.range(as.integer(train$T1_V16), -3, 3)
# T1_V17:
#     N     Y
# 41183  9816
train$T1_V17 <- norml.range(as.integer(train$T1_V17), -3, 3)
# T2_V1 (1:100) ascending distribution. house age?
train$T2_V1 <- norml.range(train$T2_V1, -3, 3)
# T2_V2 (1:39) distribution. peaks at 8, then exponential decay toward 39.
train$T2_V2 <- norml.range(train$T2_V2, -3, 3)
# T2_V3
#     N     Y
# 34548 16451
train$T2_V3 <- norml.range(as.integer(train$T2_V3), -3, 3)
# T2_V4
# distribution with peaks at 5 and 12
# 1:22
train$T2_V4 <- norml.range(train$T2_V4, -3, 3)
# T2_V5
#     A     B     C     D     E     F
# 33845 11201  5013   515   412    13
train$T2_V5 <- norml.range(as.integer(train$T2_V5), -3, 3)
# T2_V6
# exponential distribution peaking at 2, decaying to 7
# 1:7
train$T2_V6 <- norml.range(train$T2_V6, -3, 3)
# T2_V7
# 7 possible values:
# 22+cumsum(rep(3,7))-3
# 22 25 28 31 34 37 40.
# ascending distribution.
#train$T2_V7 <- norml.range(train$T2_V7, 0.1, 0.9)
# T2_V8 1,2,3
train$T2_V8 <- norml.range(train$T2_V8, -3, 3)
# T2_V9 distribution 1:25
train$T2_V9 <- norml.range(train$T2_V9, -3, 3)
# T2_V10 distribution 1:7
#train$T2_V10 <- norml.range(train$T2_V10, 0.1, 0.9)
# T2_V11 yes,no
train$T2_V11 <- norml.range(as.integer(train$T2_V11), -3, 3)
# T2_V12 yes,no
train$T2_V12 <- norml.range(as.integer(train$T2_V12), -3, 3)
# T2_V13
#     A     B     C     D     E
# 10260   514  7507  5084 27634
train$T2_V13 <- norml.range(as.integer(train$T2_V13), -3, 3)
# T2_V14 distribution 1:7
# peak at 2, decay toward 7.
train$T2_V14 <- norml.range(train$T2_V14, -3, 3)
# T2_V15 distribtion. looks cyclic. 1:12 (months?)
# peak at 1.
train$T2_V15 <- norml.range(train$T2_V15, -3, 3)
# output/target
#train$Hazard <- train$Hazard/100
# ^ squashes close to 0 (0.01), best normalise within [0.1, 0.9]
# (assume min and max value of [1, 100])
# Fixed bounds are used so the dummy Hazard = -1 rows (test data) do not
# distort the scaling of the real targets.
train$Hazard <- norml.range(train$Hazard, from=0.1, to=0.9, minv=1, maxv=100)
# extract the testing data (can also id it by negative Hazard)
# test rows were rbind-ed on top, so head()/tail() splits them back out.
test.data <- head(train, nrow(test.data))
train <- tail(train, -nrow(test.data))
# randomise order of training data
train <- train[sample(1:nrow(train)), ]
# hold 10% of data back for validation (not used in training or model testing)
if(valid.ratio > 0) {
  valid <- head(train, as.integer(nrow(train)*valid.ratio))
  train <- tail(train, nrow(train)-as.integer(nrow(train)*valid.ratio))
}
# spit it out.
# Columns reordered so the target (Hazard) is last, as the external
# neural-network tool expects; the Id column is dropped.
write.table(train[, c(3:ncol(train), 2)], "/tmp/train_data.csv", quote=F, row.names=F, col.names=F, sep=",")
if(valid.ratio > 0)
  write.table(valid[, c(3:ncol(valid), 2)], "/tmp/valid_data.csv", quote=F, row.names=F, col.names=F, sep=",")
write.table(test.data[, c(3:ncol(test.data), 2)], "/tmp/test_data.csv", quote=F, row.names=F, col.names=F, sep=",")
######### external: use neural-network-light
# Load the outputs of a single external neural-network run from /tmp:
# epoch-wise errors (plotted), plus train/validation predictions.  Results
# are published as GLOBALS via `<<-` (errors, train.out, train.des,
# valid.out, valid.des) for check.fit() to consume.  Predictions are mapped
# from the network's [0.1, 0.9] range back to the 1..100 Hazard scale with
# norml.range.rev(); *.des are the integer desired (target) values.
load.model <- function() {
# look at errors
errors <<- read.csv("/tmp/errors.csv", header=F)
# side effect: draws the two error curves side by side
par(mfrow=c(1, 2))
plot(errors[, 1], type="s", main="training error", xlab="epoch", ylab="error")
plot(errors[, 2], type="s", main="testing error", xlab="epoch", ylab="error")
### look at model output
# training error/fit
train.out <<- read.csv("/tmp/train_out.csv", header=F)
train.out <<- norml.range.rev(train.out[, 1], 1, 100, 0.1, 0.9) # 100*train.out[, 1]
train.des <<- as.integer(round(norml.range.rev(train$Hazard, 1, 100, 0.1, 0.9), 1)) # as.integer(100*train$Hazard)
# validation outputs only exist when a validation split was held out
if(valid.ratio > 0) {
valid.out <<- read.csv("/tmp/valid_out.csv", header=F)
valid.out <<- norml.range.rev(valid.out[, 1], 1, 100, 0.1, 0.9) # 100*valid.out[, 1]
valid.des <<- as.integer(round(norml.range.rev(valid$Hazard, 1, 100, 0.1, 0.9), 1)) # as.integer(100*valid$Hazard)
}
}
# Print fit metrics (MAE, RMSE, normalised Gini) for the training set and,
# when a validation split exists (valid.ratio > 0), for the validation set.
# Reads the globals train.out/train.des (and valid.out/valid.des) that
# load.model() populates; the Gini score approximates the Kaggle ranking.
check.fit <- function() {
  train.mae  <- round(mae(train.out, train.des), 6)
  train.rmse <- round(rmse(train.out, train.des), 6)
  train.gini <- round(NormalizedGini(train.des, train.out), 6)
  print(paste("mae|rmse|gini training =", train.mae, train.rmse, train.gini))
  if(valid.ratio > 0) {
    # validation + check gini to give an expectation of kaggle rank
    valid.mae  <- round(mae(valid.out, valid.des), 6)
    valid.rmse <- round(rmse(valid.out, valid.des), 6)
    valid.gini <- round(NormalizedGini(valid.des, valid.out), 6)
    print(paste("mae|rmse|gini validation =", valid.mae, valid.rmse, valid.gini))
  }
}
# Write a Kaggle submission file: read the network's test-set predictions,
# undo the [0.1, 0.9] normalisation back to the original 1..100 Hazard scale
# and pair each prediction with its test Id.
kaggle <- function() {
  #### make kaggle predictions
  preds <- read.csv("/tmp/test_out.csv", header=F)
  hazard <- norml.range.rev(preds[, 1], 1, 100, 0.1, 0.9)
  submission <- cbind(Id=test.data$Id, Hazard=hazard)
  write.table(submission, "/tmp/kaggle_out.csv", quote=F, row.names=F, col.names=T, sep=",")
}
# for single model
# Disabled with if(F): evaluate the body by hand after the external
# neural-network run has written its /tmp output files.
if(F) {
load.model()
check.fit()
}
# for k-fold models
# Load the per-fold outputs of a k-fold ensemble from /tmp.  For fold i the
# external run writes
#   train_out_XX.csv / valid_out_XX.csv / test_out_XX.csv : one prediction column
#   errors_XX.csv                                         : train, test error per epoch
# Results are published as globals: *.dat matrices with one column per fold
# (rows = cases, or epochs for the error matrices).
# Fix: replaced the original grow-by-rbind-then-double-transpose pattern with
# preallocated per-fold column lists and a single cbind per matrix (same
# values; incidental dimnames from t() are dropped, nothing downstream --
# rowMeans in the ensemble block -- uses them).
load.models.k <- function(k) {
  train.cols     <- vector("list", k)
  valid.cols     <- vector("list", k)
  test.cols      <- vector("list", k)
  err.train.cols <- vector("list", k)
  err.test.cols  <- vector("list", k)
  for(i in seq_len(k)) {
    tag <- sprintf("%02d", i)
    train.cols[[i]] <- read.csv(paste0("/tmp/train_out_", tag, ".csv"), header=F)[, 1]
    valid.cols[[i]] <- read.csv(paste0("/tmp/valid_out_", tag, ".csv"), header=F)[, 1]
    test.cols[[i]]  <- read.csv(paste0("/tmp/test_out_", tag, ".csv"), header=F)[, 1]
    errors.csv <- read.csv(paste0("/tmp/errors_", tag, ".csv"), header=F)
    err.train.cols[[i]] <- errors.csv[, 1]
    err.test.cols[[i]]  <- errors.csv[, 2]
  }
  train.dat        <<- do.call(cbind, train.cols)
  valid.dat        <<- do.call(cbind, valid.cols)
  test.dat         <<- do.call(cbind, test.cols)
  errors.train.dat <<- do.call(cbind, err.train.cols)
  errors.test.dat  <<- do.call(cbind, err.test.cols)
}
# ensemble
# Disabled with if(F): average the k fold predictions column-wise, score the
# validation Gini, and write the ensemble submission.
if(F) {
ve <- norml.range.rev(rowMeans(valid.dat), 1, 100, 0.1, 0.9)
te <- cbind(Id=test.data$Id, Hazard=norml.range.rev(rowMeans(test.dat), 1, 100, 0.1, 0.9))
valid.des <- as.integer(round(norml.range.rev(valid$Hazard, 1, 100, 0.1, 0.9), 1))
print(paste("gini =", round(NormalizedGini(valid.des, ve), 6)))
write.table(te, "/tmp/kaggle_ensemble.csv", quote=F, row.names=F, col.names=T, sep=",")
}
# Disabled with if(F): pick the hidden-layer size with the lowest mean error
# from an external architecture search (columns: nodes, ?, error).
if(F) {
# check structure generalisation
x <- read.csv("/tmp/search.csv", header=F)
print(paste("optimal hidden nodes =", which.min(tapply(x[, 3], x[, 1], mean))))
# 4 0.03229
# 6 0.03237
# 8 0.03243
}
# Emit an individual Kaggle submission for each hand-picked fold.
# Fix: the original was the same three lines copy-pasted four times (folds
# 32, 27, 19, 22); folded into one loop producing byte-identical files.
# Predictions are rescaled from [0.1, 0.9] back to the 1..100 Hazard range.
for (fold in c(32, 27, 19, 22)) {
  test.out <- read.csv(paste0("/tmp/test_out_", fold, ".csv"), header=F)
  test.out <- cbind(Id=test.data$Id, Hazard=norml.range.rev(test.out[, 1], 1, 100, 0.1, 0.9))
  write.table(test.out, paste0("/tmp/kaggle_out_", fold, ".csv"), quote=F, row.names=F, col.names=T, sep=",")
}
|
6268f242bceecb05e35bc41fc6b7578b9065ffa6
|
cb8470ace865d214643ad99b8d6538f364a651a0
|
/inst/extdata/article/repeat_tracks.R
|
72daafc22ed6f0d42303f3a08eb58628825b63bd
|
[] |
no_license
|
sonthuybacha/soysambu
|
edac3fc7a0c5831c8c6c1ac8059ff57d179d1828
|
67d691cd09359b0b76a71259f09d53cf030447eb
|
refs/heads/master
| 2020-09-22T02:49:50.310680
| 2019-05-22T04:32:08
| 2019-05-22T04:32:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,883
|
r
|
repeat_tracks.R
|
# Build a spatstat point pattern (ppp) of GPS track points clipped to the
# snares observation window, then split it by track number and by date.
# `gps_tracks` and `snares` come from the Soysambu package data.
pacman::p_load(spatstat,Soysambu,sf)
# partially-applied plot helper: plain point plot, no marks, no title
pplot=curry(plot.ppp,use.marks=F,main="")
# split transects --------------------------------------
s=sf_dt(gps_tracks)
s[, trackdate := lubridate::ymd_hms(time) %>% as.Date]
tr=ppp(x=s$lon,y=s$lat,window=Window(snares))
# split by track
marks(tr)=factor(s$track_nr)
ss=split(tr)
# plot(ss)
# make ppp of tracks with track nr and date ------------
# re-mark the same pattern with a full marks data frame (date, timestamp,
# track number, site name) so it can be split by date below
tr=unmark(tr)
marks(tr)=data.frame(
trackdate=factor(s$trackdate),
tracktime=s$time,
tracknr=s$track_nr,
name=s$new_name
)
st=split(tr,"trackdate",un=F)
# Debug helper: print the distinct track dates and track numbers present in
# a (marked) point pattern.  (NOTE(review): `f` is reassigned to a pattern
# further down the script, so call this before that point.)
f=function(p) {
  dt=ppp_dt(p)
  dates=unique(dt$trackdate)
  nrs=unique(dt$tracknr)
  cat("trackdates:", paste(dates,collapse=", "), "\n")
  cat("tracknrs:", paste(nrs,collapse=", "))
}
# with `identify(tx)`
# --- Isolate repeat-visit hotspots ---
# For each site, select the track segments from specific dates (numeric
# timestamp cut-offs were read off interactively) and superimpose them into
# one pattern per site.  Each site must have been visited 4 times on 4
# different dates.
# quarry
a=st[["2018-12-06"]]
b=st[["2019-01-18"]]
c=st[["2019-03-30"]]
d=st[["2019-03-27"]]
a=subset(a,tracknr==3 & hour(tracktime) <=7)
b=subset(b,name=="Quarry")
c=subset(c, as.numeric(tracktime) < 1553934617)
d=subset(d,name=="Quarry")
quarry=superimpose(a,b,c,d)
# Jolai Gate
e=st[["2019-03-27"]]
# NOTE(review): this reassignment clobbers the debug helper f() defined above
f=st[["2019-03-29"]]
g=st[["2019-03-30"]]
h=st[["2019-01-24"]]
e1=subset(e,as.numeric(tracktime) <= 1553677995)
e2=subset(e1, name != "Quarry")
f1=subset(f,as.numeric(tracktime) <=1553853727)
# NOTE(review): f2 is computed but never used -- superimpose below takes
# only e2, g1, h1; confirm whether the 2019-03-29 segment was meant to be
# included.
f2=subset(f1,as.numeric(tracktime) > 1553847325)
g1=subset(g, as.numeric(tracktime) >= 1553934617)
h1=subset(h, name=='Jolai Gate')
jolai_gate=superimpose(e2,g1,h1)
# Serena
i=st[["2019-01-17"]]
j=st[["2019-03-27"]]
k=st[["2019-03-29"]]
l=st[["2019-04-01"]]
i1=subset(i, as.numeric(tracktime) <= 1547712785)
j1=subset(j, as.numeric(tracktime) >= 1553705672)
k1=subset(k, as.numeric(tracktime) >= 1553861311)
l1=subset(l, name=="Serena B")
serena=superimpose(i1,j1,k1,l1)
# pplot(serena)
# --- Clip the snares pattern to each hotspot and split by survey date ---
# extract from snares database
data(snares)
# make data.table out of snares
s=ppp_dt(snares)
# make data.table out of tracks
serena.df=ppp_dt(serena)[, label := "Serena"]
jolai.df=ppp_dt(jolai_gate)[, label := "Jolai Gate"]
quarry.df=ppp_dt(quarry)[, label := "Quarry"]
df=rbind(serena.df,jolai.df,quarry.df)
df[, name := NULL]
df[, idx := 1:.N,label]
# find seperate hotspots: the convex hull of each site's track points
# defines that site's observation window
serena.win=convexhull.xy(df[label=="Serena"])
jolai.win=convexhull.xy(df[label=="Jolai Gate"])
quarry.win=convexhull.xy(df[label=="Quarry"])
# make ppp: subset the snares pattern by each hotspot window
serena.s=snares[serena.win]
jolai.s=snares[jolai.win]
quarry.s=snares[quarry.win]
# split ppp on date (date mark must be a factor for split())
marks(jolai.s)$date=factor(marks(jolai.s)$date)
jolai.split=split(jolai.s, "date")
marks(serena.s)$date=factor(marks(serena.s)$date)
serena.split=split(serena.s, "date")
marks(quarry.s)$date=factor(marks(quarry.s)$date)
quarry.split=split(quarry.s, "date")
# plot(unmark(jolai.split))
# plot(unmark(serena.split))
# plot(jolai.s,which.marks="date")
# drop the 2019-03-29 Serena visit and its now-empty factor level
serena.ss=subset(serena.s,date != "2019-03-29")
marks(serena.ss)$date=droplevels(marks(serena.ss)$date)
# plot(serena.ss,which.marks="date")
# plot(serena.ss %mark% nndist(serena.ss),markscale=0.3)
# plot(jolai.s %mark% nndist(jolai.s), markscale=0.3)
# add a factor copy of the date mark (datex) on the full snares pattern
marks(snares)$datex=marks(snares)$date=factor(marks(snares)$date)
split(snares,"datex") %>% plot(use.marks=F)
# --- Extract snares inside hand-drawn boxes and report hotspot areas ---
# The `structure(... class="owin")` literals are frozen outputs of an
# interactive clickbox() call (commented above each), so the script replays
# without user interaction.
serena=subset(snares, datex %in% c("2018-12-05","2019-01-17","2019-03-27"))
plot(snares, use.marks=F)
plotstuff()
# a=clickbox()
a=structure(list(type = "rectangle", xrange = c(860425.277862241,
861286.980503061), yrange = c(9955395.72812958, 9956372.85514261
), units = structure(list(singular = "unit", plural = "units",
multiplier = 1), class = "unitname")), class = "owin")
snares[a] %>% plot(use.marks=F,pch="+")
# Serena hotspot: snares inside box `a`, restricted to its three visit dates
serena=snares[a]
serena=subset(serena,datex %in% c("2018-12-05","2019-01-17","2019-03-27"))
marks(serena)$datex=droplevels(marks(serena)$datex)
plot(serena,which.marks="datex",main="")
chs=convexhull.xy(serena)
plot(chs,add=T,border="lightgrey",main="")
round(area(chs)/(100*100)) # area in hectare
# NOTE(review): this subset is immediately overwritten by `snares[b]` below
jolai.gate=subset(snares, datex %in% c("2019-03-26","2019-03-27","2019-03-29","2019-01-24"))
# b=clickbox()
b=structure(list(type = "rectangle", xrange = c(855345.956837355,
856143.214070323), yrange = c(9945580.51526661, 9946647.26790227
), units = structure(list(singular = "unit", plural = "units",
multiplier = 1), class = "unitname")), class = "owin")
jolai.gate=snares[b]
marks(jolai.gate)$datex=droplevels(marks(jolai.gate)$datex)
plot(jolai.gate, which.marks="datex",main="")
chj=convexhull.xy(jolai.gate)
plot(chj,add=T,border="lightgrey", main="")
round(area(chj)/(100*100)) # area in hectare
|
226d956770a1f97c0e71359fe01bf06cbbc1d470
|
6ed74fa0cf68e3a16c101f89a14824e8a950002b
|
/Demoscripts/HadoopIOQ_Tests.R
|
1125787064c732ccb90a408a19059ec67f5f4f19
|
[
"MIT"
] |
permissive
|
dem108/Revolution-R-Enterprise-for-Academic-Users
|
5956d16c5ea5301f4d6d66341a0009e5a2adeb56
|
dd3b5de7a96d2c9d2515edeac739e12d6d7ca7b0
|
refs/heads/master
| 2021-01-24T20:46:37.489023
| 2014-06-04T18:30:15
| 2014-06-04T18:30:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,642
|
r
|
HadoopIOQ_Tests.R
|
############################################################################################
# Copyright (C) 2013 Revolution Analytics, Inc
############################################################################################
#
# Run tests using both RxHadoopMR and local compute contexts
#
############################################################################################
# HadoopIOQ_Setup.R creates the hadoopTestInfoList required by these functions
#############################################################################################
# Function to get compute contexts
#############################################################################################
# Build the RxHadoopMR compute context from the settings collected in
# hadoopTestInfoList (created by HadoopIOQ_Setup.R).  wait = TRUE makes
# jobs block until completion so the tests run synchronously.
"getHadoopComputeContext" <- function()
{
RxHadoopMR(
nameNode = hadoopTestInfoList$myNameNode,
hdfsShareDir = hadoopTestInfoList$myHdfsShareDir,
shareDir = hadoopTestInfoList$myShareDir,
sshUsername = hadoopTestInfoList$mySshUsername,
sshHostname = hadoopTestInfoList$mySshHostname,
sshSwitches = hadoopTestInfoList$mySshSwitches,
wait = TRUE,
consoleOutput = hadoopTestInfoList$myConsoleOutput,
fileSystem = hadoopTestInfoList$myHdfsFS,
sshProfileScript = hadoopTestInfoList$mySshProfileScript)
}
# Sequential local compute context -- the baseline every Hadoop result is
# compared against.
"getLocalComputeContext" <- function()
{
RxLocalSeq()
}
#############################################################################################
# Functions to get data sources
#############################################################################################
# Text (CSV) data source for AirlineDemoSmall stored in HDFS.  DayOfWeek is
# declared as a factor with a fixed level order so its levels match the
# local copy of the data regardless of row order.
"getAirDemoTextHdfsDS" <- function()
{
  dayLevels <- c("Monday", "Tuesday", "Wednesday", "Thursday",
                 "Friday", "Saturday", "Sunday")
  # In .setup, a directory 'AirlineDemoSmallXdf' will be created with the file
  csvPath <- file.path(hadoopTestInfoList$myHdfsShareDir,
                       hadoopTestInfoList$myHdfsAirDemoCsvSubdir)
  RxTextData(file = csvPath,
             missingValueString = "M",
             colInfo = list(DayOfWeek = list(type = "factor", levels = dayLevels)),
             fileSystem = hadoopTestInfoList$myHdfsFS)
}
# Composite xdf data source for AirlineDemoSmall in HDFS (populated by the
# initialize test via rxImport).
"getAirDemoXdfHdfsDS" <- function()
{
# In the first test a directory 'IOQ\AirlineDemoSmallXdf' will be created with a composite xdf file
RxXdfData( file = file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsAirDemoXdfSubdir),
fileSystem = hadoopTestInfoList$myHdfsFS )
}
# Scratch xdf data source under the HDFS test-output directory;
# testFileDirName names the per-test subdirectory.
"getTestOutFileHdfsDS" <- function( testFileDirName )
{
RxXdfData( file = file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsTestOuputSubdir, testFileDirName),
fileSystem = hadoopTestInfoList$myHdfsFS )
}
# Delete a scratch HDFS output directory.  Temporarily switches to the
# Hadoop compute context for rxHadoopRemoveDir, then back to local.
"removeTestOutFileHdfsDS" <- function( testDS )
{
rxSetComputeContext(getHadoopComputeContext())
rxHadoopRemoveDir(testDS@file)
rxSetComputeContext(getLocalComputeContext())
}
# Local CSV data source for the AirlineDemoSmall sample shipped with RRE.
# DayOfWeek gets the same fixed factor levels as the HDFS variant so local
# and Hadoop results are directly comparable.
"getAirDemoTextLocalDS" <- function()
{
  dayLevels <- c("Monday", "Tuesday", "Wednesday", "Thursday",
                 "Friday", "Saturday", "Sunday")
  RxTextData(file = file.path(rxGetOption("sampleDataDir"), "AirlineDemoSmall.csv"),
             colInfo = list(DayOfWeek = list(type = "factor", levels = dayLevels)),
             missingValueString = "M",
             fileSystem = "native")
}
# Local xdf data source for the AirlineDemoSmall sample shipped with RRE.
"getAirDemoXdfLocalDS" <- function()
{
RxXdfData(file = file.path(rxGetOption("sampleDataDir"), "AirlineDemoSmall.xdf"),
fileSystem = "native")
}
# Scratch composite xdf data source on the local file system; composite
# (createCompositeSet = TRUE) so its layout mirrors the HDFS variant.
"getTestOutFileLocalDS" <- function( testFileDirName )
{
RxXdfData( file = file.path(hadoopTestInfoList$localTestDataDir,
testFileDirName),
createCompositeSet = TRUE,
fileSystem = "native")
}
# Delete a scratch local output directory (a composite xdf is a directory,
# hence recursive = TRUE).
"removeTestOutFileLocalDS" <- function( testDS )
{
unlink(testDS@file, recursive = TRUE)
}
#############################################################################################
# Tests to put data on Hadoop cluster
#############################################################################################
# First test to run (the "aaa" prefix orders it first): sanity-checks the
# Hadoop connection, reports the cluster's RRE version, and -- when
# createHadoopData is set -- copies AirlineDemoSmall.csv into HDFS and
# imports it to the composite xdf the other tests read.
# NOTE(review): unlike the other tests this one has no on.exit() guard, so
# an error mid-test leaves the session on the Hadoop compute context.
"test.hadoop.aaa.initializeHadoop" <- function()
{
rxSetComputeContext(getHadoopComputeContext())
# Get the current version from the Hadoop cluster
"getRevoVersion" <- function()
{
Revo.version
}
versionOutput <- rxExec(getRevoVersion)
print(paste("RRE version on Hadoop cluster:", versionOutput$rxElem1$version.string))
# Verify the configured share directory actually exists in HDFS
output1 <- capture.output(
rxHadoopListFiles(hadoopTestInfoList$myHdfsShareDirRoot)
)
hasUserDir <- grepl(hadoopTestInfoList$myHdfsShareDir, output1)
print("Hadoop Compute Context being used:")
print(getHadoopComputeContext())
if (sum(hasUserDir) == 0)
{
print("WARNING: myHdfsSharDir does not exist or problem with compute context.")
}
if (hadoopTestInfoList$createHadoopData )
{
# Make directory for writing temporary test data
testOutputDir <- file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsTestOuputSubdir)
rxHadoopMakeDir(testOutputDir)
# Copy the AirlineDemoSmall.csv file from local machine to HDFS
# File is automatically installed locally with RRE
airDemoSmallLocal <- file.path(rxGetOption("sampleDataDir"), "AirlineDemoSmall.csv")
# Destination path for file in Hadoop
airDemoSmallCsvHdfsPath <- file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsAirDemoCsvSubdir)
# Remove the directory if it's already there
#rxHadoopListFiles(airDemoSmallCsvHdfsPath)
print("Trying to remove directories in case they already exist.")
rxHadoopRemoveDir(airDemoSmallCsvHdfsPath)
# Create the directory
rxHadoopMakeDir(airDemoSmallCsvHdfsPath)
# Copy the file from local client
rxHadoopCopyFromClient(source = airDemoSmallLocal,
nativeTarget = hadoopTestInfoList$myHdfsNativeDir,
hdfsDest = airDemoSmallCsvHdfsPath,
computeContext = getHadoopComputeContext(),
sshUsername = hadoopTestInfoList$mySshUsername,
sshHostname = hadoopTestInfoList$mySshHostname,
sshSwitches = hadoopTestInfoList$mySshSwitches)
#rxHadoopListFiles(airDemoSmallCsvHdfsPath)
# Import the csv file to a composite xdf file in HDFS
airDemoSmallXdfHdfsPath <- file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsAirDemoXdfSubdir)
rxHadoopRemoveDir(airDemoSmallXdfHdfsPath) # Remove directory if it's already there
rxImport(inData = getAirDemoTextHdfsDS(), outFile = getAirDemoXdfHdfsDS(),
rowsPerRead = 200000, overwrite = TRUE)
#rxHadoopListFiles(airDemoSmallXdfHdfsPath)
}
rxSetComputeContext(getLocalComputeContext())
}
#############################################################################################
# Tests for getting file information
#############################################################################################
# rxGetInfo on the xdf data must agree between local and Hadoop contexts:
# first the basic row/variable counts, then the richer output (var info,
# block sizes, 5-row preview).
"test.hadoop.rxGetInfo" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  # Test simple case
  rxSetComputeContext( getLocalComputeContext() )
  infoLocal <- rxGetInfo( data = getAirDemoXdfLocalDS())
  rxSetComputeContext( getHadoopComputeContext() )
  infoHadoop <- rxGetInfo( data = getAirDemoXdfHdfsDS() )
  checkEquals(infoLocal$numRows, infoHadoop$numRows)
  checkEquals(infoLocal$numVars, infoHadoop$numVars)
  # Test with arguments
  rxSetComputeContext( getLocalComputeContext() )
  infoLocalFull <- rxGetInfo( data = getAirDemoXdfLocalDS(),
                              getVarInfo = TRUE, getBlockSizes = TRUE, numRows = 5)
  rxSetComputeContext( getHadoopComputeContext() )
  infoHadoopFull <- rxGetInfo( data = getAirDemoXdfHdfsDS(),
                               getVarInfo = TRUE, getBlockSizes = TRUE, numRows = 5)
  checkIdentical(infoLocalFull$varInfo, infoHadoopFull$varInfo)
  # This will only be equal if there is only one composite data file
  checkEquals(infoLocalFull$rowsPerBlock, infoHadoopFull$rowsPerBlock)
  checkIdentical(infoLocalFull$data, infoHadoopFull$data)
  rxSetComputeContext( getLocalComputeContext() )
}
# Same rxGetInfo comparison as above, but reading the CSV (text) data
# sources instead of the composite xdf.
"test.hadoop.rxGetInfo.data.csv" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  # Test using Text Data Sources
  rxSetComputeContext( getLocalComputeContext() )
  infoLocal <- rxGetInfo( data = getAirDemoTextLocalDS(),
                          getVarInfo = TRUE, getBlockSizes = TRUE, numRows = 5)
  rxSetComputeContext( getHadoopComputeContext() )
  infoHadoop <- rxGetInfo( data = getAirDemoTextHdfsDS(),
                           getVarInfo = TRUE, getBlockSizes = TRUE, numRows = 5)
  checkIdentical(infoLocal$varInfo, infoHadoop$varInfo)
  # This will only be equal if read from a small file - otherwise order may vary
  checkIdentical(infoLocal$data, infoHadoop$data)
  rxSetComputeContext( getLocalComputeContext() )
}
# Variable metadata must be identical whether the xdf lives locally or in HDFS.
"test.hadoop.rxGetVarInfo" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  varInfoLocal <- rxGetVarInfo( getAirDemoXdfLocalDS() )
  rxSetComputeContext( getHadoopComputeContext() )
  varInfoHadoop <- rxGetVarInfo( getAirDemoXdfHdfsDS() )
  checkIdentical(varInfoLocal, varInfoHadoop)
  rxSetComputeContext( getLocalComputeContext() )
}
# Variable names must match between the local and HDFS copies of the data.
"test.hadoop.rxGetVarNames" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  namesLocal <- rxGetVarNames( getAirDemoXdfLocalDS() )
  rxSetComputeContext( getHadoopComputeContext() )
  namesHadoop <- rxGetVarNames( getAirDemoXdfHdfsDS() )
  checkEquals(namesLocal, namesHadoop)
  rxSetComputeContext( getLocalComputeContext() )
}
# rxLocateFile must resolve both CSV and xdf names on the native file system
# and in HDFS.  Note the asymmetry the assertions encode: in HDFS the CSV
# "file" is a directory, so locating it returns the contained
# AirlineDemoSmall.csv, while the composite xdf resolves to the directory
# itself.
"test.hadoop.rxLocateFile" <- function()
{
on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
# Local tests
rxSetComputeContext( getLocalComputeContext() )
# CSV file
dataSource <- getAirDemoTextLocalDS()
localFileName <- basename(dataSource@file)
localDir <- dirname(dataSource@file)
localLocated <- rxLocateFile(file = localFileName, pathsToSearch = localDir)
checkEquals(normalizePath(localLocated), normalizePath(dataSource@file))
# Xdf file
dataSource <- getAirDemoXdfLocalDS()
localFileName <- basename(dataSource@file)
localDir <- dirname(dataSource@file)
localLocated <- rxLocateFile(file = localFileName, pathsToSearch = localDir)
checkEquals(normalizePath(localLocated), normalizePath(dataSource@file))
# Hadoop
# Csv
rxSetComputeContext( getHadoopComputeContext() )
dataSource <- getAirDemoTextHdfsDS()
hadoopFileName <- basename(dataSource@file)
hadoopDir <- dirname(dataSource@file)
hadoopLocated <- rxLocateFile(file = hadoopFileName, pathsToSearch = hadoopDir, defaultExt = "", fileSystem = dataSource@fileSystem)
checkEquals(hadoopLocated, file.path(dataSource@file, "AirlineDemoSmall.csv"))
# Xdf
dataSource <- getAirDemoXdfHdfsDS()
hadoopFileName <- basename(dataSource@file)
hadoopDir <- dirname(dataSource@file)
hadoopLocated <- rxLocateFile(file = hadoopFileName, pathsToSearch = hadoopDir, defaultExt = ".xdf", fileSystem = dataSource@fileSystem)
checkEquals(hadoopLocated, dataSource@file)
rxSetComputeContext( getLocalComputeContext() )
}
# Reading the full xdf into a data frame must give an identical data frame
# under both compute contexts.
"test.hadoop.rxReadXdf" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  frameLocal <- rxReadXdf( getAirDemoXdfLocalDS() )
  rxSetComputeContext( getHadoopComputeContext() )
  frameHadoop <- rxReadXdf( getAirDemoXdfHdfsDS() )
  checkIdentical(frameLocal, frameHadoop)
  rxSetComputeContext( getLocalComputeContext() )
}
#############################################################################################
# Tests for plotting functions
#############################################################################################
# rxHistogram must produce the same panel data (and lattice call) whether
# the rows are selected and binned locally or on the cluster.
"test.hadoop.rxHistogram" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  histLocal <- rxHistogram(~ArrDelay, data = getAirDemoXdfLocalDS(),
                           rowSelection = ArrDelay > 0, title = "Local Histogram" )
  rxSetComputeContext( getHadoopComputeContext() )
  histHadoop <- rxHistogram(~ArrDelay, data = getAirDemoXdfHdfsDS(),
                            rowSelection = ArrDelay > 0, title = "Hadoop Histogram" )
  checkIdentical(histLocal$panel.args, histHadoop$panel.args)
  checkIdentical(histLocal$call, histHadoop$call)
  rxSetComputeContext( getLocalComputeContext() )
}
# rxLinePlot panel data must match across contexts, both for the xdf and the
# CSV copy of the data in HDFS.
# Fix: removed the stray double commas (`ArrDelay~CRSDepTime,,`) in the two
# Hadoop calls -- they passed a missing positional argument to rxLinePlot,
# unlike the correctly-written local call.
"test.hadoop.rxLinePlot" <- function()
{
on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
rxSetComputeContext( getLocalComputeContext() )
localLinePlot <- rxLinePlot(ArrDelay~CRSDepTime, data = getAirDemoXdfLocalDS(),
rowSelection = ArrDelay > 60, type = "p", title = "Local Line Plot" )
rxSetComputeContext( getHadoopComputeContext() )
hadoopLinePlot <- rxLinePlot(ArrDelay~CRSDepTime, data = getAirDemoXdfHdfsDS(),
rowSelection = ArrDelay > 60, type = "p", title = "Hadoop Line Plot" )
checkIdentical(localLinePlot$panel.args, hadoopLinePlot$panel.args)
# Same comparison reading the raw CSV from HDFS
hadoopCsvLinePlot <- rxLinePlot(ArrDelay~CRSDepTime, data = getAirDemoTextHdfsDS(),
rowSelection = ArrDelay > 60, type = "p", title = "Hadoop Line Plot from Text File" )
checkIdentical(localLinePlot$panel.args, hadoopCsvLinePlot$panel.args)
rxSetComputeContext( getLocalComputeContext() )
}
#############################################################################################
# Tests for data step
#############################################################################################
# rxDataStep copying xdf -> xdf: the scratch outputs written locally and in
# HDFS must have the same row count and variable metadata.  Scratch files
# are removed both via on.exit (for failures) and explicitly at the end.
"test.hadoop.rxDataStep.data.XdfToXdf" <- function()
{
on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
# Run example locally
rxSetComputeContext( getLocalComputeContext() )
outLocalDS <- getTestOutFileLocalDS("DataStepTest")
rxDataStep(inData = getAirDemoXdfLocalDS(),
outFile = outLocalDS, overwrite = TRUE)
on.exit(removeTestOutFileLocalDS( outLocalDS ), add = TRUE)
localInfo <- rxGetInfo(data = outLocalDS, getVarInfo = TRUE)
# Run the same step on the cluster
rxSetComputeContext( getHadoopComputeContext() )
outHdfsDS <- getTestOutFileHdfsDS("DataStepTest")
rxDataStep(inData = getAirDemoXdfHdfsDS(),
outFile = outHdfsDS, overwrite = TRUE)
hadoopInfo = rxGetInfo(data = outHdfsDS, getVarInfo = TRUE)
checkEquals(localInfo$numRows, hadoopInfo$numRows)
checkIdentical(localInfo$varInfo, hadoopInfo$varInfo)
# Clean-up
removeTestOutFileHdfsDS( outHdfsDS )
rxSetComputeContext( getLocalComputeContext() )
removeTestOutFileLocalDS( outLocalDS )
}
# rxDataStep with no outFile returns a data frame; local and Hadoop results
# must be identical.
"test.hadoop.rxDataStep.data.XdfToDataFrame" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  frameLocal <- rxDataStep(inData = getAirDemoXdfLocalDS())
  rxSetComputeContext( getHadoopComputeContext() )
  frameHadoop <- rxDataStep(inData = getAirDemoXdfHdfsDS())
  checkIdentical(frameLocal, frameHadoop)
  rxSetComputeContext( getLocalComputeContext() )
}
#############################################################################################
# Tests for analysis functions that read data
#############################################################################################
# rxCube over the xdf data: cell counts must agree across contexts.
"test.hadoop.rxCube.data.xdf" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  cubeLocal <- rxCube(ArrDelay~DayOfWeek, data = getAirDemoXdfLocalDS())
  rxSetComputeContext( getHadoopComputeContext() )
  cubeHadoop <- rxCube(ArrDelay~DayOfWeek, data = getAirDemoXdfHdfsDS() )
  checkIdentical(cubeLocal$Counts, cubeHadoop$Counts)
  rxSetComputeContext( getLocalComputeContext() )
}
# rxCube over an in-memory data frame (iris): the compute context should
# make no difference to the counts.
"test.hadoop.rxCube.data.data.frame" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  cubeLocal <- rxCube(~Species, data = iris)
  rxSetComputeContext( getHadoopComputeContext() )
  cubeHadoop <- rxCube(~Species, data = iris)
  checkIdentical(cubeLocal$Counts, cubeHadoop$Counts)
  rxSetComputeContext( getLocalComputeContext())
}
# rxCrossTabs with a row selection and an on-the-fly transform: sums, counts
# and chi-square results must agree across contexts.
"test.hadoop.rxCrossTabs" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  xtabLocal <- rxCrossTabs(ArrDelay10~DayOfWeek, data = getAirDemoXdfLocalDS(),
                           rowSelection = CRSDepTime > 10,
                           transforms = list(ArrDelay10 = ArrDelay*10) )
  rxSetComputeContext( getHadoopComputeContext() )
  xtabHadoop <- rxCrossTabs(ArrDelay10~DayOfWeek, data = getAirDemoXdfHdfsDS(),
                            rowSelection = CRSDepTime > 10,
                            transforms = list(ArrDelay10 = ArrDelay*10) )
  checkIdentical(xtabLocal$sums, xtabHadoop$sums)
  checkIdentical(xtabLocal$counts, xtabHadoop$counts)
  checkIdentical(xtabLocal$chisquare, xtabHadoop$chisquare)
  rxSetComputeContext( getLocalComputeContext() )
}
# rxSummary with a row selection and transform: the per-level categorical
# summary must agree across contexts.
"test.hadoop.rxSummary" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  summLocal <- rxSummary(ArrDelay10~DayOfWeek, data = getAirDemoXdfLocalDS(),
                         rowSelection = CRSDepTime > 10,
                         transforms = list(ArrDelay10 = 10*ArrDelay))
  rxSetComputeContext( getHadoopComputeContext() )
  summHadoop <- rxSummary(ArrDelay10~DayOfWeek, data = getAirDemoXdfHdfsDS(),
                          rowSelection = CRSDepTime > 10,
                          transforms = list(ArrDelay10 = 10*ArrDelay))
  checkEquals(summLocal$categorical[[1]], summHadoop$categorical[[1]])
  rxSetComputeContext( getLocalComputeContext() )
}
# rxQuantile on ArrDelay must produce identical quantiles across contexts.
"test.hadoop.rxQuantile" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  quantLocal <- rxQuantile("ArrDelay", data = getAirDemoXdfLocalDS())
  rxSetComputeContext( getHadoopComputeContext() )
  quantHadoop <- rxQuantile("ArrDelay", data = getAirDemoXdfHdfsDS())
  checkEquals(quantLocal, quantHadoop)
  rxSetComputeContext( getLocalComputeContext() )
}
# rxLinMod on the xdf data with a row selection: coefficient estimates must
# agree across contexts.
"test.hadoop.rxLinMod" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  fitLocal <- rxLinMod(ArrDelay~DayOfWeek, data = getAirDemoXdfLocalDS(),
                       rowSelection = CRSDepTime > 10)
  rxSetComputeContext( getHadoopComputeContext() )
  fitHadoop <- rxLinMod(ArrDelay~DayOfWeek, data = getAirDemoXdfHdfsDS(),
                        rowSelection = CRSDepTime > 10)
  checkEquals(fitLocal$coefficients, fitHadoop$coefficients)
  rxSetComputeContext( getLocalComputeContext() )
}
# Same rxLinMod comparison, reading the CSV (text) data sources instead of
# the composite xdf.
"test.hadoop.rxLinMod.data.csv" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  fitLocal <- rxLinMod(ArrDelay~DayOfWeek, data = getAirDemoTextLocalDS(),
                       rowSelection = CRSDepTime > 10)
  rxSetComputeContext( getHadoopComputeContext() )
  fitHadoop <- rxLinMod(ArrDelay~DayOfWeek, data = getAirDemoTextHdfsDS(),
                        rowSelection = CRSDepTime > 10)
  checkEquals(fitLocal$coefficients, fitHadoop$coefficients)
  rxSetComputeContext( getLocalComputeContext() )
}
# rxCovCor with an on-the-fly transform: the covariance/correlation matrix,
# standard deviations and means must agree across contexts.
"test.hadoop.rxCovCor" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  covLocal <- rxCovCor(~ArrDelay+DayOfWeek+CRSDepTime10, data = getAirDemoXdfLocalDS(),
                       transforms = list(CRSDepTime10 = 10*CRSDepTime))
  rxSetComputeContext( getHadoopComputeContext() )
  covHadoop <- rxCovCor(~ArrDelay+DayOfWeek+CRSDepTime10, data = getAirDemoXdfHdfsDS(),
                        transforms = list(CRSDepTime10 = 10*CRSDepTime))
  checkEquals(covLocal$CovCor, covHadoop$CovCor)
  checkEquals(covLocal$StdDevs, covHadoop$StdDevs)
  checkEquals(covLocal$Means, covHadoop$Means)
  rxSetComputeContext( getLocalComputeContext() )
}
# rxGlm (probit binomial) with a transform defining the response:
# coefficients, standard errors and degrees of freedom must agree.
"test.hadoop.rxGlm" <- function()
{
  on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
  rxSetComputeContext( getLocalComputeContext() )
  glmLocal <- rxGlm(ArrOnTime ~ CRSDepTime, data = getAirDemoXdfLocalDS(),
                    family = binomial(link = "probit"),
                    transforms = list(ArrOnTime = ArrDelay < 2))
  rxSetComputeContext( getHadoopComputeContext() )
  glmHadoop <- rxGlm(ArrOnTime ~ CRSDepTime, data = getAirDemoXdfHdfsDS(),
                     family = binomial(link = "probit"),
                     transforms = list(ArrOnTime = ArrDelay < 2))
  checkEquals(glmLocal$coefficients, glmHadoop$coefficients)
  checkEquals(glmLocal$coef.std.error, glmHadoop$coef.std.error)
  checkEquals(glmLocal$df, glmHadoop$df)
  rxSetComputeContext( getLocalComputeContext() )
}
#############################################################################################
# Tests for analysis functions that write data
#############################################################################################
# Fit a logistic regression (ArrDelay > 15 on DayOfWeek) and score it with
# rxPredict in both contexts; the model coefficients and the variable
# metadata of the prediction outputs must match.  Prediction files are
# written to scratch data sources and removed at the end.
"test.hadoop.rxLogit.rxPredict" <- function()
{
on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
### Local computations
rxSetComputeContext( getLocalComputeContext() )
inLocalDS <- getAirDemoXdfLocalDS()
outLocalDS <- getTestOutFileLocalDS("LogitPred")
# Logistic regression
localLogit <- rxLogit(ArrDel15~DayOfWeek, data = inLocalDS,
transforms = list(ArrDel15 = ArrDelay > 15), maxIterations = 6 )
# Predictions
localPredOut <- rxPredict(modelObject = localLogit, data = inLocalDS,
writeModelVars = TRUE, predVarNames = "delayPred1", outData = outLocalDS, overwrite = TRUE)
outLocalVarInfo <- rxGetVarInfo( outLocalDS )
### Hadoop computations
rxSetComputeContext( getHadoopComputeContext() )
inHadoopDS <- getAirDemoXdfHdfsDS()
outHadoopDS <- getTestOutFileHdfsDS("LogitPred")
# Logistic regression
hadoopLogit <- rxLogit(ArrDel15~DayOfWeek, data = inHadoopDS,
transforms = list(ArrDel15 = ArrDelay > 15), maxIterations = 6 )
checkEquals(hadoopLogit$coefficients, localLogit$coefficients)
checkEquals(hadoopLogit$coef.std.error, localLogit$coef.std.error)
# Predictions
hadoopPredOut <- rxPredict(modelObject = hadoopLogit, data = inHadoopDS,
writeModelVars = TRUE, predVarNames = "delayPred1", outData = outHadoopDS, overwrite = TRUE)
outHadoopVarInfo <- rxGetVarInfo( outHadoopDS )
checkEquals(outHadoopVarInfo$delayPred1, outLocalVarInfo$delayPred1)
checkEquals(outHadoopVarInfo$ArrDelay, outLocalVarInfo$ArrDelay)
# Clean-up
removeTestOutFileHdfsDS( outHadoopDS )
rxSetComputeContext( getLocalComputeContext() )
removeTestOutFileLocalDS( outLocalDS )
}
# rxKmeans with fixed starting centers (so results are deterministic) and an
# outFile for the cluster assignments: the written output's row count and
# variable metadata, and the fitted centers, must agree across contexts.
"test.hadoop.rxKmeans.args.outFile" <- function()
{
on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
# fixed initial centers => deterministic k-means, comparable across contexts
testCenters <- matrix(c(13, 20, 7, 17, 10), ncol = 1)
### Local computations
rxSetComputeContext( getLocalComputeContext() )
outLocalDS <- getTestOutFileLocalDS("KmeansOut")
localKmeans <- rxKmeans( ~CRSDepTime, data = getAirDemoXdfLocalDS(), outFile = outLocalDS,
centers = testCenters, writeModelVars = TRUE, overwrite = TRUE)
localInfo <- rxGetInfo(outLocalDS, getVarInfo = TRUE)
### Hadoop computations
rxSetComputeContext( getHadoopComputeContext() )
outHadoopDS <- getTestOutFileHdfsDS("KmeansOut")
hadoopKmeans <- rxKmeans( ~CRSDepTime, data = getAirDemoXdfHdfsDS(), outFile = outHadoopDS,
centers = testCenters, writeModelVars = TRUE, overwrite = TRUE)
hadoopInfo <- rxGetInfo(outHadoopDS, getVarInfo = TRUE)
# Comparison tests
checkEquals(hadoopInfo$numRows, localInfo$numRows)
checkIdentical(hadoopInfo$varInfo, localInfo$varInfo)
checkEquals(hadoopKmeans$centers, localKmeans$centers)
# Clean-up
removeTestOutFileHdfsDS( outHadoopDS )
rxSetComputeContext( getLocalComputeContext() )
removeTestOutFileLocalDS( outLocalDS )
}
# Purpose: Test basic regression tree functionality on Hadoop.
# Fit rxDTree on the airline data in both contexts and compare summed split
# improvements (raw and after pruning at cp = 1e-6) and the prediction
# summaries.
# Fix: this was the only sibling test missing the on.exit() compute-context
# guard, so a failure mid-test stranded the session on the Hadoop context.
"test.hadoop.rxDTree.regression" <- function()
{
on.exit(rxSetComputeContext( getLocalComputeContext()), add = TRUE )
formula <- as.formula( "ArrDelay ~ CRSDepTime + DayOfWeek" )
### Local computations
rxSetComputeContext( getLocalComputeContext() )
inLocalDS <- getAirDemoXdfLocalDS()
outLocalDS <- getTestOutFileLocalDS("DTreeOut")
localDTree <- rxDTree( formula, data = inLocalDS,
blocksPerRead = 30,
minBucket = 500,
maxDepth = 10,
cp = 0,
xVal = 0,
maxCompete = 0,
maxSurrogate = 0,
maxNumBins = 101,
verbose = 1,
reportProgress = 0 )
# Compute predictions
localPred <- rxPredict( localDTree, data = inLocalDS, outData = outLocalDS, overwrite = TRUE,
verbose = 1, reportProgress = 0 )
localPredSum <- rxSummary( ~ ArrDelay_Pred, data = outLocalDS )
### Hadoop computations
rxSetComputeContext( getHadoopComputeContext() )
inHadoopDS <- getAirDemoXdfHdfsDS()
outHadoopDS <- getTestOutFileHdfsDS("DTreeOut")
hadoopDTree <- rxDTree( formula, data = inHadoopDS,
blocksPerRead = 30,
minBucket = 500,
maxDepth = 10,
cp = 0,
xVal = 0,
maxCompete = 0,
maxSurrogate = 0,
maxNumBins = 101,
verbose = 1,
reportProgress = 0 )
# Compute predictions
hadoopPred <- rxPredict( hadoopDTree, data = inHadoopDS, outData = outHadoopDS, overwrite = TRUE,
verbose = 1, reportProgress = 0 )
hadoopPredSum <- rxSummary( ~ ArrDelay_Pred, data = outHadoopDS)
# Perform checks
#checkEquals( sum( ADSrxdt$splits[ , "improve" ] ), 0.181093099185139 )
checkEquals(sum(localDTree$splits[ , "improve" ] ), sum(hadoopDTree$splits[ , "improve" ] ))
localPrune <- prune(localDTree, cp = 1e-6 )
hadoopPrune <- prune(hadoopDTree, cp = 1e-6 )
checkEquals(sum(localPrune$splits[ , "improve" ] ), sum(hadoopPrune$splits[ , "improve" ] ))
checkEquals(localPredSum$sDataFrame, hadoopPredSum$sDataFrame)
# Clean-up
removeTestOutFileHdfsDS( outHadoopDS )
rxSetComputeContext( getLocalComputeContext() )
removeTestOutFileLocalDS( outLocalDS )
}
#############################################################################################
# rxExec Example
#############################################################################################
"test.hadoop.rxExec" <- function()
{
playCraps <- function()
{
result <- NULL
point <- NULL
count <- 1
while (is.null(result))
{
roll <- sum(sample(6, 2, replace=TRUE))
if (is.null(point))
{
point <- roll
}
if (count == 1 && (roll == 7 || roll == 11))
{
result <- "Win"
}
else if (count == 1 && (roll == 2 || roll == 3 || roll == 12))
{
result <- "Loss"
}
else if (count > 1 && roll == 7 )
{
result <- "Loss"
}
else if (count > 1 && point == roll)
{
result <- "Win"
}
else
{
count <- count + 1
}
}
result
}
### Local computations
rxSetComputeContext( getLocalComputeContext() )
localExec <- rxExec(playCraps, timesToRun=100, taskChunkSize=25)
rxSetComputeContext( getHadoopComputeContext() )
hadoopExec <- rxExec(playCraps, timesToRun=100, taskChunkSize=25)
checkEquals(length(localExec), length(hadoopExec))
checkEquals(length(localExec[[1]]), length(hadoopExec[[1]]))
}
#############################################################################################
# Always run last: Test to remove data from Hadoop cluster
#############################################################################################
"test.hadoop.zzz.removeHadoopData" <- function()
{
# Always remove temp data
rxSetComputeContext(getHadoopComputeContext())
testOutputDir <- file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsTestOuputSubdir)
rxHadoopRemoveDir(testOutputDir)
if (hadoopTestInfoList$removeHadoopDataOnCompletion)
{
# Remove air data from Hadoop cluster
airDemoSmallCsvHdfsPath <- file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsAirDemoCsvSubdir)
rxHadoopRemoveDir(airDemoSmallCsvHdfsPath)
airDemoSmallXdfHdfsPath <- file.path(hadoopTestInfoList$myHdfsShareDir,
hadoopTestInfoList$myHdfsAirDemoXdfSubdir)
rxHadoopRemoveDir(airDemoSmallXdfHdfsPath)
}
else
{
rxHadoopMakeDir(testOutputDir)
}
rxSetComputeContext(getLocalComputeContext())
}
|
0477218f11213ea0d2d415690e0e4851955430b9
|
60e96724924a3d2988c16a5b80106755e3471fc3
|
/20200607 Grafico animado.R
|
e03ffef0e8a8cdb04c98cf18ec1108d758360614
|
[] |
no_license
|
estadisticavlc/RetoGraficos
|
e96014b80dc4bd9666f300f8f6ab06078d49cd09
|
d116c0648fe9282881e222aee81fc46af253349a
|
refs/heads/master
| 2022-09-26T20:35:24.126578
| 2020-06-05T08:12:01
| 2020-06-05T08:12:01
| 262,992,341
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,102
|
r
|
20200607 Grafico animado.R
|
# Install any packages we need that are not yet present, then load them.
lista_de_paquetes <- c("curl", "gganimate","ggplot2","magick") # packages required by this script
paquetes_nuevos <- lista_de_paquetes[!(lista_de_paquetes %in% installed.packages()[,"Package"])] # packages not yet installed
if(length(paquetes_nuevos)) install.packages(paquetes_nuevos) # install the missing ones
# Load the required libraries
library(curl)
library(gganimate)
library(ggplot2)
library(magick)
theme_set(theme_bw())
# Read the indicator data and keep series Ind11 (hotel overnight stays).
datos=read.csv(curl("https://raw.githubusercontent.com/estadisticavlc/Datos/master/Indicadores.csv"),sep=",",dec=",")
valores<-as.numeric(as.character(datos$VALOR[datos$codigo=="Ind11"]))
# Build a mid-month Date (day 15) for each year/month observation.
fechas<-as.Date(paste0(as.character(datos$ANO[datos$codigo=="Ind11"]),"-",as.character(datos$MES[datos$codigo=="Ind11"]),"-15"))
datos_plot<-data.frame(fechas, valores)
# Keep observations up to the end of 2019 only.
datos_plot<-datos_plot[which(datos_plot$fechas<"2020-1-1"),]
# BUG FIX: the original referenced datos_plot$fecha, which only resolved via
# `$` partial matching against the real column `fechas`; use the exact name.
datos_plot$anyo<-as.numeric(format(datos_plot$fechas, "%Y"))
# Draw the animated chart (transition_reveal animates along the date axis).
options(scipen=999)
p <- ggplot(datos_plot, aes(x = fechas, y=valores)) +
geom_line(show.legend = FALSE, alpha = 0.8, colour="steelblue", size=1) +
geom_point() +
scale_x_date(date_labels = "%m/%Y") +
scale_y_continuous(labels = function(x) format(x, big.mark = ".",
scientific = FALSE)) +
labs(title="Pernoctaciones en establecimientos hoteleros. 1999-2019",
x="",
y="",
caption="Fuente: Encuesta de Ocupación Hotelera (INE). Elaboración: Oficina de Estadística. Ajuntament de València.")+
theme(plot.title = element_text(size=20,hjust = 0.5,face="bold"),
axis.text=element_text(size=16),
axis.title=element_text(size=18,face="bold"),
plot.caption = element_text(color = "black",face = "italic", size = 12, hjust=0.5),
legend.position = "none") + transition_reveal(fechas)
animate(p, width = 900, height = 500)
anim_save("20200607 Grafico animado.gif")
|
6490f7b8993f45b9c388927c0a19e5af172a0e11
|
7b84d52058afecaee3e2ddc1c80a3949328e0529
|
/Plot1.R
|
70ecc18ae033ebbe42ec4dcaa7705fdd587b2b5c
|
[] |
no_license
|
NN123/ExData_Plotting1
|
ec555312ea3db4606f92de8f75e3d0071e85b5c4
|
1a283be343fd51e28feb996f542253662cf12f8f
|
refs/heads/master
| 2021-01-17T10:18:25.111871
| 2016-02-10T10:37:08
| 2016-02-10T10:37:08
| 51,430,154
| 0
| 0
| null | 2016-02-10T08:57:27
| 2016-02-10T08:57:27
| null |
UTF-8
|
R
| false
| false
| 847
|
r
|
Plot1.R
|
setwd("C:/Users/narayane/Documents/GitHub/Neena_Exploratory")
## Project 1 - EDA
library(data.table)
file1<-("./household_power_consumption.txt")
house_power<-read.table(text= grep("^[1,2]/2/2007",readLines(file1),value=TRUE), col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), sep = ";", header = TRUE)
# class(house_power$Time)
house_power$Datetime<-paste(house_power$Date,house_power$Time)
house_power$Datetime<-strptime(house_power$Datetime,"%d/%m/%Y %H:%M:%S")
# class(house_power$Datetime)
# tail(house_power$Datetime)
png("plot1.png", width=480, height=480, units="px")
hist(as.numeric(as.character(house_power$Global_active_power)), col="red", main="Global_active_power", xlab="Global_active_power (kilowatts)")
dev.off()
|
7848eca5426549e4e37f8179c61bd1c233f7a729
|
5fcde27b46bca2a87fd3607a378994c271f9df53
|
/best.R
|
fa2dacd7f3fcdc683a93a3f99bd57cb4b6c44d0b
|
[] |
no_license
|
dvsdimas/ProgrammingAssignment3
|
cf2aa0c8efefbaaf28028c815026605319512e77
|
449873acb609e1cf901fe72c1efd961947ad0323
|
refs/heads/master
| 2021-03-22T05:14:10.150980
| 2016-10-04T13:42:25
| 2016-10-04T13:42:25
| 69,997,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,063
|
r
|
best.R
|
best <- function(state, outcome) {
  # Return the name of the hospital with the lowest 30-day mortality in a
  # state for a given outcome, reading "outcome-of-care-measures.csv".
  #
  # Args:
  #   state:   two-letter state abbreviation present in the data, e.g. "TX".
  #   outcome: one of "heart attack", "heart failure", "pneumonia".
  # Returns: the best hospital's name; ties are broken alphabetically.
  # Stops with "invalid state" / "invalid outcome" on bad input.
  directory <- "data"
  outcomes_data <- c("heart attack", "heart failure", "pneumonia")
  # Descend into the data directory if it exists below the working directory.
  # NOTE(review): this permanently changes the working directory — side effect
  # preserved from the original. (Loop variable renamed from `letters`, which
  # masked the base constant of the same name.)
  for (entry in list.files()) {
    if (identical(entry, directory)) {
      setwd(directory)
    }
  }
  outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character", na.strings = "Not Available")
  if (!(state %in% outcome_data$State)) {
    stop("invalid state")
  }
  if (!(outcome %in% outcomes_data)) {
    stop("invalid outcome")
  }
  # Column index of the 30-day death-rate for the requested outcome.
  outcome_column <- switch(outcome,
                           "heart attack"  = 11,
                           "heart failure" = 17,
                           "pneumonia"     = 23)
  outcome_data_state <- outcome_data[outcome_data[, 7] == state, ]
  rates <- as.double(outcome_data_state[, outcome_column])
  # BUG FIX: which.min() returns only the FIRST minimum, so the alphabetical
  # tie-break below never saw ties; collect every row at the minimum instead.
  min_indexes <- which(rates == min(rates, na.rm = TRUE))
  sort(as.character(outcome_data_state[min_indexes, 2]))[1]
}
|
857d960ebe023966006d2adfb8b3a593c9604cfe
|
a46293558d70f0cd960bd63094b44e53a0e19da6
|
/main.R
|
25081dbb331ba1aab1478b2c197712478e7bcc2a
|
[] |
no_license
|
bigtongue5566/R_SOM
|
cc3334e5e40f432e921b4cf5a37966fee0692f96
|
657397b19dc6b13e4230d438c06d0f3927e558f1
|
refs/heads/master
| 2022-03-02T15:19:50.277641
| 2019-09-23T03:00:45
| 2019-09-23T03:00:45
| 110,215,285
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,394
|
r
|
main.R
|
set.seed(0)
calculate.nets <- function(input.vector, weights, x, y) {
    # Squared Euclidean distance from the input vector to every output node's
    # weight vector, returned as an x-by-y grid. input.vector is recycled down
    # each column of the weight matrix (one column per output node).
    squared.diff <- (input.vector - weights)^2
    matrix(colSums(squared.diff), x, y)
}
find.winning.node <- function(nets) {
    # Row/column coordinates of the smallest net value (the best-matching
    # unit). Ties yield one row per tied position, matching which()'s output.
    smallest <- min(nets)
    which(nets == smallest, arr.ind = TRUE)
}
training <- function(data, xsize=10, ysize=10, training.rate=0.1, training.rate.shrink.rate=0.95, radius.shrink.rate=0.95) {
    # Train a self-organising map on `data` (rows = observations, columns =
    # features) for a fixed 300 epochs.
    #
    # Args:
    #   data: numeric matrix of training observations.
    #   xsize, ysize: dimensions of the output node grid.
    #   training.rate: initial learning rate (decayed each epoch).
    #   training.rate.shrink.rate, radius.shrink.rate: per-epoch decay factors
    #     for the learning rate and the neighbourhood radius.
    # Returns: list(output.topology, average.total.distances) where
    #   output.topology counts winning-node hits during the final epoch and
    #   average.total.distances holds the per-epoch total quantisation error.
    x <- xsize
    y <- ysize
    radius <- (x^2 + y^2)^0.5   # start with the grid diagonal as the radius
    average.total.distances <- numeric(0)
    count <- 0
    # BUG FIX: the weight matrix needs ncol(data) rows and x*y columns (one
    # weight vector per output node). The original rep(0.5, x*y) built a
    # truncated ncol(data) x (x*y/ncol(data)) matrix that was silently
    # recycled downstream; this matches the commented-out runif initialiser:
    # matrix(round(runif(ncol(data)*x*y),2),ncol(data))
    weights <- matrix(rep(0.5, ncol(data) * x * y), ncol(data))
    repeat {
        output.topology <- matrix(rep(0, x * y), x, y)
        count <- count + 1
        total.distance <- 0
        for (obs in seq_len(nrow(data))) {
            input.vector <- data[obs, ]
            nets <- calculate.nets(input.vector, weights, x, y)
            winning.node <- find.winning.node(nets)
            # Accumulate distance to the best-matching unit as the error measure.
            total.distance <- total.distance + min(nets^0.5)
            # Record the winner in the topology histogram. (The original also
            # built a one-hot activation copy of `nets` here that was never
            # read afterwards; that dead code has been removed.)
            output.topology[winning.node[1, 'row'], winning.node[1, 'col']] <-
                output.topology[winning.node[1, 'row'], winning.node[1, 'col']] + 1
            # Grid distance of every node to the winner. (Distinct loop
            # variables: the original reused `i` here, shadowing the outer index.)
            distance <- matrix(rep(0, x * y), x, y)
            for (r in seq_len(x)) {
                for (cl in seq_len(y)) {
                    distance[r, cl] <- ((r - winning.node[1, 'row'])^2 +
                                        (cl - winning.node[1, 'col'])^2)^0.5
                }
            }
            # Exponential neighbourhood weighting, then update the weights.
            neighborhood <- exp(-distance / radius)
            delta.weights <- training.rate * t(t(input.vector - weights) * c(neighborhood))
            weights <- weights + delta.weights
        }
        # Decay the neighbourhood radius and learning rate once per epoch.
        radius <- radius * radius.shrink.rate
        training.rate <- training.rate * training.rate.shrink.rate
        average.total.distances[count] <- total.distance
        if (count >= 300) {
            break
        }
    }
    return(list(output.topology, average.total.distances))
}
recalling <- function(radius.shrink.rate=0.95, training.rate=0.9, training.rate.shrink.rate=0.95, xsize=10, ysize=10, weights, test.data){
    # Map a test observation onto the trained grid: compute its distance to
    # every node and return the best-matching unit's coordinates.
    # (The *.rate arguments are unused here; kept for interface compatibility.)
    grid.nets <- calculate.nets(test.data, weights, xsize, ysize)
    find.winning.node(grid.nets)
}
# --- Prepare the iris data: scale each feature (no centring), drop the
# species column, and shuffle the rows before training.
data.iris <- iris
iris.sepal.length <- scale(c(data.iris[,1]),center = FALSE)
iris.sepal.width <- scale(c(data.iris[,2]),center = FALSE)
iris.petal.length <- scale(c(data.iris[,3]),center = FALSE)
iris.petal.width <- scale(c(data.iris[,4]),center = FALSE)
data.iris <- cbind(iris.sepal.length,iris.sepal.width,iris.petal.length,iris.petal.width)
#randomize row
data.iris <- data.iris[sample(nrow(data.iris)),]
# Train the SOM and unpack the winning-node histogram and per-epoch error.
result <- training(data.iris)
topology <- result[[1]]
average.distances <- result[[2]]
# Plot the training error curve in a new X11 window.
X11()
plot(average.distances,type = "l",xlab = "Iteration",ylab = "Error")
#X11()
#persp(1:nrow(topology),1:ncol(topology),topology,theta = 30, col = "cyan", phi = 20,ticktype = "detailed")
# Interactive 3-D surface of the topology histogram via rgl, coloured by
# hit frequency, then spun for 10 seconds.
library(rgl)
r3dDefaults$windowRect = c(50,50,720,720)
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(topology, nbcol)
persp3d(1:nrow(topology),1:ncol(topology),topology, col = color[zcol],theta = 30, phi = 20,ticktype = "detailed",xlab = "X", ylab = "Y", zlab = "Frequency")
play3d(spin3d(axis = c(0, 0, 1), rpm = 6), duration = 10)
#movie3d(spin3d(axis = c(0, 0, 1), rpm = 6), duration = 10, movie = "outputfile", dir = getwd())
|
e4890e33ac24d73a4265243bd26cefb93ea9fa56
|
052989647c39e571aa2d5637f52ff08772404e29
|
/3/IO/dputdump.R
|
5a4008d07e5d1dc4cedd99eb0c7772476e5b6be0
|
[] |
no_license
|
bhaskar-manguluri/JHU
|
f13736df9447ba96de3b927d8f2fd79fe62e6aae
|
2c0d24b9152598d6605a26b930b1c59402b2b198
|
refs/heads/master
| 2021-01-17T08:04:00.528427
| 2017-03-29T16:18:13
| 2017-03-29T16:18:13
| 83,831,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15
|
r
|
dputdump.R
|
c("temp", "x")
|
50735fa27ae44458645f7663a2d5aba6edc8f63b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bayesmeta/examples/HinksEtAl2010.Rd.R
|
ef3dcf9ec37c0b9eac09c7446c38fdcf590e801e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,771
|
r
|
HinksEtAl2010.Rd.R
|
# Auto-extracted example script for the HinksEtAl2010 data set from the
# bayesmeta package. Only the two lines below run; everything marked ##D sits
# inside a \dontrun{} block and is deliberately left commented out.
library(bayesmeta)
### Name: HinksEtAl2010
### Title: JIA example data
### Aliases: HinksEtAl2010
### Keywords: datasets
### ** Examples
data("HinksEtAl2010")
## Not run:
##D # perform meta analysis based on weakly informative half-normal prior:
##D bma01 <- bayesmeta(y = HinksEtAl2010$log.or,
##D                    sigma = HinksEtAl2010$log.or.se,
##D                    labels = HinksEtAl2010$study,
##D                    tau.prior = function(t){dhalfnormal(t,scale=1.0)})
##D 
##D # perform meta analysis based on slightly more informative half-normal prior:
##D bma02 <- bayesmeta(y = HinksEtAl2010$log.or,
##D                    sigma = HinksEtAl2010$log.or.se,
##D                    labels = HinksEtAl2010$study,
##D                    tau.prior = function(t){dhalfnormal(t,scale=0.5)})
##D 
##D # show heterogeneity posteriors:
##D par(mfrow=c(2,1))
##D plot(bma01, which=4, prior=TRUE, taulim=c(0,1))
##D plot(bma02, which=4, prior=TRUE, taulim=c(0,1))
##D par(mfrow=c(1,1))
##D 
##D # show heterogeneity estimates:
##D rbind("half-normal(1.0)"=bma01$summary[,"tau"],
##D       "half-normal(0.5)"=bma02$summary[,"tau"])
##D # show q-profile confidence interval for tau in comparison:
##D require("metafor")
##D ma03 <- rma.uni(yi=log.or, sei=log.or.se, slab=study, data=HinksEtAl2010)
##D confint(ma03)$random["tau",c("ci.lb","ci.ub")]
##D # show I2 values in the relevant range:
##D tau <- seq(0, 0.7, by=0.1)
##D cbind("tau"=tau,
##D       "I2" =bma01$I2(tau=tau))
##D 
##D # show effect estimates:
##D round(rbind("half-normal(1.0)" = bma01$summary[,"mu"],
##D             "half-normal(0.5)" = bma02$summary[,"mu"]), 5)
##D 
##D # show forest plot:
##D forestplot(bma02)
##D # show shrinkage estimates:
##D bma02$theta
## End(Not run)
|
3fc508cdab5fc9fb47bc85b9f4192c08d22dcf01
|
78e656557b5cc6b77f8a30a3792e41b6f79f2f69
|
/aslib/R/convertFeats.R
|
1acc53605129889647f84bdbafc18e82e11ccdb9
|
[] |
no_license
|
coseal/aslib-r
|
f7833aa6d9750f00c6955bade2b8dba6b452c9e1
|
2363baf4607971cd2ed1d784d323ecef898b2ea3
|
refs/heads/master
| 2022-09-12T15:19:20.609668
| 2022-09-02T17:48:51
| 2022-09-02T17:48:51
| 27,724,280
| 6
| 7
| null | 2021-10-17T17:34:54
| 2014-12-08T16:38:21
|
R
|
UTF-8
|
R
| false
| false
| 1,617
|
r
|
convertFeats.R
|
# helper to convert feats to llama or mlr
# Extracts the allowed feature columns for an ASlib scenario as a data frame,
# averaging over repeated measurements (instances only) and dropping constant
# columns. Returns NULL when algorithm features are requested but absent.
#
# Args:
#   asscenario:    ASlib scenario object holding the feature tables.
#   feature.steps: feature steps (NOTE(review): currently unused in the body).
#   with.id:       keep the id column (instance_id / algorithm) in the result?
#   type:          "instance" or "algorithm" — which feature table to convert.
convertFeats = function(asscenario, feature.steps, with.id, type) {
  assertChoice(type, c("instance", "algorithm"))
  # Select the slot/column names appropriate for the requested feature table.
  if (type == "instance") {
    feature.col = "feature.values"
    step.col = "feature_steps"
    id.col = "instance_id"
    sortBy = c("instance_id", "repetition")
  } else if (type == "algorithm") {
    feature.col = "algorithm.feature.values"
    step.col = "algorithm_feature_steps"
    id.col = "algorithm"
    sortBy = c("algorithm", "repetition")
  }
  # simply return NULL if algorithm features are needed but not present
  if (is.null(asscenario[[feature.col]]) && type == "algorithm") {
    return(NULL)
  }
  # reduce to inst + rep + allowed features
  # note that feats is ordered by instance, then repetition
  allowed.features = getProvidedFeatures(asscenario, type = type)
  feats = asscenario[[feature.col]]
  feats = feats[, c(sortBy, allowed.features), drop = FALSE]
  # aggregate features, only do this if repeated measurements to save time
  if (type == "instance") {
    if (max(feats$repetition) > 1L) {
      # Average each instance's feature values over its repetitions.
      feats = ddply(feats, c("instance_id"), function(d) {
        colMeans(d[, allowed.features, drop = FALSE])
      })
    } else {
      # Single repetition: the column carries no information, drop it.
      feats$repetition = NULL
    }
  }
  # FIXME:
  # remove constant features, currently we do not count NAs as an extra-level
  # the feature would still be completely constant if we impute just with mean
  # THIS CHANGES IF WE CREATE DUMMIES FOR NAs LIKE WE SHOULD!
  feats = removeConstScenFeats(feats, id = id.col)
  if (!with.id)
    feats[[id.col]] = NULL
  return(feats)
}
|
ded1a3b83265dc9ebe06e6aac51e808a8d4f5376
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/IntervalSurgeon/man/breaks.Rd
|
5b91ff4ef71e40b04a2dfbf189b564b313ce3906
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 626
|
rd
|
breaks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{breaks}
\alias{breaks}
\title{Get break points for set of intervals}
\usage{
breaks(x)
}
\arguments{
\item{x}{Integer matrix of two columns, the first column giving the (inclusive) start points of intervals and the second column giving the corresponding (exclusive) end points.}
}
\value{
Ordered integer vector of unique interval start/end points.
}
\description{
Get the sorted set of start and end points for a set of intervals specified as an integer matrix.
}
\examples{
breaks(cbind(2*1:5, 3*1:5))
}
|
f9ad8e4620bc12a6a3506bc57ebf50dceef149ca
|
5c133fd27806cfe67789dd4b2db1508764908ee8
|
/PLOT2.R
|
71d581f2e5426a25c2d20aef0191683e62381df3
|
[] |
no_license
|
nsamniegorojas/ExData_Plotting1
|
6a42a644911b93d8c9a31ef1d23d6e09e98d4d11
|
d79160dfe6e759284d5c4916513c2fa5eae0c89d
|
refs/heads/master
| 2023-01-20T15:56:30.154420
| 2020-11-28T16:52:56
| 2020-11-28T16:52:56
| 300,127,236
| 0
| 0
| null | 2020-10-01T03:21:08
| 2020-10-01T03:21:07
| null |
UTF-8
|
R
| false
| false
| 717
|
r
|
PLOT2.R
|
# Exploratory Data Analysis project - Plot 2
rm(list=ls())
setwd ("C:/R_PROJECTS/EXPLORATORY_DATA")
# Read the household power data.
# BUG FIX: the file marks missing values with "?" and uses "." as the decimal
# separator, so na.strings and dec must be set explicitly. Without them,
# Global_active_power was read as a character column, na.omit() removed
# nothing (the "?" entries were not NA), and plot() could not draw the series.
data <- read.csv2("C:/R_PROJECTS/EXPLORATORY_DATA/PROJECT1_EDA/household_power_consumption.txt",
                  header = TRUE, dec = ".", na.strings = "?")
data <- na.omit(data)
# Keep only the two target days (1-2 Feb 2007).
subdata <- subset(data, data$Date=="1/2/2007" | data$Date=="2/2/2007")
# Combine date and time into POSIXct timestamps for the x axis.
ddtime <- paste(as.Date(subdata$Date, format="%d/%m/%Y"), subdata$Time)
ddtime1 <- as.POSIXct(ddtime)
# PLOT No. 2: global active power over time, written to plot2.png.
png("plot2.png", width=480, height = 480)
plot(ddtime1, as.numeric(subdata$Global_active_power), type="l",
     xlab="",
     ylab="Global Active Power (kilowatts)")
dev.off()
file.show("plot2.png")
|
9bfe92dbfd954e11fac2a2750b28f3c59427bb55
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.security.identity/man/ssoadmin_create_account_assignment.Rd
|
59786817173769d6174d5390f33ef942f99c772c
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,655
|
rd
|
ssoadmin_create_account_assignment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssoadmin_operations.R
\name{ssoadmin_create_account_assignment}
\alias{ssoadmin_create_account_assignment}
\title{Assigns access to a principal for a specified AWS account using a
specified permission set}
\usage{
ssoadmin_create_account_assignment(
InstanceArn,
TargetId,
TargetType,
PermissionSetArn,
PrincipalType,
PrincipalId
)
}
\arguments{
\item{InstanceArn}{[required] The ARN of the IAM Identity Center instance under which the operation
will be executed. For more information about ARNs, see Amazon Resource
Names (ARNs) and AWS Service Namespaces in the \emph{AWS General Reference}.}
\item{TargetId}{[required] TargetID is an AWS account identifier, typically a 10-12 digit string
(For example, 123456789012).}
\item{TargetType}{[required] The entity type for which the assignment will be created.}
\item{PermissionSetArn}{[required] The ARN of the permission set that the admin wants to grant the
principal access to.}
\item{PrincipalType}{[required] The entity type for which the assignment will be created.}
\item{PrincipalId}{[required] An identifier for an object in IAM Identity Center, such as a user or
group. PrincipalIds are GUIDs (For example,
f81d4fae-7dec-11d0-a765-00a0c91e6bf6). For more information about
PrincipalIds in IAM Identity Center, see the IAM Identity Center
Identity Store API Reference.}
}
\description{
Assigns access to a principal for a specified AWS account using a specified permission set.
See \url{https://www.paws-r-sdk.com/docs/ssoadmin_create_account_assignment/} for full documentation.
}
\keyword{internal}
|
cb31b90bca2e2205e186a97a7c02b10798d375e1
|
d4149554bdfa874ada418d6bf7cfd2ad044e402e
|
/Consulting Final.R
|
5de00aaeb2f0abed46c3236c61e67938383c3c93
|
[] |
no_license
|
njfox12/Genetic_Expression
|
82f412af6ee5b916277f5ff67c11bd2abf78310d
|
a0cba42b684fcbddfdcbb515f6f7ca7456ea9826
|
refs/heads/master
| 2021-01-13T04:39:01.131774
| 2017-01-23T18:44:52
| 2017-01-23T18:44:52
| 79,382,832
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,789
|
r
|
Consulting Final.R
|
####This script takes Matrix eQTL input files and creates a weight file and a predictive performance (R2) file
####The weight output file can be used to generate a .db file for use in PrediXcan with generate_sqlite_dbs.py
####by Heather E. Wheeler 20160803####
###This is script is based on Heather Wheeler's original code to run an elastic net model
###The script is modified to run SVM-KNN-PCA-NS
###The initial part of this code is provided by Dr. Wheeler from her previous study in order to create
###The data set to be used in the study.
# --- Run configuration: chromosome, elastic-net alpha, and population come in
# as command-line arguments (hard-coded below for interactive testing).
date <- Sys.Date()
args <- commandArgs(trailingOnly=T)
args <- c('22','1','YRI') #uncomment for testing in RStudio
# Infix string-concatenation helper, e.g. "a" %&% "b" == "ab".
"%&%" = function(a,b) paste(a,b,sep="")
###############################################
### Directories & Variables
exp.dir <- "C:/Users/Nick/Dropbox/STAT_consulting_data/"
snp.dir <- "C:/Users/Nick/Dropbox/STAT_consulting_data/"
snp.annot.dir <- "C:/Users/Nick/Dropbox/STAT_consulting_data/"
out.dir <- "C:/Users/Nick/Dropbox/STAT_consulting_data/output/"
k <- 10 ### k-fold CV
n <- 1 #number of k-fold CV replicates
##alpha = The elasticnet mixing parameter, with 0≤α≤ 1.
#alpha=1 is the lasso penalty, and alpha=0 the ridge penalty.
chromosome <- args[1]
alpha <- as.numeric(args[2]) #alpha to test in CV
pop <- args[3]
### alter filenames according to how you named them
exp.file <- exp.dir %&% pop %&% "_Expression.txt.gz"
exp.annot.file <- exp.dir %&% "GRCh37_hg19_ILMN_Human-6_v2_gene_annotation_for_elasticNet.txt"
snp.file <- snp.dir %&% pop %&% "_" %&% chromosome %&% ".SNP.txt.gz"
snp.annot.file <- snp.annot.dir %&% pop %&% "_" %&% chromosome %&% ".SNP.Location.txt.gz"
################################################
### Functions & Libraries
library(glmnet)
library(dplyr)
################################################
# --- Load annotation, genotype dosages, and expression data, then join them.
##get gene pos info
gencode <- read.table(exp.annot.file,header=TRUE)
##get snp pos info
snpcode <- read.table(snp.annot.file,header=TRUE)
##get snp allele info (needed for weight output)
allelecode <- read.table(snp.dir %&% "chr" %&% chromosome %&% "_" %&% pop %&% "_alleles.txt.gz")
colnames(allelecode) <- c("CHR","POS","SNP","refAllele","effectAllele")
rownames(allelecode) <- allelecode$POS #name by position b/c we found duplicate rsids
##read exp and chr gt dosages
exp <- read.table(exp.file, header=TRUE)
gt <- read.table(snp.file,header=TRUE)
##join pos info
popgt <- left_join(snpcode,gt,by=c("snp"="id"))
popgt <- popgt[duplicated(popgt$snp)==FALSE,] #remove duplicated rsids with incorrect pos
popgt <- popgt[duplicated(popgt$pos)==FALSE,] #remove duplicated pos
rownames(popgt) <- popgt[,3] #name by position b/c we found duplicate rsids
popgt <- popgt[popgt[,3] %in% allelecode$POS,] #only keep SNPs in allelecode file (removes esv SNPs)
##join gene info
popexp <- left_join(gencode,exp,by=c("geneid"="id"))
popsamplelist <- colnames(exp)[-1]
#pull gene info & expression from pop of interest
popexp <- dplyr::filter(popexp,chrom==chromosome)
explist <- as.character(popexp$geneid)
# Fixed seed so the CV fold assignment is identical across alpha runs.
set.seed(42)
groupid <- sample(1:10,length(popsamplelist),replace=TRUE) ##need to use same folds to compare alphas
# --- Output files: per-gene results table and per-SNP weight table headers.
resultsarray <- array(0,c(length(explist),8))
dimnames(resultsarray)[[1]] <- explist
resultscol <- c("gene","alpha","cvm","lambda.iteration","lambda.min","n.snps","R2","pval")
dimnames(resultsarray)[[2]] <- resultscol
workingbest <- out.dir %&% "HapMap3_" %&% pop %&% "_exp_" %&% k %&% "-foldCV_elasticNet_alpha" %&% alpha %&% "_chr" %&% chromosome %&% "_" %&% date %&% ".txt"
write(resultscol,file=workingbest,ncolumns=8,sep="\t")
weightcol = c("gene", "rsid", "ref", "alt", "beta", "alpha") #col headers for use with generate_sqlite_dbs.py
workingweight <- out.dir %&% "HapMap3_" %&% pop %&% "_elasticNet_alpha" %&% alpha %&% "_weights_chr" %&% chromosome %&% "_" %&% date %&% ".txt"
write(weightcol,file=workingweight,ncolumns=6,sep="\t")
# Accumulators for the per-gene R^2 statistics filled in by the loop below.
R2.01<-matrix()
A.R2.01<-matrix()
R2.11<-matrix()
A.R2.11<-matrix()
R2.model1<-matrix()
library(e1071)
library(kknn)
library(splines)
# --- Main loop: for each gene on the chromosome, fit the SVM/KNN/PCA +
# natural-spline pipeline on cis-SNP genotypes and record R^2 statistics.
for(i in 1:length(explist)){
cat(i,"/",length(explist),"\n")
gene <- explist[i]
start <- popexp$s1[i] - 1e6 ### 1Mb gene lower bound for cis-eQTLS
end <- popexp$s2[i] + 1e6 ### 1Mb gene upper bound for cis-eQTLs
cisgenos <- subset(popgt,popgt[,3]>=start & popgt[,3]<=end) ### pull cis-SNP genotypes
rownames(cisgenos) <- cisgenos$pos #carry positions along
cismat <- as.matrix(cisgenos[,4:dim(cisgenos)[2]]) #get dosages only in matrix format for glmnet
cismat <- t(cismat) #transpose to match previous code
expmat <- as.matrix(popexp[,9:dim(popexp)[2]]) #make exp only in matrix format for glmnet
expmat <- t(expmat) #transpose to match previous code
colnames(expmat) <- popexp$geneid #carry gene IDs along
if(is.null(dim(cismat))){
#if(is.null(dim(cismat)) | gene=="ILMN_1740816"){ #special case for GIH alpha=0 to skip Error in predmat[which, seq(nlami)] = preds : replacement has length zero
bestbetas <- data.frame() ###effectively skips genes with 0 cis-SNPs
}else{
minorsnps <- subset(colMeans(cismat), colMeans(cismat,na.rm=TRUE)>0) ###pull snps with at least 1 minor allele###
minorsnps <- names(minorsnps)
cismat <- cismat[,minorsnps]
if(length(minorsnps) < 2){###effectively skips genes with <2 cis-SNPs
bestbetas <- data.frame() ###effectively skips genes with <2 cis-SNPs
}else{
exppheno <- expmat[,gene] ### pull expression data for gene
exppheno <- scale(exppheno, center=T, scale=T) ###scale to compare across genes
exppheno[is.na(exppheno)] <- 0
#Code that I was responsible for writing with the creation of the data set
colnames(exppheno) <- "exp"
# Continuous (pe) and binarised (pe.best.k) versions of expression + dosages;
# the binary label is "expression above/below the scaled mean".
pe <- cbind(exppheno, cismat)
pe <- as.data.frame(pe)
pe.best.k <- cbind(exppheno,cismat)
pe.best.k <- as.data.frame(pe.best.k)
pe.best.k$exp <- as.factor(ifelse(pe.best.k$exp > 0, "1", "0"))
# Choose k for KNN by leave-one-out CV over k = 1..15 (Manhattan distance).
train.k<-train.kknn(exp~.,pe.best.k,ks=c(1:15),distance=1)
best.k<-train.k$best.parameters$k
#Runs the model with the best k
# Fit an SVM on the continuous data; its support vectors define the KNN
# training set for classifying every sample into the two expression groups.
svm.data<-pe[,-max(dim(pe.best.k)[2])]
model.svm<-svm(exp~.,data=svm.data,scale=FALSE)
final.train<-pe.best.k[model.svm$index,-max(dim(pe.best.k)[2])]
final.test<-pe.best.k[-max(dim(pe.best.k)[2])]
model.knn<-kknn(exp~.,final.train,final.test,k=best.k,kernel="rectangular",dist=1)
fv <- model.knn$fitted.values
# Split samples by predicted class, then fit a separate PCA + natural-spline
# regression of expression on the leading components within each group
# (components kept while they explain >= 10% of variance).
gp1 <- which(fv==0)
dat0 <- as.data.frame(pe[gp1,])
dat00<-as.data.frame(pe[gp1,-1])
dat1 <- as.data.frame(pe[-gp1,])
dat01<-as.data.frame(pe[-gp1,-1])
pc0 <- prcomp(t(dat00))
pcom0 <- pc0$rotation
z<-summary(pc0)
l<-z$importance[2,]
m<-which(l>=.1)
co0<-max(m)
st0 <- lm(paste("dat0$exp ~", paste(paste0("ns(pcom0[,", 1:co0, "],3)"), collapse = "+")))
mod.sum0 <-summary(st0)
pc1 <- prcomp(t(dat01))
pcom1 <- pc1$rotation
z1<-summary(pc1)
l1<-z1$importance[2,]
m1<-which(l1>=.1)
co1<-max(m1)
st1 <- lm(paste("dat1$exp ~", paste(paste0("ns(pcom1[,", 1:co1, "],3)"), collapse = "+")))
mod.sum1 <- summary(st1)
# Combined R^2 across both groups from pooled SSE/SST.
ybar0<-mean(dat0$exp)
ybar1<-mean(dat1$exp)
SSE<-sum(sum((st0$fitted.values-dat0$exp)**2),sum((st1$fitted.values-dat1$exp)**2))
SST<-sum(sum((dat0$exp-ybar0)**2),sum((dat1$exp-ybar1)**2))
R2.f<-1-(SSE/SST)
}
}
# NOTE(review): when a gene is skipped (0 or <2 cis-SNPs), mod.sum0/mod.sum1/
# R2.f still hold the PREVIOUS gene's values, so the lines below record stale
# results for skipped genes (and error on the very first gene if it is
# skipped). Consider guarding these assignments on the fitted branch.
R2.01[i] <- mod.sum0$r.squared
A.R2.01[i] <- mod.sum0$adj.r.squared
R2.11[i] <- mod.sum1$r.squared
A.R2.11[i] <- mod.sum1$adj.r.squared
R2.model1[i]<-R2.f
}
|
e1d020b2afd1414a6e0bca676a87a2f7bb923e30
|
487a34c5ace2b1a60229c5403335de734616561e
|
/4f2-quantmod1.R
|
5b3e4946de7e61a27e1d271f28e335940ed828ce
|
[] |
no_license
|
hhenoida/dataanalytics
|
39d261a288f90c97effc0358d49fd2ffb8566578
|
c563272f7890a0731fbb9e24e5ff6309ea8586ee
|
refs/heads/master
| 2020-03-31T19:50:12.135620
| 2018-10-29T12:40:39
| 2018-10-29T12:40:39
| 152,513,808
| 259
| 15
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,688
|
r
|
4f2-quantmod1.R
|
# Source tutorial: https://ntguardian.wordpress.com/2017/03/27/introduction-stock-market-data-r-1/
# Stock-analysis walkthrough: download daily prices with quantmod, chart them,
# and compute simple returns, log returns, and moving averages.
# Install-on-demand bootstrap: require() returns FALSE when quantmod is absent,
# in which case it is installed and then attached.
if (!require("quantmod")) {
install.packages("quantmod")
library(quantmod)
}
# Date window for the first batch of downloads.
start <- as.Date("2017-01-01")
end <- as.Date("2018-10-28")
# Download Apple (ticker AAPL) daily OHLCV data from Yahoo! Finance.
# getSymbols() assigns the result directly into the global environment under
# the ticker's name (this auto-assignment may be deprecated in the future).
getSymbols("AAPL", src = "yahoo", from = start, to = end)
# Inspect the downloaded object (an xts time series).
class(AAPL)
head(AAPL)
tail(AAPL)
plot(AAPL[, "AAPL.Close"], main = "AAPL")
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white")
# Download Microsoft (MSFT) and Google (GOOG — Alphabet, Inc., the holding
# company, is what actually trades under this ticker).
getSymbols(c("MSFT", "GOOG"), src = "yahoo", from = start, to = end)
# Combine the three closing-price series into a single xts object.
stocks = as.xts(data.frame(AAPL = AAPL[, "AAPL.Close"], MSFT = MSFT[, "MSFT.Close"], GOOG = GOOG[, "GOOG.Close"]))
head(stocks)
tail(stocks)
# Plot all three series as lines on one panel; as.zoo() is used because the
# zoo plot method supports multiple series on the same plot.
plot(as.zoo(stocks), screens = 1, lty = 1:3, xlab = "Date", ylab = "Price")
legend("right", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
# Two-axis variant: AAPL/MSFT on the left axis, GOOG overlaid on a right-hand
# axis via par(new = TRUE). The statement order here is significant.
plot(as.zoo(stocks[, c("AAPL.Close", "MSFT.Close")]), screens = 1, lty = 1:2, xlab = "Date", ylab = "Price")
par(new = TRUE)
plot(as.zoo(stocks[, "GOOG.Close"]), screens = 1, lty = 3, xaxt = "n", yaxt = "n", xlab = "", ylab = "")
axis(4)
mtext("Price", side = 4, line = 3)
legend("topleft", c("AAPL (left)", "MSFT (left)", "GOOG"), lty = 1:3, cex = 0.5)
# Bootstrap magrittr for the %>% pipe operator.
if (!require("magrittr")) {
install.packages("magrittr")
library(magrittr)
}
# Price relative to the first observation (growth of $1 invested on day one).
stock_return = apply(stocks, 1, function(x) {x / stocks[1,]}) %>% t %>% as.xts
head(stock_return)
plot(as.zoo(stock_return), screens = 1, lty = 1:3, xlab = "Date", ylab = "Return")
legend("topleft", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
# Daily log returns: first difference of log prices.
stock_change = stocks %>% log %>% diff
head(stock_change)
plot(as.zoo(stock_change), screens = 1, lty = 1:3, xlab = "Date", ylab = "Log Difference")
legend("topleft", c("AAPL", "MSFT", "GOOG"), lty = 1:3, cex = 0.5)
# Candlestick chart with a 20-day simple moving average overlay.
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white")
addSMA(n = 20)
# Re-download with a longer history (note: `start` is reassigned; `end` is reused).
start = as.Date("2010-01-01")
getSymbols(c("AAPL", "MSFT", "GOOG"), src = "yahoo", from = start, to = end)
# The `subset` argument restricts the charted window using xts-style
# 'YYYY-MM-DD/YYYY-MM-DD' subsetting: the left side of the / is the start date,
# the right side the end date, and a blank side means the earliest/latest date
# in the series. This works for any xts object, e.g. AAPL.
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/")
addSMA(n = 20)
# Same window with three moving averages (20/50/200 days).
candleChart(AAPL, up.col = "black", dn.col = "red", theme = "white", subset = "2016-01-04/")
addSMA(n = c(20, 50, 200))
# Non-US example: State Bank of India on the NSE.
getSymbols("SBIN.NS", src = "yahoo", from = start, to = end)
tail(SBIN.NS)
|
23c8bfed8f5ac132400863f7072ff536efd0efbb
|
81be8ee066df08136b774f8a95f97d2fbd90a1eb
|
/man/at_inq.Rd
|
f9f2c4c03289af04f5f88af562fe52a872e384a2
|
[] |
no_license
|
patperu/AriaToscana
|
7e80dbad0a292276a2be7c0c347c0d52f0569fee
|
8c289c74c5c4595643227301ea72a31d436739ec
|
refs/heads/master
| 2021-01-18T21:52:33.076624
| 2016-05-28T10:07:19
| 2016-05-28T10:07:19
| 51,249,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 981
|
rd
|
at_inq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/at_inq.R
\docType{data}
\name{at_inq}
\alias{at_inq}
\title{Measurements of Air Pollutants (2008 - 2014)}
\format{Data frame with columns
\describe{
\item{stazione}{Stazione, first two characters are the Provincia}
\item{parameter}{inquinante}
\item{year}{Year}
\item{month}{Month}
\item{day}{Day}
\item{hour}{Hour}
\item{value}{Measure}
\item{valid}{0 = measure is not valid, 1 = measure is valid}
}}
\usage{
at_inq
}
\description{
This dataset contains the hourly measurements of the following air pollutants;
the Details section lists each pollutant together with its number of observations.
}
\details{
BENZENE 263040
CH4 184128
CO 1507846
EBENZENE 254256
H2S 210408
HCL 26304
M-XYLENE 192912
MP-XILENE 8784
MP-XYLENE 26280
N-EPTANO 70152
N-ESANO 70152
N-OTTANO 70152
NH3 61368
NMHC 184128
NO 3015888
NO2 3015888
NOX 3015888
O-XYLENE 245472
O3 1446620
P-XYLENE 227976
SO2 692616
TN 61368
TNX 61368
TOLUENE 263040
}
\author{
Patrick Hausmann, Source: ARPAT Toscana
}
\keyword{datasets}
|
9cd8edb6c05bab695ad8d42031a5e1f765862ded
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/1941_2/rinput.R
|
10e67ff25f295db9e8bb99e6538002565706a9a2
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the phylogenetic tree stored in 1941_2.txt (Newick format) and write
# the unrooted tree back out for downstream codeml processing.
library(ape)
phylo_tree <- read.tree("1941_2.txt")
phylo_unrooted <- unroot(phylo_tree)
write.tree(phylo_unrooted, file = "1941_2_unrooted.txt")
|
550b317523f7c43760dc1dbeff60641a1aaa0c49
|
384c3dbc571be91c6f743d1427dec00f13e0d8ae
|
/r/kernels/stevengolo-titanic-machine-learning-from-disaster/script/titanic-machine-learning-from-disaster.R
|
53155f021c6d18c3f9b792519e7a7a8dcdaa6691
|
[] |
no_license
|
helenaK/trustworthy-titanic
|
b9acdd8ca94f2fa3f7eb965596eed4a62821b21e
|
ade0e487820cf38974561da2403ebe0da9de8bc6
|
refs/heads/master
| 2022-12-09T20:56:30.700809
| 2020-09-10T14:22:24
| 2020-09-10T14:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,036
|
r
|
titanic-machine-learning-from-disaster.R
|
## ----setup, include=FALSE--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE)
## ----packages, message=FALSE, warning=FALSE, include=FALSE, paged.print=FALSE----------------------------------------------------------------------------------------------------------------------------------------------------
## Importing packages
library(glmnet)
library(gridExtra)
library(kableExtra)
library(knitr)
library(randomForest)
library(reshape2)
library(tidyverse)
# Create custom theme for ggplot2
# Custom ggplot2 theme: theme_minimal with enlarged text sizes and extra axis
# title margins, using the Times family by default. Built as theme_minimal()
# patched via %+replace% with the overrides below.
theme_custom <- function(base_family = "Times") {
  overrides <- theme(
    plot.title = element_text(size = 20),
    plot.subtitle = element_text(size = 16, vjust = -1),
    axis.title = element_text(size = 18),
    axis.text = element_text(size = 16),
    axis.title.y = element_text(margin = margin(t = 0, r = 20, b = 0, l = 0), angle = 90),
    axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)),
    strip.text.x = element_text(size = 16)
  )
  theme_minimal(base_family = base_family) %+replace% overrides
}
## ----load, message=FALSE, warning=FALSE ----
# Read the Kaggle train/test splits and stack them so feature engineering and
# imputation are applied consistently to both. Test rows keep Survived == NA,
# which is how they are separated out again before modeling.
train <- read_csv('../input/train.csv')
test <- read_csv('../input/test.csv')
titanic <- train %>%
  bind_rows(test) %>%
  select(-PassengerId) %>%
  # Pass `factor` directly instead of the deprecated funs(factor(.)).
  mutate_at(vars(Pclass, Sex, Embarked), factor)
## ----class, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Horizontal bar chart: number of passengers per ticket class (Pclass).
titanic %>%
ggplot(aes(x = Pclass)) +
geom_bar(width = 0.5, fill = "#56B4E9") +
coord_flip() +
labs(title = 'Count of the passengers by class') +
scale_x_discrete(name = "Passenger's class") +
scale_y_continuous(name = "Count", breaks = seq(0, 900, 100)) +
theme_custom()
## ----sex, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Horizontal bar chart: number of passengers by sex.
titanic %>%
ggplot(aes(x = Sex)) +
geom_bar(width = 0.5, fill = "#56B4E9") +
coord_flip() +
labs(title = 'Count of the passengers by sex') +
scale_x_discrete(name = "Passenger's sex") +
scale_y_continuous(name = "Count", breaks = seq(0, 900, 100)) +
theme_custom()
## ----title, echo=TRUE, message=FALSE, warning=FALSE ----
# Extract the honorific title from the passenger's name, which is formatted
# "Surname, Title. Given names": gsub(pattern, replacement, x) keeps only the
# captured group between ", " and the first ".".
Title <- gsub("^.*, (.*?)\\..*$", "\\1", titanic$Name)
# Collapse rarely occurring titles into a single 'Other' level. Written as an
# explicit if_else() call; the original relied on %in% and %>% sharing
# left-to-right precedence, which parses the same but is easy to misread.
title_high <- c('Mr', 'Miss', 'Mrs', 'Master')
Title <- if_else(Title %in% title_high, Title, 'Other')
# Add the Title column to the dataframe as a factor
# (`factor` replaces the deprecated funs(factor(.))).
titanic <- titanic %>%
  add_column(Title) %>%
  mutate_at(vars(Title), factor)
## ----name, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Horizontal bar chart: number of passengers per (collapsed) title.
titanic %>%
ggplot(aes(x = Title)) +
geom_bar(width = 0.5, fill = "#56B4E9") +
coord_flip() +
labs(title = 'Count of the passengers by title') +
scale_x_discrete(name = "Passenger's title") +
scale_y_continuous(name = "Count", breaks = seq(0, 900, 100)) +
theme_custom()
## ----missing_port, echo=FALSE, message=FALSE, warning=FALSE ----
# Show the rows whose port of embarkation is missing (rendered as an HTML table).
titanic %>%
filter(is.na(Embarked)) %>%
kable(format = 'html') %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
## ----plot_missing_embarked, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Fare distribution per port for first-class passengers, used to decide which
# port to impute for the two missing values above.
titanic %>%
filter(Pclass == 1) %>%
ggplot(aes(x = Embarked, y = Fare)) +
geom_boxplot(colour = "black", fill = "#56B4E9", outlier.colour = 'red') +
ggtitle("Fare depending on the port of embarkation for the first class") +
scale_x_discrete(name = "Port of embarkation") +
scale_y_continuous(name = "Fare") +
theme_custom()
## ----inputation_port, echo=TRUE, message=FALSE, warning=FALSE ----
# Impute Cherbourg ("C") for the two passengers with a missing port, based on
# the fare/port boxplot above. Row indices 62 and 830 are hard-coded.
titanic[62, "Embarked"] <- "C"
titanic[830, "Embarked"] <- "C"
## ----port, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Horizontal bar chart: number of passengers per port of embarkation.
titanic %>%
ggplot(aes(x = Embarked)) +
geom_bar(width = 0.5, fill = "#56B4E9") +
coord_flip() +
labs(title = 'Count of the passengers by port of embarkation') +
scale_x_discrete(name = "Passenger's port of embarkation") +
scale_y_continuous(name = "Count", breaks = seq(0, 900, 100)) +
theme_custom()
## ----missing_fare, echo=FALSE, message=FALSE, warning=FALSE ----
# Show the single row whose Fare is missing (rendered as an HTML table).
titanic %>%
filter(is.na(Fare)) %>%
kable(format = 'html') %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
## ----plot_fare, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Fare density for the missing passenger's stratum (Southampton, third class),
# used to justify a median imputation.
titanic %>%
filter(Embarked == 'S', Pclass == 3) %>%
ggplot(aes(x = Fare)) +
geom_density(kernel = 'gaussian', colour = "#FFFFFF", fill = "#56B4E9") +
ggtitle("Density of Fare", subtitle = "Embarked in Southampton in third class") +
xlab("Fare ($)") + ylab("Density") +
theme_custom()
## ----fare_inputation, echo=TRUE, message=FALSE, warning=FALSE ----
# Impute the missing Fare (hard-coded row 1044) with the stratum median.
titanic[1044, "Fare"] <- titanic %>% filter(Embarked == 'S', Pclass == 3) %>% pull(Fare) %>% median(na.rm = TRUE)
## ----plot_age, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Age distribution BEFORE imputation: boxplot and density side by side.
grid.arrange(
titanic %>%
ggplot(aes(y = Age)) +
geom_boxplot(fill = "#56B4E9", outlier.colour = 'red') +
coord_flip() +
ggtitle("Boxplot of Age") +
scale_x_continuous(breaks = NULL) +
theme_custom(),
titanic %>%
ggplot(aes(x = Age)) +
geom_density(kernel = 'gaussian', colour = "#FFFFFF", fill = "#56B4E9") +
ggtitle("Density of Age") +
xlab("Age") + ylab("Density") +
theme_custom(),
ncol = 2, nrow = 1)
## ----ridge_regression_age, echo=TRUE, message=FALSE, warning=FALSE ----
# Impute missing Age values with a ridge regression fitted on the passengers
# whose Age is known, using the remaining features as predictors.
# Split the dataset into the ones with Age and the ones without Age.
titanic.with.age <- titanic %>%
filter(!is.na(Age)) %>%
select(-c(Survived, Name, Ticket, Cabin))
titanic.without.age <- titanic %>%
filter(is.na(Age)) %>%
select(-c(Survived, Name, Ticket, Cabin)) %>%
mutate(Age = 0)
# Build a model matrix of the data. lm() is used only to obtain the design
# matrix (dummy coding of factors); its coefficients are never used.
titanic.lm <- lm(Age ~ ., data = titanic.with.age)
titanic.with.age.model.matrix <- model.matrix(titanic.lm, data = titanic.with.age)[,-1]
# Perform the Ridge Regression (alpha = 0)
titanic.age.model <- glmnet(titanic.with.age.model.matrix, titanic.with.age$Age, alpha = 0)
# Predict the missing ages at the CV-chosen penalty. Note cv.glmnet() uses
# random folds, so lambda.min (and the imputed values) vary between runs.
titanic.without.age$Age <- predict(titanic.age.model,
newx = model.matrix(titanic.lm, data = titanic.without.age)[, -1],
s = cv.glmnet(titanic.with.age.model.matrix, titanic.with.age$Age, alpha = 0)$lambda.min,
type = 'link')
# Write the predicted ages back into the full dataset.
titanic[is.na(titanic$Age), "Age"] <- titanic.without.age$Age
## ----plot_age2, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Age distribution AFTER imputation, for comparison with the pre-imputation plot.
grid.arrange(
titanic %>%
ggplot(aes(y = Age)) +
geom_boxplot(fill = "#56B4E9", outlier.colour = 'red') +
coord_flip() +
ggtitle("Boxplot of Age") +
scale_x_continuous(breaks = NULL) +
theme_custom(),
titanic %>%
ggplot(aes(x = Age)) +
geom_density(kernel = 'gaussian', colour = "#FFFFFF", fill = "#56B4E9") +
ggtitle("Density of Age") +
xlab("Age") + ylab("Density") +
theme_custom(),
ncol = 2, nrow = 1)
## ----plot_sipsp, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Bar chart of the SibSp count (siblings/spouses aboard).
titanic %>%
ggplot(aes(x = as.factor(SibSp))) +
geom_bar(width = 0.5, fill = "#56B4E9") +
coord_flip() +
labs(title = 'Count of the passengers number of siblings/spouses') +
scale_x_discrete(name = "Number of Siblings/Spouses") +
scale_y_continuous(name = "Count", breaks = seq(0, 900, 100)) +
theme_custom()
## ----plot_parch, echo=FALSE, fig.height=5, fig.width=15, message=FALSE, warning=FALSE ----
# Bar chart of the Parch count (parents/children aboard).
titanic %>%
ggplot(aes(x = as.factor(Parch))) +
geom_bar(width = 0.5, fill = "#56B4E9") +
coord_flip() +
labs(title = 'Count of the passengers number of parents/children') +
scale_x_discrete(name = "Number of Parents/Children") +
scale_y_continuous(name = "Count", breaks = seq(0, 1000, 100)) +
theme_custom()
## ----del_cabin, echo=TRUE, message=FALSE, warning=FALSE ----
# Drop Cabin (mostly missing) before modeling.
titanic <- titanic %>% select(-Cabin)
## ----del_ticket, echo=TRUE, message=FALSE, warning=FALSE ----
# Drop Ticket (free-text, not used as a feature).
titanic <- titanic %>% select(-Ticket)
## ----model, echo=TRUE, message=FALSE, warning=FALSE ----
# Recover the Kaggle splits: rows with a known Survived are the training data,
# rows with NA Survived are the competition test set.
train <- titanic %>% select(-Name) %>% filter(!is.na(Survived))
test <- titanic %>% select(-Name) %>% filter(is.na(Survived))
# Hold out roughly one third of the training rows for validation.
set.seed(42)
sample <- sample(c(TRUE, FALSE), nrow(train), replace = TRUE, prob = c(2/3, 1/3))
train.val <- train[sample, ]
test.val <- train[!sample, ]
# Ridge-penalized logistic regression (alpha = 0, binomial family). lm() is
# used only to build the design matrix with dummy-coded factors.
train.lm <- lm(Survived ~ ., data = train.val)
X <- model.matrix(train.lm, data = train.val)[ , -1]
Y <- train.val$Survived
train.ridge.model <- glmnet(X, Y, alpha = 0, family = 'binomial')
# Cross-validate the penalty ONCE and reuse it. The original called
# cv.glmnet() separately inside each predict(): wasteful, and — because CV
# folds are random — the two calls could pick two different lambdas. It also
# used the default gaussian family; the CV criterion now matches the fitted
# binomial model.
lambda.best <- cv.glmnet(X, Y, alpha = 0, family = 'binomial')$lambda.min
# Class predictions on the validation split.
test.val.predict <- predict(train.ridge.model,
                            s = lambda.best,
                            newx = model.matrix(train.lm, data = test.val)[ , -1],
                            type = 'class')
## ----submission, echo=TRUE, message=FALSE, warning=FALSE ----
# Survived must exist (any value) for model.matrix() to build the test design.
test$Survived <- 0
test.predict <- predict(train.ridge.model,
                        s = lambda.best,
                        newx = model.matrix(train.lm, data = test)[ , -1],
                        type = 'class')
# NOTE(review): PassengerId was dropped when `titanic` was built, so
# row.names(test.predict) are plain row indices, not real passenger ids —
# verify the submission against the ids in the original test.csv.
result <- data.frame(PassengerID = row.names(test.predict),
                     Survived = test.predict[ , 1])
write.csv(result, 'results.csv', row.names = FALSE)
|
397e10e2e63a78a34e6439afa76914c530ba196c
|
8363dcba0ae7bdb88bc1d9763ec4f55a0b80cd6b
|
/R-proj/sensitivity/one_run.R
|
bc62a9f9764014988bc0462cc002f0f31f7cd3ce
|
[] |
no_license
|
FredHutch/COVID_modeling_sensitivity
|
359753ce19434829b09401d687b7bb40379bdfc3
|
c0fe21db64e29dd5e70488da4ad60cc37cfd1747
|
refs/heads/master
| 2023-05-08T18:23:37.274565
| 2021-06-01T21:12:16
| 2021-06-01T21:12:16
| 351,925,937
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,488
|
r
|
one_run.R
|
set.seed(20)
print_legend <- 0
setwd("..")  # run from R-proj directory
source("covid-model.R")   # model functions (get_model_data_param_sets, plot_scenarios, ...)
source("kc_read-data.R")  # King County data (the_data, the_pop, params_fix, state, ...)

# Settings used in all scenarios (for now)
# max_sd<-0.6 # how far to tighten SD

# Usage:
# Rscript one_run.R $dist $age_code $vac_eff_i $vac_eff_s $vac_eff_p $vac_rate $mut_inf $sd_lower $sd_upper $sd_loosen $sd_tighten $coverage $imports $sev_inf $sd_delta
# Parse the 15 positional arguments. All arrive as character; numeric ones are
# converted with as.numeric() where used below.
args <- commandArgs(trailingOnly = TRUE)  # TRUE, not T: T is reassignable
dist <- args[1]         # example: prop, adults, seniors
prior_group <- args[2]  # example: 0 (prop), 1, (0-19), etc. (set as a mask to allow multiple groups)
vei <- args[3]          # example: 0.1
ves <- args[4]          # example: 0.9
vep <- args[5]          # example: 0.1
rate <- args[6]         # example: 3500
new_strain_fact <- as.numeric(args[7])  # increase in inf for new strain, example: 1.35 (35%), 1.5(50%), etc.
min_sd <- args[8]       # how far to relax SD
max_sd <- args[9]       # how far to tighten SD
trig_min <- args[10]    # bi-weekly case rate per 100k pop for loosening SD
trig_max <- args[11]    # bi-weekly case rate per 100k pop for tightening SD
cover <- args[12]       # age-group vax coverage (fraction)
imports <- args[13]     # daily new mutation imports
severity <- args[14]    # new mutation increase in severity
sd_delta <- args[15]    # how slowly to relax SD
plot_em <- 0            # 0 = run the model and save; non-zero = reload a saved run and plot

new_check_date <- 0  # no switch from case triggers to percent change in cases/hospitalizations
vac_coverage <- as.numeric(cover)
new_strain_intros <- as.numeric(imports)
new_strain_severity <- as.numeric(severity)  # impact on hosp & death vs main variant
vac_exp_rate <- 0

# read in calibration fit parameters (representing all calib months)
result_file <- "calibration/res_test_dec_fit.Rdata"
intervention_day <- yday(ymd("2020-5-15"))  # Start of intervention protocol
int_rampup <- 14                            # Time to achieve full intervention effect
load(file = result_file)                    # provides `res`
calib_vals <- res$par
calib_params <- get_params(calib_vals, names(res$par), params_fix)

# set interventions / dynamic social-distancing policy
calib_params$beta_d_fact <- 0.5
calib_params$dynamic_sd <- TRUE  # TRUE, not T
calib_params$sd_trans <- 14
calib_params$dynamic_sd_delta <- as.numeric(sd_delta)
calib_params$dynamic_sd_min <- as.numeric(min_sd)
calib_params$dynamic_sd_min_snrs <- as.numeric(min_sd) + 0.2
calib_params$dynamic_sd_max <- as.numeric(max_sd)
calib_params$dynamic_sd_max_snrs <- as.numeric(max_sd) + 0.2
calib_params$sd_inc <- c(0, 0, 0, 0)
# Trigger = midpoint of the two case-rate thresholds, hysteresis = half their
# spread, both converted from "cases per 100k over two weeks" to counts.
calib_params$dynamic_sd_limit <- ((as.numeric(trig_min) + as.numeric(trig_max)) / 2) * the_pop / 100000
calib_params$dynamic_sd_hyster <- ((as.numeric(trig_max) - as.numeric(trig_min)) / 2) * the_pop / 100000
calib_params$severity <- 1

# Turn off all childhood vaccinations (0-19)
calib_params$VacChild16plus <- 0
calib_params$VacChild12plus <- 0
calib_params$VacChild <- 0

# Nix KC vaccine schedule!
vac_schedule <- matrix(c(366 + yday(ymd("2021-1-1")), 0,   # Start of vaccination protection (1st point)
                         366 + yday(ymd("2021-1-15")), 0), # vaccination protection (2nd point)
                       byrow = TRUE, nrow = 2)
int_param_names <- c("vac_on")
interventions <- matrix(c(0, 1),
                        byrow = TRUE, nrow = 2)
row.names(interventions) <- c("No Vaccine", "vax")
colnames(interventions) <- int_param_names
interventions_abbr <- row.names(interventions)
int_rampup <- 14  # Time to achieve full intervention effect

# Vaccine efficacy parameters; the same efficacy is applied to both doses/strains.
vac_eff_hi <- 0
vac_final_rate <- as.numeric(rate)
vac_first <- as.numeric(prior_group)
vac_eff_inf <- as.numeric(vei)
vac_eff_pi <- as.numeric(vep)
vac_eff_susc <- as.numeric(ves)
vac_eff_inf1 <- vac_eff_inf
vac_eff_inf2 <- vac_eff_inf
vac_eff_pi1 <- vac_eff_pi
vac_eff_pi2 <- vac_eff_pi
vac_eff_susc1 <- vac_eff_susc
vac_eff_susc2 <- vac_eff_susc
vac_init_doy <- 366 + yday(ymd("2021-1-15"))  # Start of vaccination protocol
vac_stop_doy <- 366 + yday(ymd("2021-12-31")) # End of vaccination protocol
vac_mutate <- 1
vac_mutate_time <- 366 + yday(ymd("2021-1-01"))
end_day <- 370 + vac_init_doy
calib_doy <- yday(ymd("2020-12-31"))  # End of model calibration
calib_params$calib_doy <- calib_doy

# Unique file stem encoding every sensitivity parameter of this run.
suffix <- paste0(dist, "_vei_", vei, "_ves_", ves, "_vep_", vep, "_sdmin_", min_sd, "_sdmax_", max_sd, "_rate_", rate, "_mut_", new_strain_fact, "_trigmin_", trig_min, "_trigmax_", trig_max, "_cover_", cover, "_import_", imports, "_sever_", severity, "_sddelta_", sd_delta)
print(suffix)

# Run-and-save mode simulates, persists the scenarios, and exits; plot mode
# reloads a previously saved run from disk instead.
if (plot_em == 0) {
  scenarios_out <- get_model_data_param_sets(interventions, int_param_names, calib_params, end_day, state)
  saveRDS(scenarios_out, file = paste0("sens_data/", suffix, ".rds"))
  quit()
} else {
  scenarios_out <- readRDS(file = paste0("sens_data/", suffix, ".rds"))
}
cols <- c("black", "red")
x_lim <- NULL
setwd("sens_out")

# Helper: render one two-scenario comparison plot to "<stem>_<suffix>.pdf".
# The device size, margins, palette and vaccination marker are identical for
# every figure, so only the axis data, observed series, label, y-range and
# the optional population denominator vary per call. (Replaces ten copies of
# the same pdf/par/plot_scenarios/dev.off boilerplate.)
save_scenario_pdf <- function(stem, x, y, obs, y_lab, y_lim, totalpop = NULL) {
  pdf(paste0(stem, "_", suffix, ".pdf"), width = 5, height = 3.5)
  par(mar = 0.1 + c(3, 4, 1, 4), mgp = c(3, 0.5, 0), oma = c(3, 2, 0, 0))
  plot_scenarios(x, y, yday(the_data$date), obs,
                 y_lab = y_lab, x_lim = x_lim, col_pal = cols, col_idx = 1:2, y_lim = y_lim,
                 delta = NULL, vaccination_date = vac_init_doy, calib_date = NULL,
                 scenarios_out$sd_2[, 1], totalpop)
  dev.off()
}

# Day axis for daily (first-differenced) series: diff() drops one point.
doy <- scenarios_out$doy
doy <- doy[-1]

# Daily series are column-wise first differences of the cumulative outputs.
cases <- apply(scenarios_out$cases, 2, diff)
save_scenario_pdf("daily_cases", doy, cases, the_data$cases, "Daily Diagnosed Cases", c(0, 2000))

deaths <- apply(scenarios_out$deaths, 2, diff)
save_scenario_pdf("daily_deaths", doy, deaths, the_data$deaths, "Daily Deaths", c(0, 40))

inf <- apply(scenarios_out$inf, 2, diff)
save_scenario_pdf("daily_infs", doy, inf, NA, "Daily Infections", c(0, 25000))
save_scenario_pdf("log_daily_infs", doy, log10(inf), NA, "Log Daily Infections", c(0, 4))

cum_hosp <- apply(scenarios_out$cum_hosp, 2, diff)
save_scenario_pdf("daily_hosps", doy, cum_hosp, NA, "Daily Hospitalizations", c(0, 150))

# NOTE(review): as in the original, cumulative deaths are plotted against the
# shortened daily axis `doy` (one fewer point than the series) — confirm that
# plot_scenarios() tolerates the length mismatch or switch to scenarios_out$doy.
deaths <- scenarios_out$deaths
save_scenario_pdf("deaths", doy, deaths, the_data$deaths, "Cumulative Deaths", c(0, 4000))

# Cumulative/state series use the full day axis and (where relevant) the
# county population as denominator.
save_scenario_pdf("other_SD", scenarios_out$doy, scenarios_out$sd_2, NA,
                  "Social Distancing (Non-seniors)", c(0, 1), totalpop = 0)
save_scenario_pdf("infs", scenarios_out$doy, scenarios_out$inf, NA,
                  "Cumulative Infections", c(0, 0.6 * the_pop), totalpop = the_pop)
save_scenario_pdf("inf1", scenarios_out$doy, scenarios_out$inf1, NA,
                  "Main Variant Infections", c(0, 0.6 * the_pop), totalpop = the_pop)
save_scenario_pdf("inf2", scenarios_out$doy, scenarios_out$inf2, NA,
                  "New Variant Infections", c(0, 0.6 * the_pop), totalpop = the_pop)
pdf(paste0("daily_inf1_",suffix,".pdf"), width = 5, height = 3.5)
par(mar = 0.1 + c(3, 4, 1, 4), mgp = c(3, 0.5, 0), oma = c(3,2,0,0))
inf = scenarios_out$inf1
inf = apply(inf,2, diff)
plot_scenarios(doy, inf, yday(the_data$date), NA,
y_lab = "Daily Infections Main Variant", x_lim = x_lim, col_pal = cols, col_idx = 1:2,y_lim = c(0,8000),#lwd=lwds,
delta = NULL, vaccination_date = vac_init_doy, calib_date = NULL, scenarios_out$sd_2[,1], NULL)
dev.off()
pdf(paste0("daily_inf2_",suffix,".pdf"), width = 5, height = 3.5)
par(mar = 0.1 + c(3, 4, 1, 4), mgp = c(3, 0.5, 0), oma = c(3,2,0,0))
inf = scenarios_out$inf2
inf = apply(inf,2, diff)
plot_scenarios(doy, inf, yday(the_data$date), NA,
y_lab = "Daily Infections New Variant", x_lim = x_lim, col_pal = cols, col_idx = 1:2,y_lim = c(0,8000),#lwd=lwds,
delta = NULL, vaccination_date = vac_init_doy, calib_date = NULL, scenarios_out$sd_2[,1], NULL)
dev.off()
pdf(paste0("daily_perc_inf2_",suffix,".pdf"), width = 5, height = 3.5)
par(mar = 0.1 + c(3, 4, 1, 4), mgp = c(3, 0.5, 0), oma = c(3,2,0,0))
inf = scenarios_out$inf
inf = apply(inf,2, diff)
inf2 = scenarios_out$inf2
inf2 = apply(inf2,2, diff)
perc_inf = 100 * inf2 / inf
startx= 366 + yday(ymd("2021-1-01")) # Start of x-axis
endx= 366 + yday(ymd("2021-12-01")) # End of x-axis
plot_scenarios(doy, perc_inf, yday(the_data$date), NA,
y_lab = "% Daily Infections New Variant", x_lim = c(startx,endx), col_pal = cols, col_idx = 1:2,y_lim = c(0,100),#lwd=lwds,
delta = NULL, vaccination_date = vac_init_doy, calib_date = NULL, scenarios_out$sd_2[,1], NULL,all_months=1)
dev.off()
pdf(paste0("legend_",suffix,".pdf"), width = 5, height = 3.5)
par(mar = 0.1 + c(3, 1, 1, 4), mgp = c(3, 0.5, 0), oma = c(3,1,0,0))
plot.new()
legend("topleft",
legend = c("No Vaccine","Vaccine", "Vaccination Start"),
col = c(cols, "orange"), lty = c(1,1,2),
lwd = c(2,2,2), bty = "n" , cex=1.5)
dev.off()
|
fd6226a7e64f8fd7dab72b680272984338ed8cc8
|
6b36070e2801967efcfd1f0d4b39d5e7b1623458
|
/demo/aggResults2.R
|
127a2cf48183d083eeb2fefb165b1a35c94b939a
|
[] |
no_license
|
nmmarquez/PointPolygon
|
fbcfd351e2f38a5f30a832fdbe4ff8f15fca3eef
|
a2f9386c3a737528e5dc0db0222c2f6bb6d7a83a
|
refs/heads/master
| 2021-06-22T14:32:21.422859
| 2020-12-06T22:47:00
| 2020-12-06T22:47:00
| 155,793,434
| 1
| 2
| null | 2020-03-05T00:24:59
| 2018-11-02T00:42:02
|
R
|
UTF-8
|
R
| false
| false
| 21,176
|
r
|
aggResults2.R
|
# Aggregate simulation results for the PointPolygon point-vs-polygon
# experiments: read every run's RDS, compute per-model error metrics, and
# build a single tidy data frame (resultsDF) consumed by the plots below.
# NOTE(review): rm(list=ls()) wipes the calling environment -- kept because
# this file appears to be run as a standalone batch script.
.libPaths(c("~/R3.6/", .libPaths()))
rm(list=ls())
library(tibble)
library(dplyr)
library(parallel)
library(readr)
library(PointPolygon)
library(stringr)
library(tidyr)
library(ggplot2)
library(sf)
library(forcats)
# One RDS file per simulation run.
rdsPathList <- list.files("~/Data/spaceTimeTest3/", full.names=TRUE)
# Read each run in parallel; an unreadable/corrupt file yields an empty
# tibble (see the tryCatch error handler) and is dropped by bind_rows.
resultsDF <- bind_rows(mclapply(rdsPathList, function(f_){
print(f_)
dz <- tryCatch({
x <- readRDS(f_)
pList <- x$pred
bList <- x$betas
tibble(
covType = x$covType,
covVal = x$covVal,
rangeE = x$rangeE,
seed = x$seed,
# Pixel-level RMSE of predicted vs. true probability field, per model.
rmse = sapply(pList, function(y){
sqrt(mean((y$trueValue - y$mu)^2))}),
# Province-level RMSE.
provrmse = sapply(x$provPred, function(y){
sqrt(mean((y$trueValue - y$mu)^2))}),
bias = sapply(pList, function(y) mean(y$mu - y$trueValue)),
# Estimated minus true dissimilarity index of the probability surface.
dissDiff = sapply(pList, function(y){
(.5*mean(abs(y$mu / mean(y$mu) - (1-y$mu) / mean(1-y$mu)))) -
(.5*mean(abs(y$trueValue / mean(y$trueValue) -
(1-y$trueValue) / mean(1-y$trueValue))))
}),
# Share of pixels whose truth falls inside the estimated interval.
coverage = sapply(pList, function(y){
mean(y$trueValue >= y$lwr & y$trueValue <= y$upr)}),
provcoverage = sapply(x$provPred, function(y){
mean(y$trueValue >= y$lwr & y$trueValue <= y$upr)}),
correlation = sapply(pList, function(y) cor(y$mu, y$trueValue)),
# Fixed-effect interval indicators: the simulated intercept truth is -2,
# the slope truth is the run's covariate value x$covVal.
b0Cov = sapply(bList, function(b){
bhat <- b$betaHat[1]
sder <- b$betaStErr[1]
as.numeric(((bhat - sder) <= -2) & ((bhat + sder) >= -2))
}),
b1Cov = sapply(bList, function(b){
bhat <- b$betaHat[2]
sder <- b$betaStErr[2]
as.numeric(((bhat - sder) <= x$covVal) & ((bhat + sder) >= x$covVal))
}),
b0Bias = sapply(bList, function(b){
bhat <- b$betaHat[1]
bhat + 2
}),
b1Bias = sapply(bList, function(b){
bhat <- b$betaHat[2]
bhat - x$covVal
}),
b1ERR = sapply(bList, function(b) b$betaStErr[2]),
model = names(pList),
converge = unlist(x$converge),
runtime = unlist(x$runtime))
},
error= function(cond){
tibble()
})
dz}, mc.cores=8)) %>%
# Normalize model labels (fix the "Reimann" typo, shorten names) and set a
# fixed display order. IMPORTANT: downstream code must use the NEW labels,
# e.g. "Ecological" rather than "Utazi".
mutate(model=gsub("Reimann", "Riemann", model)) %>%
mutate(model=gsub("IHME Resample", "Resample", model)) %>%
mutate(model=gsub("Known", "Unmasked", model)) %>%
mutate(model=gsub("Mixture Model", "Mixture", model)) %>%
mutate(model=gsub("Utazi", "Ecological", model)) %>%
mutate(model = fct_relevel(
model,
"Ignore", "Resample", "Ecological", "Riemann", "Mixture", "Unmasked"))
# Container for all figures produced by this script; written to disk at the
# bottom of the file.
aggPlotsDR <- list()
# Pixel-level 95% interval coverage by model, faceted by spatial range and
# covariate type; dashed line marks nominal 0.95 coverage.
(aggPlotsDR$coverage <- resultsDF %>%
mutate(Model=fct_rev(model)) %>%
filter(converge == 0) %>%
group_by(covType, rangeE, Model) %>%
summarize(
mu = mean(coverage),
lwr = quantile(coverage, probs=.025),
upr = quantile(coverage, probs=.975)
) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model)) +
geom_point() +
geom_errorbar() +
theme_classic() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=.95, linetype=2) +
labs(x="Model", y="") +
ggtitle("95% Coverage of Underlying Probability Field") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE))
# Publication version of the coverage plot: drops the Riemann model and
# uses larger text sizes.
(aggPlotsDR$coveragePaper <- resultsDF %>%
mutate(Model=fct_rev(model)) %>%
filter(converge == 0 & model != "Riemann") %>%
group_by(covType, rangeE, Model) %>%
summarize(
mu = mean(coverage),
lwr = quantile(coverage, probs=.025),
upr = quantile(coverage, probs=.975)
) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model)) +
geom_point() +
geom_errorbar() +
theme_bw() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=.95, linetype=2) +
labs(x="Model", y="") +
ggtitle("95% Coverage of Underlying Probability Field") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Province-level (N=32) 95% interval coverage by model.
(aggPlotsDR$provcoverage <- resultsDF %>%
mutate(Model=model) %>%
filter(converge == 0) %>%
group_by(covType, rangeE, Model) %>%
summarize(
mu = mean(provcoverage),
lwr = quantile(provcoverage, probs=.025),
upr = quantile(provcoverage, probs=.975)
) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model)) +
geom_point() +
geom_errorbar() +
theme_classic() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=.95, linetype=2) +
labs(x="Model", y="") +
ggtitle("95% Coverage of Province Probability(N=32)") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE))
# Publication version of the province coverage plot (no Riemann model).
(aggPlotsDR$provcoveragePaper <- resultsDF %>%
mutate(Model=model) %>%
filter(converge == 0 & model != "Riemann") %>%
group_by(covType, rangeE, Model) %>%
summarize(
mu = mean(provcoverage),
lwr = quantile(provcoverage, probs=.025),
upr = quantile(provcoverage, probs=.975)
) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model)) +
geom_point() +
geom_errorbar() +
theme_bw() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=.95, linetype=2) +
labs(x="Model", y="") +
ggtitle("95% Coverage of Province Probability(N=32)") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Pixel-level RMSE improvement of each model relative to the Ecological
# baseline, faceted by spatial range and covariate type.
# Bug fix: the original filtered the baseline with model == "Utazi", but
# resultsDF's labels were already recoded ("Utazi" -> "Ecological") when it
# was built, so the baseline subset was empty and rmseUtazi joined as NA.
# The title is updated to match (the Ecological model is the renamed Utazi).
(aggPlotsDR$rmseRelative <- resultsDF %>%
filter(model=="Ecological") %>%
select(covType:rmse) %>%
rename(rmseUtazi=rmse) %>%
right_join(select(resultsDF, covType:rmse, model, converge)) %>%
filter(converge == 0 & model != "Ignore") %>%#rmse <.3) %>%
mutate(improveRatio=(rmseUtazi-rmse)/rmseUtazi) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(improveRatio),
lwr = mean(improveRatio) - 1.96*(sd(improveRatio)/sqrt(n())),
upr = mean(improveRatio) + 1.96*(sd(improveRatio)/sqrt(n()))) %>%
ungroup %>%
rename(Model=model) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_classic() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Relative Improvement") +
ggtitle("RMSE: Margin of Improvement Over Ecological Model") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
geom_text(aes(y=upr), nudge_y = .04))
# Publication version of the pixel-level RMSE improvement plot: baseline is
# the Ecological model; Riemann and Ignore are excluded.
(aggPlotsDR$rmseRelativePaper <- resultsDF %>%
filter(model=="Ecological") %>%
select(covType:rmse) %>%
rename(rmseUtazi=rmse) %>%
right_join(select(resultsDF, covType:rmse, model, converge)) %>%
filter(converge == 0 & model != "Riemann" & model != "Ignore") %>%
mutate(improveRatio=(rmseUtazi-rmse)/rmseUtazi) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(improveRatio),
lwr = mean(improveRatio) - 1.96*(sd(improveRatio)/sqrt(n())),
upr = mean(improveRatio) + 1.96*(sd(improveRatio)/sqrt(n()))) %>%
ungroup %>%
mutate(Model=fct_rev(model)) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_bw() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Relative Improvement") +
ggtitle("RMSE: Margin of Improvement Over Utazi Model") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
geom_text(aes(y=upr), nudge_y = .04) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Province-level RMSE improvement relative to the Ecological baseline.
(aggPlotsDR$rmseProvRelative <- resultsDF %>%
filter(model=="Ecological") %>%
select(covType:seed, provrmse) %>%
rename(rmseUtazi=provrmse) %>%
right_join(select(resultsDF, covType:seed, provrmse, model, converge)) %>%
filter(converge == 0 & model != "Ignore") %>%
mutate(improveRatio=(rmseUtazi-provrmse)/rmseUtazi) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(improveRatio),
lwr = mean(improveRatio) - 1.96*(sd(improveRatio)/sqrt(n())),
upr = mean(improveRatio) + 1.96*(sd(improveRatio)/sqrt(n()))) %>%
ungroup %>%
rename(Model=model) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_bw() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Relative Improvement") +
ggtitle("Province RMSE: Margin of Improvement Over Ecological Model") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
geom_text(aes(y=upr), nudge_y = .06) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Publication version: same as above but also excludes the Riemann model.
(aggPlotsDR$rmseProvRelativePaper <- resultsDF %>%
filter(model=="Ecological") %>%
select(covType:seed, provrmse) %>%
rename(rmseUtazi=provrmse) %>%
right_join(select(resultsDF, covType:seed, provrmse, model, converge)) %>%
filter(converge == 0 & model != "Riemann" & model != "Ignore") %>%
mutate(improveRatio=(rmseUtazi-provrmse)/rmseUtazi) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(improveRatio),
lwr = mean(improveRatio) - 1.96*(sd(improveRatio)/sqrt(n())),
upr = mean(improveRatio) + 1.96*(sd(improveRatio)/sqrt(n()))) %>%
ungroup %>%
mutate(Model=fct_rev(model)) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_bw() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Relative Improvement") +
ggtitle("Province RMSE: Margin of Improvement Over Ecological Model") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
geom_text(aes(y=upr), nudge_y = .06) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Overall (not faceted) province-level RMSE improvement relative to the
# Ecological baseline. Fix: the original title said "IHME Resample Model",
# contradicting the filter(model == "Ecological") baseline used below.
(aggPlotsDR$rmseSingleProvRelativePaper <- resultsDF %>%
filter(model=="Ecological") %>%
select(covType:seed, provrmse) %>%
rename(rmseUtazi=provrmse) %>%
right_join(select(resultsDF, covType:seed, provrmse, model, converge)) %>%
filter(converge == 0 & model != "Riemann") %>%
mutate(improveRatio=(rmseUtazi-provrmse)/rmseUtazi) %>%
group_by(model) %>%
summarize(
mu = mean(improveRatio),
lwr = mean(improveRatio) - 1.96*(sd(improveRatio)/sqrt(n())),
upr = mean(improveRatio) + 1.96*(sd(improveRatio)/sqrt(n()))) %>%
ungroup %>%
mutate(Model=fct_rev(model)) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_classic() +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Relative Improvement") +
ggtitle("Province RMSE: Margin of Improvement Over Ecological Model") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
geom_text(aes(y=upr), nudge_y = .06) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Mean pixel-level RMSE by model (absolute, not relative). Fix: the
# original title said "Province RMSE" (copy/paste from the province plot
# below), although this panel summarizes the pixel-level `rmse` column.
(aggPlotsDR$rmsePaper <- resultsDF %>%
filter(converge == 0 & model != "Riemann" & rmse < .3) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(rmse),
lwr = mean(rmse) - 1.96*(sd(rmse)/sqrt(n())),
upr = mean(rmse) + 1.96*(sd(rmse)/sqrt(n()))) %>%
ungroup %>%
rename(Model=model) %>%
mutate(txt=round(mu, 4)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_bw() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="RMSE") +
ggtitle("RMSE") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
geom_text(aes(y=upr), nudge_y = .002))
# Mean province-level RMSE by model (absolute).
(aggPlotsDR$rmseProvPaper <- resultsDF %>%
filter(converge == 0 & model != "Riemann" & rmse < .3) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(provrmse),
lwr = mean(provrmse) - 1.96*(sd(provrmse)/sqrt(n())),
upr = mean(provrmse) + 1.96*(sd(provrmse)/sqrt(n()))) %>%
ungroup %>%
rename(Model=model) %>%
mutate(txt=round(mu, 4)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_classic() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="RMSE") +
ggtitle("Province RMSE") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
geom_text(aes(y=upr), nudge_y = .002))
# Mean pixel-level bias (predicted minus true) by model; dashed zero line.
(aggPlotsDR$bias <- resultsDF %>%
filter(converge == 0) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(bias),
lwr = quantile(bias, probs=.025),
upr = quantile(bias, probs=.975)) %>%
ungroup %>%
rename(Model=model) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_classic() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Bias") +
ggtitle("RMSE: Average Bias of Models") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE))
# Publication version of the bias plot (excludes the Ignore model).
(aggPlotsDR$biasPaper <- resultsDF %>%
filter(converge == 0 & model != "Ignore") %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(bias),
lwr = quantile(bias, probs=.025),
upr = quantile(bias, probs=.975)) %>%
ungroup %>%
rename(Model=model) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_bw() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Bias") +
ggtitle("RMSE: Average Bias of Models") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Estimated-minus-true dissimilarity index difference by model.
(aggPlotsDR$dissDiff <- resultsDF %>%
filter(converge == 0) %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(dissDiff),
lwr = quantile(dissDiff, probs=.025),
upr = quantile(dissDiff, probs=.975)) %>%
ungroup %>%
rename(Model=model) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_classic() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Bias") +
ggtitle("Dissimilarity Difference") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE))
# Publication version of the dissimilarity plot (excludes Riemann).
(aggPlotsDR$dissDiffPaper <- resultsDF %>%
filter(converge == 0 & model != "Riemann") %>%
group_by(covType, model, rangeE) %>%
summarize(
mu = mean(dissDiff),
lwr = quantile(dissDiff, probs=.025),
upr = quantile(dissDiff, probs=.975)) %>%
ungroup %>%
mutate(Model=fct_rev(model)) %>%
mutate(txt=round(mu, 2)) %>%
ggplot(aes(x=Model, ymin=lwr, y=mu, ymax=upr, color=Model, label=txt)) +
geom_point() +
geom_errorbar() +
theme_classic() +
facet_grid(rangeE~covType) +
coord_flip() +
geom_hline(yintercept=0, linetype=2) +
labs(x="Model", y="Bias") +
ggtitle("Dissimilarity Difference") +
theme(panel.spacing.y = unit(0, "lines")) +
guides(color=FALSE) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Runtime of each masked model vs. the Unmasked baseline fit to the same
# simulated data set (joined on covType/covVal/rangeE/seed).
# Bug fix: the exclusion filter was spelled "Unmaksed", which matched no
# rows, so the Unmasked model was also plotted against itself.
(aggPlotsDR$runtime <- resultsDF %>%
select(covType:seed, model, runtime) %>%
filter(!(model %in% c("Unmasked"))) %>%
left_join(
resultsDF %>%
filter(model %in% c("Unmasked")) %>%
select(covType:seed, runtime) %>%
rename(IHME=runtime),
by=c("covType", "covVal", "rangeE", "seed")) %>%
ggplot(aes(x=IHME, y=runtime)) +
geom_point() +
geom_abline() +
theme_bw() +
facet_wrap(~model, scales="free_y") +
labs(x="Unmasked Runtime", y="Runtime") +
expand_limits(x = 0, y = 0) +
theme(
strip.text = element_text(size=15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
title = element_text(size=25),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=15),
axis.title.x = element_text(size=20)))
# Persist the paper figures (400 x 280 mm PNGs) and cache the whole plot
# list as an RDS for downstream use.
ggsave(
"demo/figures/dissSim2.png", aggPlotsDR$dissDiffPaper,
width=400, height=280, units = "mm")
ggsave(
"demo/figures/biasSim2.png", aggPlotsDR$biasPaper,
width=400, height=280, units = "mm")
ggsave(
"demo/figures/covSim2.png", aggPlotsDR$coveragePaper,
width=400, height=280, units = "mm")
ggsave(
"demo/figures/provcovSim2.png", aggPlotsDR$provcoveragePaper,
width=400, height=280, units = "mm")
ggsave(
"demo/figures/rmseSim2.png", aggPlotsDR$rmseRelativePaper,
width=400, height=280, units = "mm")
ggsave(
"demo/figures/provrmseSim2.png", aggPlotsDR$rmseProvRelativePaper,
width=400, height=280, units = "mm")
write_rds(aggPlotsDR, "~/Documents/PointPolygon/demo/aggplotsDR.Rds")
|
8e8a2211b80a8c0360027911199add90b08daa88
|
6e37685e4b0101dd50c06bbdb358bf6ae8038022
|
/R/get_payload.R
|
46c1a048a53254e4641431a32abfca8d40a6aa07
|
[] |
no_license
|
cran/mlbgameday
|
63a8226b53f3807600a26100f5ea5979fbf8bd32
|
dae629de90c55ddf26d8048a92fd4c404b776a17
|
refs/heads/master
| 2022-03-28T06:41:31.078674
| 2019-04-02T17:50:07
| 2019-04-02T17:50:07
| 112,774,760
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,944
|
r
|
get_payload.R
|
#' Get Gameday data from MLBAM.
#' @param start A start date passed as a character in ISO 8601 format. \code{"2017-05-01"}
#' @param end An end date passed as a character in ISO 8601 format. \code{"2017-09-01"}
#' @param league The league to gather gids for. The default is \code{"mlb"}. Other options include \code{"aaa"} and \code{"aa"}
#' @param dataset The dataset to be scraped. The default is "inning_all." Other options include, "inning_hit", "linescore".
#' @param game_ids A list of user-supplied gameIds.
#' @param db_con A database connection from the \code{DBI} package.
#' @param overwrite Logical. Should current database be overwritten? Inherited from the \code{dbWriteTable} function from the \code{DBI} package.
#' The default value is FALSE.
#' @param ... additional arguments
#' @importFrom DBI dbWriteTable
#' @import utils
#' @export
#' @examples
#'
#' \dontrun{
#' # Make a request for a single day.
#' df <- get_payload(start = "2016-06-01", end = "2016-06-01")
#'
#'
#' # Run larger requests in parallel.
#' library(doParallel)
#' library(foreach)
#'
#' no_cores <- detectCores() - 2
#' cl <- makeCluster(no_cores)
#' registerDoParallel(cl)
#'
#' df <- get_payload(start = "2016-01-01", end = "2017-01-01")
#'
#' stopImplicitCluster()
#' rm(cl)
#'
#' }
#'
#' # Supply your own custom vector of game ids.
#'
#' mygids <- search_gids(team = "indians", start = "2016-05-01", end = "2016-05-01")
#'
#' df <- get_payload(game_ids = mygids)
#'
#'
get_payload <- function(start=NULL, end=NULL, league="mlb", dataset = NULL, game_ids = NULL, db_con = NULL, overwrite = FALSE, ...) {
    if(is.null(dataset)) dataset <- "inning_all"
    message("Gathering Gameday data, please be patient...")
    # The BIS box score feed was retired after the 2018 season. Guard
    # against a NULL end date before coercing it (the original coerced
    # unconditionally, which errors when only game_ids are supplied).
    if(dataset=="bis_boxscore" && !is.null(end) && as.Date(end) >= '2019-01-01'){
        stop("bis_boxscore dataset is only available prior to the 2019 season. Please select a different data set.")
    }
    # User-supplied game ids take precedence over a date range.
    if(!is.null(game_ids)){
        urlz <- make_gids(game_ids = game_ids, dataset = dataset)
    }
    if(!is.null(start) && !is.null(end)){
        if(start < as.Date("2008-01-01")){
            stop("Please select a later start date. The data are not dependable prior to 2008.")
        }
        if(end >= Sys.Date()) stop("Please select an earlier end date.")
        if(start > end) stop("Your start date appears to occur after your end date.")
        start <- as.Date(as.character(start)); end <- as.Date(end); league <- tolower(league)
        # Get gids via internal function.
        urlz <- make_gids(start = start, end = end, dataset = dataset)
    }
    if(!is.null(db_con)){
        # Chunk out URLs in groups of 500 if a database connection is available.
        url_chunks <- split(urlz, ceiling(seq_along(urlz)/500))
        innings_df=NULL
        for(i in seq_along(url_chunks)){
            message(paste0("Processing data chunk ", i, " of ", length(url_chunks)))
            urlz <- unlist(url_chunks[i])
            # When overwrite is requested, only the FIRST chunk replaces the
            # table; later chunks append so all chunks survive. (The
            # original overwrote on every chunk, keeping only the last one,
            # and its "append" branches re-tested isTRUE(overwrite), so
            # append mode never ran at all.)
            ow <- isTRUE(overwrite) && i == 1
            # inning_all and linescore contain multiple tables, so those need to be written in a loop.
            if(dataset == "inning_all" | dataset=="linescore"){
                if(dataset == "inning_all") innings_df <- payload.gd_inning_all(urlz)
                if(dataset=="linescore") innings_df <- payload.gd_linescore(urlz)
                if(ow){
                    for (tbl in names(innings_df)) DBI::dbWriteTable(conn = db_con, value = innings_df[[tbl]], name = tbl, overwrite = TRUE)
                } else {
                    for (tbl in names(innings_df)) DBI::dbWriteTable(conn = db_con, value = innings_df[[tbl]], name = tbl, append = TRUE)
                }
            } else {
                # Single-table data sets. The original scraped every one of
                # these with payload.gd_inning_hit() (copy/paste bug); map
                # each dataset to its own payload scraper instead.
                payload_fun <- switch(dataset,
                                      inning_hit = payload.gd_inning_hit,
                                      game_events = payload.gd_game_events,
                                      game = payload.gd_game,
                                      bis_boxscore = payload.gd_bis_boxscore,
                                      stop("Unrecognized dataset: ", dataset))
                innings_df <- payload_fun(urlz)
                if(ow){
                    DBI::dbWriteTable(conn = db_con, value = innings_df, name = dataset, overwrite = TRUE)
                } else {
                    DBI::dbWriteTable(conn = db_con, value = innings_df, name = dataset, append = TRUE)
                }
            }
            # Manual garbage collect after every chunk of 500 games.
            rm(innings_df); gc()
        }
        DBI::dbDisconnect(db_con)
        message(paste0("Transaction complete, disconnecting from the database.", " ", Sys.time()))
    }
    if(is.null(db_con)){
        # If no database connection, just return a dataframe.
        # If the returned dataframe looks like it's going to be large, warn the user.
        if(length(urlz) > 3500) { # One full season including spring training and playoffs is around 3000 games.
            if(utils::menu(c("Yes", "No"),
                           title="Woah, that's a lot of data! Are you sure you want to continue without a database connection?")!=1){
                stop(message("Download stopped. Try a database connection or a smaller data set."))
            }else{
                message("Starting download, this may take a while...")
            }
        }
        if(dataset == "bis_boxscore") innings_df <- payload.gd_bis_boxscore(urlz)
        if(dataset == "game_events") innings_df <- payload.gd_game_events(urlz)
        if(dataset == "inning_all") innings_df <- payload.gd_inning_all(urlz)
        if(dataset=="inning_hit") innings_df <- payload.gd_inning_hit(urlz)
        if(dataset=="linescore") innings_df <- payload.gd_linescore(urlz)
        if(dataset=="game") innings_df <- payload.gd_game(urlz)
        # Probably faster to do the transformation within the loop in cases where data gets very large.
        #innings_df <- transform_pload(innings_df)
        return(innings_df)
    }
}
|
08290e2c01655aff4668ee34c37e6a43ef49941d
|
f7a4328c2c33fe5c1a4fc7895cffabac75f3d40c
|
/man/plotGraphic.Rd
|
1bc8b5bb49f79a27fc6158a517b13e64b238ea99
|
[] |
no_license
|
Frederic-bioinfo/MetaFeatures
|
447834d966c4f7ec140a366666e8de7878f34f94
|
69a625852597ab72ebc339da98b2d11d36770440
|
refs/heads/master
| 2021-01-18T12:45:11.243150
| 2014-06-18T19:25:08
| 2014-06-18T19:25:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
rd
|
plotGraphic.Rd
|
\name{plotGraphic}
\alias{plotGraphic}
\title{
Produce a plot based on a data.frame
}
\description{
This function will produce the final plot for an analysis.
}
\usage{
plotGraphic(
DF,
title)
}
\arguments{
\item{DF}{The data frame produced by the getDataFrame
function}
\item{title}{The title of the graph}
}
\value{
\code{plotGraphic} plots a graph on the current device.
}
\author{
Charles Joly Beauparlant
<Charles.Joly-Beauparlant@crchul.ulaval.ca>
}
|
dfddda3b2287a017c2b760c73b3a71897839434b
|
af3aad98bff24bcae224ee377b47ebfb05e9ba84
|
/man/play_game.Rd
|
dce9916194ec243ec2f5f25e68e5451128ae96a4
|
[] |
no_license
|
bbmoren2/montyhall
|
39cb0ac99ccbadc953d9fab64430373740450e60
|
745e668b4c2c51a6bdc3dbed421d53b04ac0d26c
|
refs/heads/main
| 2023-07-19T05:20:06.452409
| 2021-09-11T00:33:27
| 2021-09-11T00:33:27
| 405,245,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 711
|
rd
|
play_game.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-problem.R
\name{play_game}
\alias{play_game}
\title{The whole Monty Hall Problem game}
\usage{
play_game()
}
\arguments{
\item{...}{no arguments are used by the function.}
}
\value{
The function returns "WIN" or "LOSE" as the game.results
}
\description{
\code{play_game()} combines all of the game functions to quickly and easily generate game conditions and the subsequent win/lose outcome.
}
\details{
In one function, a game is set-up, an initial door selected, a goat door opened, the stay/switch decision made, and the final outcome associated with the final selected door is revealed.
}
\examples{
play_game()
}
|
dbc89a1f3911e1ca68c06c1fcbb55736d6613071
|
1fd8b00e9265e4998e5b76ea020f2420853b5875
|
/puts_posts_cnt.R
|
1a702d2bb16339576a131bbab2eafb1f5af8e4ac
|
[] |
no_license
|
avrao/log_analysis_r
|
b705f09936c1af1f34e6e9f6802edf786b8e4ca4
|
950f15aebf63e66e5c99cd5806c52eae67743f04
|
refs/heads/master
| 2020-03-28T12:08:09.745261
| 2018-09-12T16:24:35
| 2018-09-12T16:24:35
| 148,271,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 842
|
r
|
puts_posts_cnt.R
|
# How many puts / posts were there.
library(ggplot2)
library(stringr)
#access.log is required to be included int eh same directory
df = read.table('access.log')
#head(df)
colnames(df) = c('ip_address', 'nameless_v2', 'nameless_v3', 'date', 'nameless_v5', 'request', 'status', 'bytes', 'url', 'browser_specs')
head(df['request'])
df$request = as.vector(str_sub(df$request, 1,4))
x <- as.vector(df$request)
print(typeof(x))
put_post <- or("POST", "PUT")
contains_post <- str_detect(x , pattern = START %R% put_post)
sum_post <- sum(contains_post)
reqs = as.data.frame(table(df$request[contains_post]))
reqs
ggplot( data = reqs, aes(x = reqs$Var1, y = reqs$Freq) ) + geom_bar(stat = 'identity', fill = reqs$Freq) + xlab('Put vs POST') + ylab('Count') + ggtitle('Total PUT & POST in URLs')
ggsave("plot_put_post.png", width = 15, height = 15)
|
794c2a4cb3f13dcb20ff36a11559bc451dbf4711
|
fcb1476bffeb6e1016680c23b32ce83221c1d6f5
|
/EDA.R
|
ea30d257a27022d3afbb3802867054197d8d84b2
|
[
"MIT"
] |
permissive
|
jcarlosmayo/helsinki_housing
|
fc0ab1c0aaacc8172720a1b5ec1290eb98a8877b
|
dbfa8934dc4ebf8d44e9087f1c8fb113cef2dd2a
|
refs/heads/master
| 2020-05-15T23:35:31.215697
| 2016-01-25T16:40:10
| 2016-01-25T16:40:10
| 30,873,150
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,949
|
r
|
EDA.R
|
# Exploratory data analysis of Helsinki-area rental listings:
# histograms of size and price, price~size scatter, price-by-city
# boxplots, and price-per-square-metre heatmaps by city / type / agent.
# Load required libraries
library(ggplot2); library(dplyr); library(RSvgDevice)

#######################
# Load all clean data #
#######################
# Read every RDS file under clean_data/ and stack them into one frame.
files <- dir("clean_data/")
clean_data <- data.frame()
for (i in files){
  print(paste("clean_data/", i, sep=""))
  rds <- readRDS(paste("clean_data/", i, sep=""))
  clean_data <- rbind(clean_data, rds)
}

# Remove all duplicates, keyed on column 2 of the listings.
dup <- duplicated(clean_data[,2])
clean_data <- clean_data[!dup,]

rm(i, rds, files, dup)

##############
# HISTOGRAMS #
##############

# SIZE -- distribution of dwelling sizes, coloured by housing type.
# The x-axis is trimmed to the central 95% to hide extreme outliers.
ggplot(clean_data, aes(x = size, fill = type)) +
  geom_histogram(binwidth=2.5, color="white") +
  coord_cartesian(xlim = c(quantile(clean_data$size, 0.025),
                           quantile(clean_data$size, 0.975))) +
  scale_fill_manual(breaks=c("apartment","terrace","semi-detached", "single house"),
                    values=c("#7D4F7D", "#2EA197", "#D19C49", "#D14A41"),
                    name="Housing type ") +
  theme(legend.position="bottom",
        legend.title = element_text(colour="#3F4E59", size=16),
        legend.text = element_text(colour="#3F4E59", size = 14),
        panel.background = element_rect(fill = "white"),
        panel.grid.major.y = element_line(colour = "grey")) +
  xlab(expression(paste("size ", m^2, sep="")))

# PRICE -- distribution of prices, same trimming and colour scheme.
ggplot(clean_data, aes(x = price, fill = type)) +
  geom_histogram(binwidth=50, color="white") +
  coord_cartesian(xlim = c(quantile(clean_data$price, 0.025),
                           quantile(clean_data$price, 0.975))) +
  scale_fill_manual(breaks=c("apartment","terrace","semi-detached", "single house"),
                    values=c("#7D4F7D", "#2EA197", "#D19C49", "#D14A41")) +
  theme(legend.position="none",
        panel.background = element_rect(fill = "white"),
        panel.grid.major.y = element_line(colour = "grey")) +
  xlab("price EUR")

#############################
# SCATTER PLOT PRICE ~ SIZE #
#############################
# Both axes trimmed to the central 95% of the data.
ggplot(clean_data, aes(x = size, y = price, color=city)) +
  geom_point() +
  coord_cartesian(xlim = c(quantile(clean_data$size, 0.025),
                           quantile(clean_data$size, 0.975)),
                  ylim = c(quantile(clean_data$price, 0.025),
                           quantile(clean_data$price, 0.975)))

#########################
# Price by type by city #
#########################
# Boxplots per city with the raw points overlaid; outlier glyphs are
# drawn white so only the jittered points show.
ggplot(clean_data,
       aes(x = city, y = price, color = city)) +
  geom_boxplot(outlier.colour = "white") +
  geom_point(alpha = 0.2) +
  coord_cartesian(ylim = c(quantile(clean_data$price, 0.025),
                           quantile(clean_data$price, 0.975))) +
  theme(legend.position="none",
        legend.title = element_text(colour="#3F4E59", size=16),
        legend.text = element_text(colour="#3F4E59", size = 14),
        panel.background = element_rect(fill = "white"),
        panel.grid.major.y = element_line(colour = "grey")) +
  xlab("") +
  ylab("price EUR")

###############
# CITY GROUPS #
###############
# Mean price per square metre and listing count per city; rows with an
# empty postal code are excluded.
city_group <- subset(clean_data, po_code != "", select=c(city, price_sqm, type)) %>%
  group_by(city) %>%
  summarise(mean_price_sqm = mean(price_sqm),
            n = n())

# FIX: removed an unused `mean_values = tapply(...)` assignment that was
# computed here but never referenced.
barplot(city_group$n, names.arg = c("ESPOO", "HELSINKI", "VANTAA"),
        col = c("#E56AD0", "#68b3f6", "#61dd45"), border = NA, density = 20,
        axes = FALSE)

# Heatmap Type ~ City
city_group <- subset(clean_data, po_code != "", select=c(city, price_sqm, type)) %>%
  group_by(city, type) %>%
  summarise(mean_price_sqm = mean(price_sqm),
            n = n())

ggplot(city_group, aes(x = type, y = city)) +
  geom_tile(aes(fill = mean_price_sqm), colour = "white") +
  scale_fill_gradient(low = "#2EA197", high = "#D14A41",
                      name=expression(paste("Average price\n EUR /", m^2, sep=""))) +
  theme(legend.title = element_text(colour="#3F4E59", size=16),
        legend.text = element_text(colour="#3F4E59", size = 14),
        panel.background = element_rect(fill = "white"),
        panel.grid.major.y = element_line(colour = "white")) +
  xlab("") +
  ylab("") +
  ggtitle("by dwelling type")

# Check the hot points, it may be that there are few or even just one observation, which makes it
# not especially representative of the average price per square meter
# sum(clean_data$city == "Espoo" & clean_data$type == "single house")

##########
# AGENTS #
##########
# Plot number of rentals per agent
ggplot(clean_data, aes(x = agent)) +
  geom_bar(position = "dodge", fill="#2EA197") +
  coord_flip() +
  theme(panel.background = element_rect(fill = "white"),
        panel.grid.major.x = element_line(colour = "grey")) +
  xlab("Real state agents") +
  ylab("Amount of rentals")

# Create group: mean price/m2 per agent x city x type.
agent_group <- subset(clean_data, po_code != "", select=c(city, price_sqm, type, agent)) %>%
  group_by(agent, city, type) %>%
  summarise(mean_price_sqm = mean(price_sqm),
            n = n())

# Spread of mean price/m2 per agent.
ggplot(agent_group, aes(y = mean_price_sqm, x = agent)) +
  geom_boxplot() +
  coord_flip()

# Heatmap Agent ~ Type.
ggplot(agent_group, aes(x = type, y = agent)) +
  geom_tile(aes(fill = mean_price_sqm), colour = "white") +
  scale_fill_gradient(low = "#2EA197", high = "#D14A41",
                      name=expression(paste("Average price\n EUR /", m^2, sep=""))) +
  theme(legend.position="bottom",
        legend.title = element_text(colour="#3F4E59", size=16),
        legend.text = element_text(colour="#3F4E59", size = 14),
        panel.background = element_rect(fill = "white"),
        panel.grid.major.y = element_line(colour = "white")) +
  xlab("") +
  ylab("") +
  ggtitle("by dwelling type")

# Check the hot points, it may be that there are few or even just one observation, which makes it
# not especially representative of the average price per square meter for that given real state agency
# sum(clean_data$agent == "Vuokrahuone" & clean_data$type == "apartment")

# Heatmap Agent ~ City.
ggplot(agent_group, aes(x = city, y = agent)) +
  geom_tile(aes(fill = mean_price_sqm), colour = "white") +
  scale_fill_gradient(low = "#2EA197", high = "#D14A41",
                      name=expression(paste("Average price\n EUR /", m^2, sep=""))) +
  theme(legend.position="bottom",
        legend.title = element_text(colour="#3F4E59", size=16),
        legend.text = element_text(colour="#3F4E59", size = 14),
        panel.background = element_rect(fill = "white"),
        panel.grid.major.y = element_line(colour = "white")) +
  xlab("") +
  ylab("") +
  ggtitle("by city")

# Check the hot points, it may be that there are few or even just one observation, which makes it
# not especially representative of the average price per square meter for that given real state agency
# sum(clean_data$agent == "Vuokrahuone" & clean_data$city == "Helsinki")
|
31b60f566ec7ec69f2c456fed4bcbcc91105aa02
|
7ac79798b6c04c6fefa2c6738c0fd4dcfcebfe48
|
/R-Neural-Network/nnplay.r
|
5aba6641375e338e93ce4b43db5219852413adcd
|
[] |
no_license
|
noiseux1523/Machine-Learning
|
34e724d064fc776649157202eed7d183bfae47a8
|
ef8a8820f4d9af526286f9597fcbaabe54bd9a31
|
refs/heads/master
| 2021-01-25T12:49:58.902351
| 2018-03-02T15:54:45
| 2018-03-02T15:54:45
| 123,517,375
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,215
|
r
|
nnplay.r
|
## ---------------------------------------------------------------------------
## Trains a feed-forward neural network (package `neuralnet`) per software
## system to predict self-admitted technical debt (MSATD) from source-code
## metrics, using 20 repetitions of 10-fold cross-validation, and writes one
## CSV of scores (recall, precision, specificity, F1, F2) per system.
## ---------------------------------------------------------------------------
##
## must have the next two
## install.packages("neuralnet")
## install.packages('caTools')
##
## nice and optional
##
## install.packages("plyr")
## install.packages('ISLR')
##
## to play with rnn
##
## install.packages('rnn')
##
library(neuralnet)
library(caTools)

## ---------------------------------------------------------------------------
## Single-system walkthrough kept for reference (superseded by the loop below).
## ---------------------------------------------------------------------------
# data.path <- "Desktop/Neural-Network/dataset/data"
# in.file <-"metrics-smells-psatd-msatd-argoUML-034.csv"
# in.file <-"metrics-smells-psatd-msatd-sql12.csv"
# in.file <-"metrics-smells-psatd-msatd-apache-jmeter-2.10.csv"
# in.file <-"metrics-smells-psatd-msatd-apache-ant-1.7.0.csv"
# in.file <-"metrics-smells-psatd-msatd-jruby-1.4.0.csv"
# in.file <-"metrics-smells-psatd-msatd-jfreechart-1.0.19.csv"
# in.file <-"metrics-smells-psatd-msatd-jEdit.csv"
# in.file <-"metrics-smells-psatd-msatd-hibernate-distribution-3.3.2.GA.csv"
# in.file <-"metrics-smells-psatd-msatd-columba-1.4-src.csv"
#
# paste(data.path,in.file,sep="/")
# d <- read.csv(paste(data.path,in.file,sep="/"), header = T, sep = ";")
#
# ## remove predicted variables, strings and metrics leading to NaN
# ds <- subset (d, select = -c (Entities,RULE,Entity,File,Class,MSATDTag,
# MSATDNum,NOPM, NOTC,CP, DCAEC, ACAIC,DCMEC,EIC,
# EIP,PP,REIP,RRFP,RRTP,USELESS, RFP,RTP,CBOin,FanOut, CLD,NOC,NOD , NCP))
#
# ## use this as variable to predict
# yes <- as.numeric(d$MSATDNum)
#
# ## get the array of max and min
# maxs <- apply(ds, 2, max)
# mins <- apply(ds, 2, min)
#
# ## normalize input in [-1,1]
# scaled.data <- as.data.frame(scale(ds,center = mins, scale = maxs - mins))
#
# ## check no NaN is there !!!
# scaled.data[1,]
#
# ## add variable to predict
# data = cbind(yes,scaled.data)
#
# ## set seed of really needed
# ## set.seed(777)
#
# ## create training and test set
# split = sample.split(data$yes, SplitRatio = 0.70)
#
# ## Split based off of split Boolean Vector
# train = subset(data, split == TRUE)
# test = subset(data, split == FALSE)
#
# ## get out names and use to build the formulae
# feats <- names(scaled.data)
#
# ## Concatenate strings
# f <- paste(feats,collapse=' + ')
#
# ## add the variable to predict
# f <- paste('yes ~',f)
#
# # Convert to formula
# f <- as.formula(f)
# print (f)

##
## To loop over the 9 systems !!
## NOTE(review): 10 file names are listed below and jmeter-2.10 appears
## twice, although the comment says 9 systems -- confirm this is intended.
##
data.path <- "Desktop/Neural-Network/R-Neural-Network/data"
systems <- c("metrics-smells-psatd-msatd-apache-ant-1.7.0.csv",
             "metrics-smells-psatd-msatd-argoUML-034.csv",
             "metrics-smells-psatd-msatd-apache-jmeter-2.10.csv",
             "metrics-smells-psatd-msatd-jruby-1.4.0.csv",
             "metrics-smells-psatd-msatd-sql12.csv",
             "metrics-smells-psatd-msatd-jfreechart-1.0.19.csv",
             "metrics-smells-psatd-msatd-jEdit.csv",
             "metrics-smells-psatd-msatd-hibernate-distribution-3.3.2.GA.csv",
             "metrics-smells-psatd-msatd-columba-1.4-src.csv" ,
             "metrics-smells-psatd-msatd-apache-jmeter-2.10.csv")

# Loop for the number of systems
for (s in systems) {
  print(s)
  in.file <- s
  d <- read.csv(paste(data.path,in.file,sep="/"), header = T, sep = ";")

  ## Remove predicted variables, strings and metrics leading to NaN
  ds <- subset (d, select = -c (Entities,RULE,Entity,File,Class,MSATDTag,
                                MSATDNum,NOPM, NOTC,CP, DCAEC, ACAIC,DCMEC,EIC,
                                EIP,PP,REIP,RRFP,RRTP,USELESS, RFP,RTP,CBOin,FanOut, CLD,NOC,NOD , NCP))

  ## Use this as variable to predict
  yes <- as.numeric(d$MSATDNum)

  ## Get the array of max and min
  maxs <- apply(ds, 2, max)
  mins <- apply(ds, 2, min)

  ## Normalize input in [-1,1]
  ## NOTE(review): with center = mins and scale = maxs - mins this is
  ## actually min-max scaling to [0,1], not [-1,1].
  scaled.data <- as.data.frame(scale(ds,center = mins, scale = maxs - mins))

  ## Check no NaN is there !!!
  scaled.data[1,]

  ## Add variable to predict (response becomes column 1, named `yes`)
  data = cbind(yes,scaled.data)

  ## Get out names and use to build the formulae
  feats <- names(scaled.data)

  ## Concatenate strings
  f <- paste(feats,collapse=' + ')

  ## Add the variable to predict
  f <- paste('yes ~',f)

  ## Convert to formula
  f <- as.formula(f)

  ## Initialize variable to store scores (one row per repetition)
  score <- c()

  ##
  ## HARDCODED: Loop the number of repetition
  ##
  for (i in c(1:20)) {
    print(i)

    ## create training and test set
    # split = sample.split(data$yes, SplitRatio = 0.70)
    ## Split based off of split Boolean Vector
    # train = subset(data, split == TRUE)
    # test = subset(data, split == FALSE)

    #Randomly shuffle the data
    data_shuffle<-data[sample(nrow(data)),]
    #Create 10 equally size folds
    folds <- cut(seq(1,nrow(data_shuffle)),breaks=10,labels=FALSE)

    #Prediction array, indexed by the ORIGINAL row number of each record.
    #NOTE(review): `d` is re-used here as a plain row count, shadowing the
    #raw data frame read above (the data frame is not needed after this).
    d <- dim(data_shuffle)[1]
    p.vect <- array(0,d)

    ##
    ## HARDCODED: Loop for the number of folds
    ##
    for (k in c(1:10)) {
      ## Segment your data by fold using the which() function
      testIndexes <- which(folds==k,arr.ind=TRUE)
      test <- data_shuffle[testIndexes, ]
      train <- data_shuffle[-testIndexes, ]

      ##
      ## train the neural network
      ##
      ## notice the hidden vectors tell how many layers and node per layers
      ## thus hidden=c(20,15,10) is a 3-layers where layers have 20, 15 and 10
      ## nodes respectively
      ##
      ## there is a randomness in training to get the same results between
      ## runs we must have the same seed !
      ## set.seed(137)

      ## learn 3 network and average results basically try to play with ensemble learning ...
      nn1.config=c(40,20)
      #nn2.config=c(40,20,20)
      #nn3.config=c(10,10,10)
      nn1 <- neuralnet(f,train,hidden=nn1.config,
                       learningrate = 0.0001,
                       #threshold = 0.5
                       stepmax = 5e7,
                       linear.output=FALSE)
      # nn2 <- neuralnet(f,train,hidden=nn2.config,
      #                  learningrate = 0.0001,
      #                  #threshold = 0.5
      #                  stepmax = 5e7,
      #                  linear.output=FALSE)
      # nn3 <- neuralnet(f,train,hidden=nn3.config,
      #                  learningrate = 0.0001,
      #                  #threshold = 0.5
      #                  stepmax = 5e7,
      #                  linear.output=FALSE)

      ## make sure visually we have the right dimensions ...
      ## NOTE(review): columns 2:53 are assumed to be the 52 scaled
      ## predictors (column 1 is `yes`) -- verify if the metric set changes.
      predicted.nn1.values <- compute(nn1,test[2:53])
      # predicted.nn2.values <- compute(nn2,test[2:53])
      # predicted.nn3.values <- compute(nn3,test[2:53])

      ## round to 0,1
      predicted.nn1.values$net.result <- sapply(predicted.nn1.values$net.result,round,digits=0)
      # predicted.nn2.values$net.result <- sapply(predicted.nn2.values$net.result,round,digits=0)
      # predicted.nn3.values$net.result <- sapply(predicted.nn3.values$net.result,round,digits=0)

      ## Scatter this fold's predictions back to each record's original
      ## row position (rownames survive the shuffle).
      index <- 1
      for(j in as.numeric(rownames(test))) {
        p.vect[j] <- predicted.nn1.values$net.result[index]
        index <- index + 1
      }
    }

    ## make a bet if it is greated than zero say it was one I mean if just one net say yes
    ## hope it is really yes ...
    ## HARDCODED: The division is made based on the number of folds
    # predicted.nn.values <- (predicted.nn.values /10)
    # predicted.nn.values <- sapply(predicted.nn.values, function(x) ifelse (x>0.5, 1,0 ))
    # predicted.nn.values <- sapply(predicted.nn.values, function(x) ifelse (x>0, 1,0))

    ## build and inspect the 3 confusion matrices
    # confusion.mat2.nn1 <- table(test$yes,predicted.nn1.values$net.result)
    # confusion.mat2.nn2 <- table(test$yes,predicted.nn2.values$net.result)
    # confusion.mat2.nn3 <- table(test$yes,predicted.nn3.values$net.result)
    # confusion.mat2.nn1
    # confusion.mat2.nn2
    # confusion.mat2.nn3

    ## build the ensemble confusion matrix over ALL rows of this repetition
    confusion.mat <- table(data$yes,p.vect)
    # print(confusion.mat2 )

    ## basic accuracy measures ...
    recall <- confusion.mat[2,2]/(confusion.mat[2,2]+confusion.mat[2,1])
    precision <- confusion.mat[2,2]/(confusion.mat[2,2]+confusion.mat[1,2])
    specificity <- confusion.mat[1,1]/(confusion.mat[1,1]+confusion.mat[1,2])
    F1 <- 2*confusion.mat[2,2]/(2*confusion.mat[2,2]+confusion.mat[1,2]+confusion.mat[2,1])
    ## NOTE(review): F2 below is algebraically the same quantity as F1
    ## (both are 2PR/(P+R)); a true F2 score would weight recall higher --
    ## confirm the intent.
    F2 <- 2*precision*recall/(recall+precision)

    ## HARDCODED: Division based on the number of k-folds
    print(paste("Precision: ", precision,
                "Recall: ", recall,
                "Specificity: ", specificity,
                "F1: ", F1,
                "F2: ", F2,
                sep=" "))
    score <- rbind(score, c(recall, precision, specificity, F1, F2))
  }

  ## build file name
  storename <- gsub("metrics-smells-psatd-msatd-","NN-results-",s)
  ## bind column names ...
  colnames(score) <- c("recall", "precision", "specificity", "F1","F2")
  ## save data ...
  write.table(format(score, scientific=FALSE),
              col.names = T,
              row.names = F,
              file = paste("Desktop/Neural-Network/R-Neural-Network", storename, sep="/"), sep=";")
}
|
e9c5c6a619c2d962b8b3d6e56c1a353e839c8e97
|
bdc3d3565cb86add876cd7afe7455f9fb415ddd8
|
/R/generate.R
|
dfea40cc59faf72f462a81121ede3d001581ac50
|
[
"BSD-3-Clause"
] |
permissive
|
jbencook/dishonest-casino
|
53ad7d5ce88511846b38373e7ba3064237b8a616
|
ef32cfd8ed328cef1d320898eb40fdb5b8fa5182
|
refs/heads/master
| 2021-01-22T05:05:59.456593
| 2013-07-16T14:16:05
| 2013-07-16T14:16:05
| 11,451,141
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
r
|
generate.R
|
# Simulate 10,000 rolls of a casino that secretly alternates between a
# fair die ("F") and a loaded die ("L") -- the classic dishonest-casino
# HMM example.
#
# Transitions: F -> L with prob 0.05, L -> F with prob 0.10.
# Loaded-die emissions: faces 1-5 with prob 0.1 each, face 6 with prob 0.5.
#
# Leaves two vectors in the workspace:
#   rolls  - the observed faces
#   casino - the hidden state ("F"/"L") in effect for each roll

#The first die is fair:
die <- "F"
n_rolls <- 10000

cat("Generating 10,000 rolls... \n\n")

# FIX: preallocate instead of growing with c() inside the loop, which
# copied the whole vector on every iteration (O(n^2) work overall).
rolls <- numeric(n_rolls)
casino <- character(n_rolls)

for (i in seq_len(n_rolls)) {
  # FIX: record the die in play for THIS roll. The original prepended the
  # initial state and appended after each roll, then truncated -- leaving
  # casino[k] describing roll k-1 (off by one relative to rolls).
  casino[i] <- die
  if (die == "F") {
    rollx <- sample(1:6, 1)            # fair die: uniform over 1..6
    y <- runif(1)
    newdie <- if (y <= 0.05) "L" else "F"
  }
  if (die == "L") {
    x <- runif(1)
    x1 <- floor(x * 10) + 1            # uniform over 1..10
    rollx <- if (x1 <= 5) x1 else 6    # 1..5 w.p. 0.1 each, six w.p. 0.5
    y <- runif(1)
    newdie <- if (y <= 0.1) "F" else "L"
  }
  rolls[i] <- rollx
  die <- newdie                         # state switch takes effect next roll
}

# Keep only the generated data in the workspace (intentional for this
# script, which is source()d to provide `casino` and `rolls`).
rm(list = ls()[which(!(ls() %in% c("casino", "rolls")))])
|
9ab79247c9cb590ac53f2814e1f4135284bbb291
|
c4f2df4cd1610124fa0a72a62254a97c33e4b7d6
|
/package/BEAST/man/OrthogonalityReg.Rd
|
647d2ad91512f813767e0073431e57d6cc6f8fa7
|
[] |
no_license
|
jeremylhour/alternative-synthetic-control-sparsity
|
bc5136e91dedd356a31fc91bc0b2d5f09205a193
|
a37330d24789b739b5b7a4e6a8b576f095786f2a
|
refs/heads/master
| 2023-05-03T03:16:06.835203
| 2021-05-19T19:10:19
| 2021-05-19T19:10:19
| 52,441,372
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,715
|
rd
|
OrthogonalityReg.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/OrthogonalityReg.R
\name{OrthogonalityReg}
\alias{OrthogonalityReg}
\title{Function to compute mu, the covariate balancing weights}
\usage{
OrthogonalityReg(y, d, X, beta, method = "WLSLasso", c = 1.1,
nopenset = c(1), RescaleY = F, maxIterPen = 10000,
maxIterLasso = 1e+06, tolLasso = 1e-06, PostLasso = F, trace = F)
}
\arguments{
\item{y}{Outcome variable (not normalized).}
\item{X}{Matrix of covariates.}
\item{beta}{Calibrating parameter estimate from the first step.}
\item{method}{One of "WLSLasso" or "LinearOutcome".}
\item{c}{Constant for the overall penalty level.}
\item{nopenset}{Set of indices that should not be penalized. Default is intercept penalized.}
\item{RescaleY}{if TRUE rescale variable y.}
\item{maxIterPen}{Maximal number of iterations for penalty estimation.}
\item{maxIterLasso}{Maximal number of iterations in Lasso procedure.}
\item{tolLasso}{Tolerance for stopping criterion in Lasso minimization.}
\item{PostLasso}{if TRUE computes the PostLasso solution.}
\item{trace}{if TRUE print convergence info.}
}
\value{
SHat Set of indices of non-zero elements in estimated mu.
muLasso Lasso solution.
muPL Post-Lasso solution.
lambda Overall penalty level.
psi Covariate-specific penalty loadings.
nbIter Number of iterations for penalty level estimation.
convergence 0 if convergence, -555 if not because of Lasso minimization, -999 if not because of penalty estimation.
}
\description{
Second step of the BEAST estimator. Uses the LassoFISTA function to perform L1-penalised minimization.
A constant must be included as the first column in X.
Last edited: 19 February 2016.
}
|
52c5a1bddba1e2cde46dc2b6a9d1a3b381a546d8
|
fc29db7345a87579142ce3538f0f50ed427c4331
|
/PREDICT 401/Assignments/Assignment 2/A2_template.R
|
14937cd6b7d5067059b101269f31c78e3f14e9bd
|
[] |
no_license
|
estelle3627/MSPA
|
8a65b14b059a0a61daf93a32e7ee917e415f58d4
|
4ce30f13f362ca25fc845bb3a3b600fe6d4814d0
|
refs/heads/master
| 2021-01-20T09:13:31.666384
| 2017-01-13T09:51:10
| 2017-01-13T09:51:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,543
|
r
|
A2_template.R
|
#Predict 401
##Data Analysis Assignment 2

#----------------------------------------------------------------------------
# Part 1 Table Construction and Probability Calculations
#----------------------------------------------------------------------------
#This part of the assignment will use the data in "Hospital.csv". Refer to Black
#Business Statistics page 15 for the data dictionary, and Chapter 4 page 140
#problem 2. Use the code in Appendix A of this assignment. Review the problem and
#execute the code. Comment statements document the program. The table that's
#generated will be used in Part 2.

#----------------------------------------------------------------------------
# Predict 401 Data Analysis Project 1
# Appendix A
#----------------------------------------------------------------------------

hospital <- read.csv(file.path("data/Hospital.csv"), sep=",")
str(hospital)

# Page 15 of Black has a hospital data dictionary.
# Chapter 4 page 140 problem 2.
# To generate table with margins, it is necessary to convert the variables to factors.
# In this case, it is equivalent to generating nominal variables for table construction.
control <- factor(hospital$Control)
region <- factor(hospital$Geog..Region)
control_region <- table(control, region)

# Check the structure and print out the table.
str(control_region)
control_region

# Add marginal totals and rename for simplicity. Print the table.
# The table frequencies can be indexed by row and column:
# rows 1-4 = control (G_NFed, NG_NP, Profit, F_GOV), row 5 = Sum;
# columns 1-7 = region (So, NE, MW, SW, RM, CA, NW), column 8 = Sum.
m_c_r <- addmargins(control_region)
m_c_r

# Use of labeling with factors.
control <- factor(hospital$Control, labels = c("G_NFed","NG_NP","Profit","F_GOV"))
region <- factor(hospital$Geog..Region, labels = c("So","NE","MW","SW","RM","CA","NW"))
control_region <- table(control, region)
addmargins(control_region)

# The following calculations are for problem 2.
# Probability hospital is in Midwest if for-profit?  P(MW | Profit)
m_c_r[3,3]/m_c_r[3,8]
# Probability hospital is government, non-federal if in the South?  P(G_NFed | So)
m_c_r[1,1]/m_c_r[5,1]
# Probability Rocky Mountain or NP Government?  Inclusion-exclusion.
# FIX: Rocky Mountain is column 5 (RM), not column 3 (MW) as originally
# indexed here; this now matches the same calculation in Part 2 below.
(m_c_r[5,5]+m_c_r[2,8]-m_c_r[2,5])/m_c_r[5,8]
# Probability for-profit in California?
m_c_r[3,6]/m_c_r[5,8]
# Extra problem: Probability Control=2 but not Region 2 or 3?
# FIX: parenthesize the numerator -- the original wrote a-b-c/d, and
# because / binds more tightly than -, only the last term was divided.
x <- (m_c_r[2,8]-m_c_r[2,2]-m_c_r[2,3])/m_c_r[2,8]

# Chapter 5 page 180 problem 2----------------------------
# Breakdown of hospitals by service: general hospital=1, psychiatric=2.
# Create a factor out of Service and form a table.
service <- factor(hospital$Service, labels = c("medical", "psychiatric"))
service <- table(service)
addmargins(service)

# Chapter 6 page 220 problem 3---------------------------

# Chapter 7 page 254 problem 3---------------------------
# Exact binomial probability
# Normal approximation with continuity correction.
# Second problem
#----------------------------------------------------------------------------
# Part 2 Probability Calculations
#----------------------------------------------------------------------------
#Refer to Black Business Statistics Chapter 4 page 140 problem 2, Chapter 5 page
#180 problem 2 and Chapter 6 page 220 problem 3. Answer the questions in these
#problems. The table constructed in Part 1 will be needed. Use library functions
#dbinom(), dhyper() and pexp(). Lander, R for Everyone pages 185-186 lists various
#library functions. If you have questions, for example with pexp(), type ?pexp()
#into the console for information. The results of these calculations will be
#needed for the quiz.

#----------------------------------------------------------------------------
# Predict 401 Data Analysis Project 1
# Appendix B
#----------------------------------------------------------------------------

hospital <- read.csv(file.path("data/Hospital.csv"), sep=",")
str(hospital)

# To generate table with margins, it is necessary to convert the variables to factors.
# In this case, it is equivalent to generating nominal variables for table construction.
control <- factor(hospital$Control)
region <- factor(hospital$Geog..Region)
control_region <- table(control, region)

# Check the structure and print out the table.
str(control_region)
control_region

# Add marginal totals and rename for simplicity. Print the table.
# The table frequencies can be indexed by row and column.
m_c_r <- addmargins(control_region)
m_c_r

# Use of labeling with factors.
control <- factor(hospital$Control, labels = c("G_NFed","NG_NP","Profit","F_GOV"))
region <- factor(hospital$Geog..Region, labels = c("So","NE","MW","SW","RM","CA","NW"))
control_region <- table(control, region)
addmargins(control_region)

# Evaluation of sample size selection rules: for each sample size N, compute
# the exact binomial probability above a one-sided normal 95% bound around
# the mean N*p, and compare it to the nominal 0.05 tail.
# Exact probability calculation.
#p <- 0.05 # This is where different probabilities may be substituted.
#p <- 0.2
#p <- 0.3
#p <- 0.4
p <- 0.5
#p <- 0.025
#p <- 4/9
sample_size <- numeric(0)
tail_prob <- numeric(0)
for (i in 1:80) {    # Changes to 80 can lengthen or shorten the x-axis.
  N <- i*5           # Steps of 5 are being used.
  Np <- N*p
  sample_size[i] <- N
  # One-sided 95% normal bound: mean + 1.645 * sd.
  x <- Np + 1.644854*sqrt((N*p*(1-p)))
  tail_prob[i] <- pbinom(x, size = N, prob = p, lower.tail = FALSE, log.p = FALSE)
}
# Three common rule-of-thumb minimum sample sizes for the normal approximation.
N_size1 <- 5/p
N_size2 <- 9.0*(1-p)/p
N_size3 <- 15/(p*(1-p))
N_size1
N_size2
N_size3
plot(sample_size, tail_prob, type = "b", col = "blue", ylim = c(0, 0.125),
     main = "Exact")
abline(h = 0.05)
abline(h = c(0.025, 0.075), col = "red")
abline(v = N_size1, col = "green")
abline(v = N_size2, col = "black")
abline(v = N_size3, col = "purple")
#legend("bottom", legend=c("green is np >= 5","black is np >= 9(1-p)", "purple is np(1-p) >= 15"))

# Black: Page 140 Problem 2:
# Use the hospital database. Construct a cross-tabulation table for region and
# for type of control. You should have a 7x4 table. Using this table, answer
# the following questions. (Refer to Chapter 1 for category members.)

# What is the probability that a randomly selected hospital is in the Midwest
# if the hospital is known to be for-profit?
m_c_r[3,3]/m_c_r[3,8]
#0.2444444

# If the hospital is known to be in the South, what is the probability that it
# is a government, non-federal hospital?
m_c_r[1,1]/m_c_r[5,1]
#0.3035714

# What is the probability that a hospital is in the Rocky Mountain region or a
# not-for-profit, non-government hospital? (inclusion-exclusion; RM = column 5)
(m_c_r[5,5]+m_c_r[2,8]-m_c_r[2,5])/m_c_r[5,8]
#0.485

# What is the probability that a hospital is a for-profit hospital located in
# California?
m_c_r[3,6]/m_c_r[5,8]
#0.045

# Black: Chapter 5 page 180 problem 2:
# Use the hospital database.
# Create a factor out of Service and form a table
service <- factor(hospital$Service, labels = c("medical", "psychiatric"))
service <- table(service)
service <- addmargins(service)
service

# What is the breakdown between hospitals that are general medical hospitals
# and those that are psychiatric hospitals in this database of 2000 hospitals?
# (Hint: In Service, 1 = general medical and 2 = psychiatric.)
service[1:1] # medical
service[2:2] # psychiatric

# Using these figures and the hypergeometric distribution, determine the
# probability of randomly selecting 16 hospitals from the database and getting
# exactly 9 that are psychiatric hospitals.
dhyper(x = 9, m = 32, n = 168, k = 16, log = FALSE)

# Now, determine the number of hospitals in this database that are for-profit
# (Hint: In Control, 3 = for-profit.)
length(which(hospital$Control == 3))

# From this number, calculate p, the proportion of hospitals that are
# for-profit.
p_profit <- length(which(hospital$Control == 3)) / length(hospital$Control)
p_profit

# Using this value of p and the binomial distribution, determine the probability
# of randomly selecting 30 hospitals and getting exactly 10 that are for-profit.
# FIX: use the computed proportion rather than the hard-coded 0.225, as the
# preceding comment ("Using this value of p") intends.
dbinom(x = 10, size = 30, prob = p_profit, log = FALSE)

# Black: Chapter 6 page 220 problem 3:
# Use the hospital database.
# It can be determined that some hospitals admit around 50 patients per day.
# Suppose we select a hospital that admits 50 patients per day. Assuming that
# admittance only occurs within a 12-hour time period each day, and that
# admittance is Poisson distributed, what is the value of lambda per hour for
# this hospital?
lambda <- 50/12

# What is the interarrival time for admittance based on this figure? (hours)
arrival <- 1/lambda
arrival

# Suppose a person was just admitted to the hospital. What is the probability
# that it would be more than 30 minutes before the next person was admitted?
pexp(q = 3/6, rate = lambda, lower.tail = FALSE, log.p = FALSE)

# What is the probability that there would be less than 10 minutes before the
# next person was admitted?
pexp(q = 1/6, rate = lambda, lower.tail = TRUE, log.p = FALSE)
#----------------------------------------------------------------------------
# Part 3 Comparison of Probability Calculations
#----------------------------------------------------------------------------
#Refer to Black Business Statistics Chapter 7 page 254 problem 3. This problem
#will require the table constructed in Part 1. Assume the binomial distribution
#can be used without a finite population correction. Complete the following
#calculations:
#1) Determine the probability (using data from Part 1), and calculate the exact
#result using pbinom(). (Note that pbinom() does not include 225 in the upper tail
#unless it is started at 224.) Use the function pnorm() with continuity correction
#to approximate this probability.
#2) For the last calculation, subtract 1 to start the pbinom() calculation at the
#right point for the lower tail. (i.e. start at 39 and request the lower tail).
#Determine the exact binomial probability and also use the normal approximation
#with continuity correction to estimate the probability.

# Black: Chapter 7 problem 3 page 254:
# Use the hospital database.

# Determine the proportion of hospitals that are under the control of
# nongovernment not-for-profit organizations (Control = 2). Assume that this
# proportion represents the entire population for all hospitals.
# (m_c_r is the margined Control x Region table built in Part 1.)
p <- m_c_r[2,8]/m_c_r[5,8]

# If you randomly selected 500 hospitals from across the United States, what is
# the probability that 45% or more are under the control of nongovernment
# not-for-profit organizations?
# Notice we subtract 1 from q!!  P(X >= 225) = P(X > 224).
# Exact binomial probability:
pbinom(q = ((0.45*500)-1), size = 500, prob = p, lower.tail = FALSE)
# Or:
1 - pbinom(q = ((0.45*500)-1), size = 500, prob = p, lower.tail = TRUE)
# This is a data check: P(X >= 225) = 1 - P(0 <= X <= 224).
# FIX: start the sum at 0 -- the original started at 1 and silently
# dropped the x = 0 term (numerically negligible here, but formally wrong).
x <- c(seq(0, 224, by = 1))
1 - sum(dbinom(x = x, size = 500, prob = p, log = FALSE))
# Normal approximation with continuity correction:
# P(X >= 225) ~ P(Z > (224.5/500 - p)/sqrt(p(1-p)/500)), i.e. 0.45 - 1/(2n).
# FIX: the original omitted the 1/(2n) = 0.001 correction despite the
# comment; the second calculation below already applies it (0.395).
z <- (0.45 - 0.5/500 - p)/(sqrt(p*(1-p)/500))
pnorm(z, mean = 0, sd = 1, lower.tail = FALSE, log.p = FALSE)

# If you randomly selected 100 hospitals, what is the probability that less
# than 40% are under the control of nongovernment not-for-profit organizations?
# We also subtract 1 from q here, because "less than 40%" means X <= 39 and
# pbinom(q, lower.tail = TRUE) gives P(X <= q).
# Exact binomial probability:
pbinom(q = ((0.40*100)-1), size = 100, prob = p, lower.tail = TRUE)
# And now we do the inverse, "unadjusted", which gives the same result:
# >= 61 failures out of 100 is the same event as <= 39 successes.
pbinom(q = 0.60*100, size = 100, prob = 1-p, lower.tail = FALSE)
# Continuity correction: 39.5/100 = 0.395 = 0.40 - 1/(2n).
z <- ((0.395)-p)/(sqrt(p*(1-p)/100))
pnorm(z, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
#----------------------------------------------------------------------------
# Part 4 Study of Distributional Convergence
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Data Analysis Assignment 2 Quiz
#----------------------------------------------------------------------------
#Question 1
#Refer to your analysis of Hospital.csv Chapter 4 page 140 problem 2.
#The probability is 0.24 that a randomly selected hospital, known to be
#for-profit, is in the Midwest.
#xTrue
#False
#Question 2
#Refer to your analysis of Hospital.csv Chapter 4 page 140 problem 2.
#If a randomly chosen hospital is known to be in the South, the probability it
#is a government, nonfederal hospital is 0.198.
#True
#xFalse
#Question 3
#Refer to your analysis of Hospital.csv Chapter 4 page 140 problem 2.
#The probability is 0.53 that a randomly chosen hospital is in the Rocky
#Mountain region or is a not-for-profit, nongovernment hospital.
#True
#xFalse
#Question 4
#Refer to your analysis of Hospital.csv Chapter 4 page 140 problem 2.
#The probability is 0.045 that a randomly chosen hospital is a for-profit
#hospital located in California.
#xTrue
#False
#Question 5
#Refer to your analysis of Hospital.csv Chapter 5 problem 2 page 180 and to
#Chapter 6 page 220 problem 3.
#The probability of randomly selecting 16 hospitals from the database and
#getting exactly 9 that are psychiatric hospitals is 0.011. (Hint-use the
#hypergeometric distribution.)
#True
#xFalse
#Question 6
#Refer to your analysis of Hospital.csv Chapter 5 problem 2 page 180 and to
#Chapter 6 page 220 problem 3.
#Using the proportion of for-profit hospitals and the binomial distribution,
#the probability of randomly selecting 30 hospitals and getting exactly 10 that
#are for profit is 0.0610. (Hint--This assumes sampling with replacement.)
#xTrue
#False
#Question 7
#Refer to your analysis of Hospital.csv Chapter 5 problem 2 page 180 and to
#Chapter 6 page 220 problem 3.
#If lambda per hour is 4.1667, the interarrival time for admittance is 0.24 hour.
#xTrue
#False
#Question 8
#Refer to your analysis of Hospital.csv Chapter 5 problem 2 page 180 and to
#Chapter 6 page 220 problem 3.
#The probability more than 30 minutes would pass before the next admittance
#is 0.125.
#xTrue
#False
#Question 9
#Refer to your analysis of Hospital.csv Chapter 5 problem 2 page 180 and to
#Chapter 6 page 220 problem 3.
#The probability less than 10 minutes would pass before the next admittance
#is 0.6006.
#True
#xFalse
#Question 10
#Refer to your analysis of Hospital.csv Chapter 7 problem 3 page 254. Assume
#random sampling.
#The exact binomial probability of 45% or more of 500 hospitals being under the
#control of non-government not-for-profit organizations is 0.171.
#(Hint--Use p = 0.43.)
#True
#xFalse
#Question 11
#Refer to your analysis of Hospital.csv Chapter 7 problem 3 page 254. Assume
#random sampling.
#The exact binomial probability that less than 40% of 100 hospitals are under
#the control of non-government not-for-profit organizations is 0.241. (Hint--Use
#p = 0.43.).
#xTrue
#False
#Question 12
#The following question depends on results obtained by using the code in Appendix
#B. Only consider values of p in the assignment (0.5, 0.4, 0.3, 0.2, 0.1, 0.05,
#0.025) when answering.
#If p = 0.5, based on calculations using the binomial probability distribution,
#the normal approximation to the binomial distribution may be used with a sample
#size n >=10.
#green is np >= 5", "black is np >= 9(1-p)", "purple is np(1-p) >= 15"
#xTrue
#False
#Question 13
#The following question depends on results obtained by using the code in Appendix
#B. Only consider values of p in the assignment (0.5, 0.4, 0.3, 0.2, 0.1, 0.05,
#0.025) when answering.
#If p = 0.5, the rule np>= 9(1-p) justifies using a smaller sample size than the
#rule np(1-p)>=15.
#green is np >= 5", "black is np >= 9(1-p)", "purple is np(1-p) >= 15"
#xTrue
#False
#Question 14
#The following question depends on results obtained by using the code in Appendix
#B. Only consider values of p in the assignment (0.5, 0.4, 0.3, 0.2, 0.1, 0.05,
#0.025) when answering.
#If 0.2 <= p <= 0.4, then the rule np>=5 justifies using a smaller sample size
#than the other two rules.
#green is np >= 5", "black is np >= 9(1-p)", "purple is np(1-p) >= 15"
#xTrue
#False
#Question 15
#The following question depends on results obtained by using the code in Appendix
#B. Only consider values of p in the assignment (0.5, 0.4, 0.3, 0.2, 0.1, 0.05,
#0.025) when answering.
#If p = 0.025, the rule np>= 9(1-p) is preferable to the other two rules.
#green is np >= 5", "black is np >= 9(1-p)", "purple is np(1-p) >= 15"
#xTrue
#False
#Question 16
#The following question depends on results obtained by using the code in Appendix
#B. Only consider values of p in the assignment (0.5, 0.4, 0.3, 0.2, 0.1, 0.05,
#0.025) when answering.
#If p = 4/9, the rules np >= 5 and np >= 9(1-p) give different minimum sample
#sizes.
#green is np >= 5", "black is np >= 9(1-p)", "purple is np(1-p) >= 15"
#True
#xFalse
|
aa26e800b9e4a44bfdb2751136000634629b8d4d
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/ADMMnet/R/ADMMnet.R
|
c32f1db977447554869c25bacdd1eb2dddfe40ef
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,628
|
r
|
ADMMnet.R
|
####################################################
##### ADMMnet (ADMM-L0) #####
##### Penalty: L1, L2, Laplacian #####
##### Algorithm: one-step coordinate descent #####
####################################################
# ADMMnet: penalized regression (elastic net / network-constrained penalty)
# fitted by one-step coordinate descent. Dispatches to the EnetLm/NetLm
# workers for family = "gaussian" and EnetCox/NetCox for family = "cox".
#
# Arguments (interface unchanged):
#   x, y     - predictor matrix and response (for "cox", y is survival data)
#   family   - "gaussian" or "cox"
#   penalty  - "Lasso", "Enet" or "Net"; "Lasso" is Enet with alpha fixed at 1
#   Omega    - network/Laplacian matrix, required when penalty = "Net"
#   alpha, lambda, nlambda, rlambda - penalty mixing and lambda path controls
#   nfolds, foldid                  - cross-validation controls
#   inzero, adaptive, aini, isd, keep.beta, ifast, thresh, maxit
#            - further fitting options, forwarded unchanged to the workers
# Returns: the worker's fit object with $family set, classed "ADMMnet".
ADMMnet=function(x, y, family=c("gaussian", "cox"), penalty=c("Lasso","Enet", "Net"), Omega=NULL, alpha=1.0, lambda=NULL, nlambda=50, rlambda=NULL, nfolds=1, foldid=NULL, inzero=TRUE, adaptive=c(FALSE, TRUE), aini=NULL, isd=FALSE, keep.beta=FALSE, ifast=TRUE, thresh=1e-7, maxit=1e+5) {
  #fcall=match.call()
  family=match.arg(family)
  penalty=match.arg(penalty)
  if (penalty=="Lasso") {
    # Lasso is the elastic-net special case with alpha = 1
    penalty="Enet"
    alpha=1.0
  }
  # Fix: use && (scalar, short-circuiting) instead of & in this scalar
  # condition, and message() instead of cat() so the diagnostic is
  # newline-terminated and goes to stderr rather than polluting stdout.
  if (penalty=="Net" && is.null(Omega)) {
    penalty="Enet"
    message("Enet was performed as no input of Omega")
  }
  if (family == "gaussian") {
    # Linear model: note adaptive[1] is used for Enet, the full adaptive
    # vector (initial/adaptive flags) for Net -- intentional asymmetry.
    fit=switch(penalty,
               "Enet"=EnetLm(x,y,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive[1],aini,isd,keep.beta,thresh,maxit),
               "Net"=NetLm(x,y,Omega,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive,aini,isd,keep.beta,thresh,maxit))
    fit$family="gaussian"
  } else if (family == "cox") {
    fit=switch(penalty,
               "Enet"=EnetCox(x,y,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive[1],aini,isd,keep.beta,ifast,thresh,maxit),
               "Net"=NetCox(x,y,Omega,alpha,lambda,nlambda,rlambda,nfolds,foldid,inzero,adaptive,aini,isd,keep.beta,ifast,thresh,maxit))
    fit$family="cox"
  }
  #fit$call=fcall
  class(fit)="ADMMnet"
  return(fit)
}
|
7b39a45c78e5a25526d76f27cb23e259b0f57eae
|
6e774b671a32c125c159907e5d92c1caec8dfad4
|
/test.R
|
edb4e58f80ccd0eb25a926656ad3878fe2dfc37f
|
[] |
no_license
|
bellhkim/dbscan
|
015681b12aed8bb60129958161bb4205950c67d0
|
5c0ed56794f04e0ac4c434a46368e95114dfea98
|
refs/heads/master
| 2020-03-22T14:49:28.486718
| 2018-07-09T01:32:07
| 2018-07-09T01:32:07
| 140,208,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,089
|
r
|
test.R
|
### Hierarchical clustering demo on USArrests.
### Fix: the original file was a pasted R console transcript ("> " / "+ "
### prompts, printed output, error messages), which is not parseable as an
### R script. Reconstructed here as the intended runnable script.

setwd('C:/Users/dsc/dbscan')  # NOTE(review): hard-coded path; adjust per machine

df <- USArrests

# Distance measures: "euclidean", "maximum", "manhattan", "canberra",
# "binary" or "minkowski"
dist(df, method = "manhattan")
# mahalanobis(x, center = column means, cov = covariance matrix)
mahalanobis(df, colMeans(df), cov(df))

# Toy example: five points on a line, labelled a-e
x <- data.frame(c(1, 3, 6, 12, 20))
rownames(x) <- c("a", "b", "c", "d", "e")
d <- dist(x)

# Compare the four classic linkage methods side by side
par(mfcol = c(2, 2))
hc <- hclust(d, method = "single")     # single linkage
plot(hc)
hc2 <- hclust(d, method = "complete")  # complete linkage
plot(hc2)
hc3 <- hclust(d, method = "average")   # average linkage
plot(hc3)
hc4 <- hclust(d, method = "ward.D")    # Ward's method
plot(hc4)
par(mfcol = c(1, 1))

hc$height  # merge heights (transcript showed: 2 3 6 8)

# Same comparison on the standardized USArrests data
distUS <- dist(scale(df))
hc <- hclust(distUS, method = "single")
plot(hc)
hc2 <- hclust(distUS, method = "complete")
plot(hc2)
hc3 <- hclust(distUS, method = "average")
plot(hc3)
hc4 <- hclust(distUS, method = "ward.D")
plot(hc4)

# Cluster plot based on the average-linkage tree, cut into 5 groups
h3result <- cutree(hc3, k = 5)  # k = number of groups
plot(df$Murder, df$Assault, col = h3result, pch = h3result)
text(df$Murder, df$Assault, labels = rownames(df), col = h3result, pos = 1)
plot(df$Murder, df$UrbanPop, col = h3result, pch = h3result)
text(df$Murder, df$UrbanPop, labels = rownames(df), col = h3result, pos = 1)
df$cluster <- h3result

# Per-cluster boxplots of the four original variables
par(mfcol = c(2, 2))
for (i in 1:4) {
  boxplot(df[, i] ~ h3result, main = names(df)[i])
}
par(mfcol = c(1, 1))

# Descriptive statistics per cluster.
# NOTE(review): in the transcript these calls failed because 'psych' was
# not installed; run install.packages("psych") first.
library(psych)
describe(df)
describeBy(df, group = h3result)
|
9c45952724012703e38050cbdf2d8714eaed55cb
|
001811302399f314465780ad5732c0169fa5f894
|
/0501.R
|
55893dfe50d7045f8f00a7ac4fe1059563d792c1
|
[] |
no_license
|
wdwdz/learning-environment-survey
|
ec5c14c491b0152b42d2e32cdb866624082d43fe
|
5af03190fa6e3d4a927e85996c3b508cb1f6e2eb
|
refs/heads/main
| 2023-02-21T11:53:07.655698
| 2021-01-21T02:36:50
| 2021-01-21T02:36:50
| 331,481,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,331
|
r
|
0501.R
|
setwd("~/R Scripts/Learning Environment")
library(psych)
library(GPArotation)
library(lavaan)
library(lavaanPlot)
library(corrplot)
######################transfer chinese to numbers
# Load the raw survey export (Chinese labels, no header) and convert every
# response column to numeric codes. Because as.numeric() on a factor yields
# the level *index* (which follows the CSV's alphabetical level order), the
# recoding loops below re-map those indices to the intended response scales.
data2 <- read.csv("almostalldata.csv",encoding = "UTF-8",header = F)
#data2[1,3]
#for (i in 1:40){
# print(i)
# print(data2[1,i])
#}
#levels(data2[1,14])
# Coerce all 40 question columns from factor to numeric level indices.
for (i in 1:40){
data2[,i] <-as.numeric(data2[,i])
}
# Demographics: reverse-code items 1 and 4 (1<->2); shift item 2 down by 6.
data2[,1] <- 3-data2[,1]
data2[,2] <- data2[,2]-6
data2[,4] <- 3-data2[,4]
# Ad-hoc patch for the first respondent's first answer.
# NOTE(review): presumably a data-entry artefact -- verify against the raw CSV.
if(data2[1,1] == 0){
data2[1,1] <- 2
}
# Items 6-9: re-map level indices to the 7-point scale. Each element matches
# at most one branch of the if/else chain, so the mapping is
# 1->4, 3->7, 4->3, 7->1 (codes 2, 5, 6 unchanged).
# NOTE(review): the row count 384 is hard-coded -- use nrow(data2) if the
# input file ever changes.
for (j in 1:384){
for(i in 6:9){
if(data2[j,i]==1){
data2[j,i] <- 4
}else if(data2[j,i]==3){
data2[j,i]<-7
}else if(data2[j,i]==4){
data2[j,i]<-3
}else if(data2[j,i]==7){
data2[j,i]<-1
}
}
}
# Item 12: mapping 1->4, 3->1, 4->7, 5->3, 6->5, 7->6 (code 2 unchanged).
for(i in 1:384){
if(data2[i,12]==1){
data2[i,12]<-4
}else if(data2[i,12]==3){
data2[i,12]<-1
}else if(data2[i,12]==4){
data2[i,12]<-7
}else if(data2[i,12]==5){
data2[i,12]<-3
}else if(data2[i,12]==6){
data2[i,12]<-5
}else if(data2[i,12]==7){
data2[i,12]<-6
}
}
#########
# Item 14: mapping 1->4, 4->6, 5->1, 6->7, 7->5 (codes 2, 3 unchanged).
for(i in 1:384){
if (data2[i,14]==1){
data2[i,14]<-4
}else if(data2[i,14]==4){
data2[i,14]<-6
}else if(data2[i,14]==5){
data2[i,14]<-1
}else if (data2[i,14]==6){
data2[i,14]<-7
}else if(data2[i,14]==7){
data2[i,14]<-5
}
}
# Items 15-40: mapping 3->6, 4->3, 5->7, 6->4, 7->5 (codes 1, 2 unchanged).
for(j in 1:384){
for(i in 15:40){
if(data2[j,i]==3){
data2[j,i]<-6
}else if(data2[j,i]==4){
data2[j,i]<-3
}else if(data2[j,i] ==5){
data2[j,i]<-7
}else if(data2[j,i] == 6){
data2[j,i]<-4
}else if (data2[j,i]==7){
data2[j,i]<-5
}
}
}
# Drop respondent 307 (excluded case -- hard-coded row position).
data2<-data2[-307,]
##########
# Second survey batch, already numeric; drop its first (row-index) column.
data1 <- read.csv("test.csv",header = T)
data1 <- data1[,-1]
# Harmonize column names (Q1..Q40), stack the batches, listwise-delete NAs.
names(data2)<-c('Q1','Q2','Q3','Q4','Q5','Q6','Q7','Q8','Q9','Q10','Q11','Q12','Q13','Q14','Q15','Q16','Q17','Q18','Q19','Q20','Q21','Q22','Q23','Q24','Q25','Q26','Q27','Q28','Q29','Q30','Q31','Q32','Q33','Q34','Q35','Q36','Q37','Q38','Q39','Q40')
names(data1)<-c('Q1','Q2','Q3','Q4','Q5','Q6','Q7','Q8','Q9','Q10','Q11','Q12','Q13','Q14','Q15','Q16','Q17','Q18','Q19','Q20','Q21','Q22','Q23','Q24','Q25','Q26','Q27','Q28','Q29','Q30','Q31','Q32','Q33','Q34','Q35','Q36','Q37','Q38','Q39','Q40')
data<-rbind(data1,data2)
data<-na.omit(data)
# Keep only the model items: Q6-Q9, Q12, Q14-Q40.
data.model <-data[,c(6:9,12,14:40)]
data.cfa <- data.model
data.efa <- data.cfa
###0523 EFA+CFA+SEM
#split dataset
# Environment items only: drop the last three columns (satisfaction items).
data.environment<- data.model[,-30:-32]
# Export descriptive statistics (mean, sd, skew, kurtosis) per item.
f<-data.frame(row.names(describe(data.model)),describe(data.model)$mean,describe(data.model)$sd,describe(data.model)$skew,describe(data.model)$kurtosis)
write.table (f,file ="f.csv", sep =",", row.names = FALSE)
# Random 50/50 split into an EFA half and a CFA half.
# NOTE(review): sample() is not seeded, so the split (and all downstream
# results) is not reproducible -- add set.seed() before this line.
N <- nrow(data.environment)
indices <- seq(1, N)
indices_EFA <- sample(indices, floor((.5*N)))
indices_CFA <- indices[!(indices %in% indices_EFA)]
environment_EFA <- data.environment[indices_EFA, ]
environment_CFA <- data.environment[indices_CFA, ]
# Use the indices from the previous exercise to create a grouping variable
group_var <- vector("numeric", N)
group_var[indices_EFA] <- 1
group_var[indices_CFA] <- 2
# Bind that grouping variable onto the dataset
environment_grouped <- cbind(data.environment, group_var)
# Compare stats across groups (sanity check that the halves are comparable)
describeBy(environment_grouped, group = group_var)
######H1: EFA+ CFA
#correlation (psych helpers; pairwise deletion of missing values)
lowerCor(data.environment, use = "pairwise.complete.obs")
corr.test(data.environment, use = "pairwise.complete.obs")$p
# Use those indices to split the dataset into halves for your EFA and CFA
envi_EFA <- data.environment[indices_EFA, ]
envi_CFA <- data.environment[indices_CFA, ]
# Calculate the correlation matrix first
envi_EFA_cor <- cor(envi_EFA, use = "pairwise.complete.obs")
# Decide the number of factors via parallel analysis.
# NOTE(review): n.obs = 261 is hard-coded -- should match nrow(envi_EFA).
fa.parallel(envi_EFA_cor, n.obs = 261, fa = "both", n.iter = 100, main = "Scree plots with parallel analysis")
# Factor analysis, number of factors is 6 (principal axis extraction)
fa <- fa(envi_EFA_cor, nfactors = 6, rotate = "none", fm = "pa")
fa
# Varimax (orthogonal) rotation
fa.varimax <- fa(envi_EFA_cor, nfactors = 6, rotate = "varimax", fm = "pa")
fa.varimax
# Promax (oblique) rotation; loadings kept for inspection
fa.promax<- fa(envi_EFA_cor,nfactors = 6, rotate = "promax",fm = "pa")
h<-fa.promax$loadings
fa.diagram(fa.promax,simple =TRUE)
# Cronbach alpha for one scale.
# NOTE(review): envi_EFA2 is never defined in this file -- this errors as
# written; presumably envi_EFA was intended. Confirm the intended object.
alpha(envi_EFA2[,c(18:21)])
####################CFA
#delete Q12,14,15,20,27,28,33,35 (column positions within envi_CFA)
envi_CFA2<-envi_CFA[,c(-5,-6,-7,-12,-19,-20,-26,-27)]
# Measurement model: six latent environment factors with four correlated
# residual pairs (added from modification indices). The string is lavaan
# model syntax and must stay exactly as written.
cfa.model <- 'SA =~Q6+Q7+Q8+Q9
SS =~Q16+Q17+Q18+Q19
SI =~Q21+Q22+Q23
ST =~Q24+Q25+Q26
SM =~Q29+Q30+Q31+Q32
SR =~Q33+Q36+Q37
Q16 ~~ Q17
Q18 ~~ Q19
Q36 ~~ Q37
Q25 ~~ Q26
'
fit1_CFA <- cfa(cfa.model, data = envi_CFA2)
# Modification indices above the chi-square(1) 5% critical value (3.841)
modindices(fit1_CFA, minimum.value = 3.841, sort=TRUE, free.remove = FALSE)
fitmeasures(fit1_CFA, c("chisq","df","pvalue","gfi","agfi","nfi","nnfi","cfi","rmsea","rmsea.ci.lower","rmsea.ci.upper","rmsea.pvalue","srmr", "ave"))
lavaanPlot(model = fit1_CFA, coefs = TRUE, stand=TRUE, covs = TRUE)
# NOTE(review): interactive help call left in the script; harmless but
# should be removed from batch runs.
?fit1_CFA
####################H2 CFA + SEM
####################CFA
# Build unweighted composite (parcel) scores for each environment factor.
data.cfa$SA <- (data.cfa$Q6+data.cfa$Q7+data.cfa$Q8+data.cfa$Q9)/4
data.cfa$SS <- (data.cfa$Q16+data.cfa$Q17+data.cfa$Q18+data.cfa$Q19)/4
data.cfa$SI <- (data.cfa$Q21+data.cfa$Q22+data.cfa$Q23)/3
data.cfa$ST <- (data.cfa$Q24+data.cfa$Q25+data.cfa$Q26)/3
data.cfa$SM <- (data.cfa$Q29+data.cfa$Q30+data.cfa$Q31+data.cfa$Q32)/4
data.cfa$SR <- (data.cfa$Q33+data.cfa$Q36+data.cfa$Q37)/3
# Second-order measurement model on the composites (lavaan syntax).
cfa.model <- 'Online_Env =~ SI+ST+SM+SR
Ambient_Env =~ Q6+Q7+Q8+Q9
Satisfaction =~ Q40
ST ~~ SM
Q6 ~~ Q7
Q8 ~~ Q9
'
fit2_CFA <- cfa(cfa.model, data = data.cfa)
modindices(fit2_CFA, minimum.value = 3.841, sort=TRUE, free.remove = FALSE)
fitmeasures(fit2_CFA, c("chisq","df","pvalue","gfi","agfi","nfi","nnfi","cfi","rmsea","rmsea.ci.lower","rmsea.ci.upper","rmsea.pvalue","srmr", "ave"))
lavaanPlot(model = fit2_CFA, coefs = TRUE, stand=TRUE)
############SEM
# Structural model: satisfaction regressed on both environments; online
# environment regressed on ambient environment.
sem.model <- 'Satisfaction ~ Online_Env + Ambient_Env
Online_Env ~ Ambient_Env
Online_Env =~ SI+ST+SM+SR
Ambient_Env =~ Q6+Q7+Q8+Q9
Satisfaction =~ Q40
ST ~~ SM
Q6 ~~ Q7
Q8 ~~ Q9
'
fit2_SEM <- sem(sem.model, data = data.cfa)
fitmeasures(fit2_SEM, c("chisq","df","pvalue","gfi","agfi","nfi","nnfi","cfi","rmsea","rmsea.ci.lower","rmsea.ci.upper","rmsea.pvalue","srmr", "ave"))
lavaanPlot(model = fit2_SEM, coefs = TRUE, stand=TRUE, covs=TRUE)
coef(fit2_SEM)
#####################ggplot
# Descriptive plots of the demographic items (converted to factors first).
library(ggplot2)
data$Q1 <- factor(data$Q1)
data$Q2 <- factor(data$Q2)
data$Q3 <- factor(data$Q3)
data$Q4 <- factor(data$Q4)
data$Q10 <- factor(data$Q10)
ggplot(data, aes(x=Q2,fill = Q1)) + geom_bar(stat="count",position="dodge")
ggplot(data, aes(x=Q3)) + geom_bar(stat="count")
ggplot(data, aes(x=Q4)) + geom_bar(stat="count")
ggplot(data, aes(x=Q10)) + geom_bar(stat="count")
# Scratch checks left over from development.
try<-read.csv("almostalldata.csv",encoding = "UTF-8",header = F)
levels(data[1,1])
# NOTE(review): 'data' has no 'bmi' or 'sex' columns -- the two lines below
# will error as written; they appear pasted from an unrelated example.
p=ggplot(data,aes(x=bmi,y=..density..,fill=sex))+geom_histogram(stat="bin",position="dodge",binwidth = 1)
p=p+geom_text(aes(label=as.character(round(..density..,3))),stat="bin",binwidth=1,vjust=-0.6)
print(p)
|
a48906b7b6cfd7c4e9d284f490bd6114280d5339
|
cb90bf49b725d96cf69d0c249d5097d0ce88d00d
|
/src/DESeq_bowtie.R
|
20b3917ff6138eaf1cd4026058d0febe00bd6149
|
[] |
no_license
|
weidagong/Metatranscriptomics
|
b8dcfaa87135f589180c7847eca08d4e1173497d
|
5abaa00a4eb1cbdc975f07e31226090f8528687b
|
refs/heads/master
| 2020-04-07T13:44:23.329624
| 2018-11-28T00:35:07
| 2018-11-28T00:35:07
| 158,416,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,923
|
r
|
DESeq_bowtie.R
|
library(DESeq2)
library(readr)
###differential expression analysis with DESeq2 for bowtie count output
# NOTE(review): hard-coded cluster path.
setwd("/proj/marchlab/projects/EXPORTS/metatranscriptomics/Tara-test/assembly/bowtie/bowtie")
# Find per-sample count files and derive SRR sample ids from the filenames.
# NOTE(review): list.files(pattern=) takes a *regex*, not a glob -- the
# leading "*" and unescaped dots happen to work but "counts\\.tab$" would
# be the correct pattern; likewise sub() below matches "." as any character.
quant_filename <- list.files(pattern = "*counts.tab")
srr <- sub(".counts.tab", "", quant_filename)
samples <- read.csv("../samples.csv", stringsAsFactors = F)
rownames(samples) <- samples$Run
###1. import with tximport
#tx2gene is necessary to group transcript into genes, not using here because of \
#lack of functional annotation
# txi <- tximport(files = quant_files, type = "salmon", txOut = T)
# txi <- tximport(files = quant_files, type = "salmon", tx2gene = tx2gene)
# Read every count table and full-outer-join them on the contig id (X2);
# contigs absent from a sample get NA, replaced by 0 below.
quant_files <- lapply(quant_filename, read_table2, col_names = F)
cts <- Reduce(function(df1, df2) merge(df1, df2, by = "X2", all = T), quant_files)
names(cts) <- c("contig", srr)
cts[is.na(cts)] <- 0
rownames(cts) <- cts$contig
cts <- cts[, -1]
#divide read abundance by 2 because counting both ends
# NOTE(review): DESeqDataSetFromMatrix requires integer counts; if any
# halved value is fractional this will error -- consider round(cts/2).
cts <- cts/2
#check naming and order are consistent (printed, not asserted --
# NOTE(review): consider stopifnot() so a mismatch actually stops the run)
(rownames(samples) == colnames(cts))
cts <- cts[, rownames(samples)]
(rownames(samples) == colnames(cts))
###2. construct a DESeq object, modeling counts by sampling depth
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = samples,
design = ~ Depth)
#prefiltering: keep contigs with at least 10 reads total
keep <- rowSums(counts(dds)) >= 10
dds <- dds[keep, ]
#first factor is the control (SRF = surface is the reference level)
dds$Depth <- factor(dds$Depth, levels = c("SRF", "DCM"))
###3. differential expression analysis
###requires a lot of memory, run on cluster
dds <- DESeq(dds)
res <- results(dds)
#log fold change shrinkage to remove low count noise
# NOTE(review): coefficient selected by position (2); safer to pass the
# coefficient name from resultsNames(dds).
resLFC <- lfcShrink(dds, 2, type = "apeglm")
save(cts, res, dds, resLFC, file = "DESeq-bowtie.RData")
###To be continued
###Independent hypothesis weighting
###MA-plot
#plotMA(res, ylim = c(-2, 2))
|
17a4b7b88bb0f71db6591daa95bc1f06b2640fa8
|
54f8e843045e8edb16152b6999491613080c758e
|
/2 CreateIntraannualProfiles_1.7.R
|
8ab8e9f32494f824cdce686fd8c8531f1b0bce2d
|
[] |
no_license
|
patrickfonti/QWA
|
43d9416881a2ec43f42427bfd5f505748bdf36a4
|
8c451b4161c808f6c39d616264f85038b5d9b78a
|
refs/heads/master
| 2021-04-10T06:10:27.300168
| 2020-03-22T16:54:21
| 2020-03-22T16:54:21
| 248,915,889
| 0
| 0
| null | null | null | null |
IBM852
|
R
| false
| false
| 194,729
|
r
|
2 CreateIntraannualProfiles_1.7.R
|
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#
# GENERAL DESCRIPTION:
#
# This scripts creates intra-annual profiles of anatomical parameters based on
# summary files (one per radial wood piece) of ROXAS text file output for rings
# and cells. No data is shared across ring borders, but restricted to the
# respective target ring.Profile data is plotted and saved to file. In addition,
# files for means (overall, earlywood, latewood), minimum and maximum values per
# ring are created.
#
#
# SOME DETAILS:
#
# - Measured distances from the ring border for each cell (distance of center
# of mass) are re-scaled to account for tangential fluctuations in ring width
# by the factor (RWi/MRW), where RWi is the ring width at the cell's tangential
# position and MRW is the average ring width.
# - The last tangential intra-ring band of each ring is forced to have the
# same width as specified, which usually means that some cells of the second
# last band will be re-used for aggregations in the last band.
# - For each tangential intra-ring band 4 different types of aggregating are
# available: mean, median, 25th quantile, 75th quantile. Data and plots are
# created for each combination of data aggregation type and resolution.
#
#
# CUSTOMIZABLE OPTIONS:
#
# YR.START, YR.END - target period
# STATS - aggregation method(s) for intra-ring tangential bands (mean, median, 25th and 75th quantile)
# RESO - resolution(s) / band width(s) (Ám) of the intra-ring profiles
# SMOOTHER - smoothing spline for the intra-ring profiles
# PLOT.IMG.ID - alternating white-grey background in intra-ring profile plots to indicate when data from a next image is used
# LINE.COL - color gradient to visualize RESO in intra-ring profile plots
# MORK.EWLW - threshold of Mork's index to indicate transition from earlywood to latewood
# Several variables to define quality control (QC) features added to the intra-annual lumen area (LA) profiles
#
#
# INCLUDED PARAMETERS:
#
# MRW - ring width (integrating tangential fluctuations) [µm]
# LA - lumen area [µm2]
# TCA - total cell area (lumen area + cell wall area) [µm2]
# DRAD - radial lumen diameter [µm]
# DTAN - tangential lumen diameter [µm]
# CWA - cell wall area (unit: cell) [µm2]
# CWAACC - accumulated cell wall area (unit: sum of each intra-ring band) [µm2]
# CWTALL - average cell wall thickness (mean of tangential and radial) [µm]
# CWTALL.ADJ - adjusted average cell wall thickness (earlywood: tangential, latewood: mean of tangential and radial) [µm]
# CWTTAN - tangential cell wall thickness [µm]
# CWTRAD - radial cell wall thickness [µm]
# RTSR - Mork's index (radial thickness-to-span ratio: 4*CWTTAN/DRAD) [-]
# CTSR - adjusted Mork's index (circular thickness-to-span ratio: 4*CWTALL/D, where D is the tracheid diameter assuming a circular lumen) [-]
# DCWT - anatomical density based on CWT (assuming a circular lumen area with homogenous cell wall thickness around it; for latewood-like cells, taking CWTALL, for earlywood-like cells, taking CWTTAN to avoid pit artefacts) [-]
# DCWA - anatomical density based on CWA (CWA/(CWA+LA)) [-]
# DCWT2 - special anatomical density based on CWT (CWTRAD/DRAD) [-]
# TB2 - cell wall reinforcement index ((2*CWTTAN/DTAN)^2) [-]
# KH - theoretical hydraulic conductivity as approximated by Poiseuille's law and adjusted to elliptical tubes [m^4*s^-1*MPa^-1]
# DH - mean hydraulic diameter ((S(D^4)/n)^0.25, where S is the sum, D is the hydraulic cell diameter, n the number of cells) [µm]
# N.BAND - number of considered cells in intra-ring band; some of the cells in the last band in each year are in common with those in the second last band [#]
# N.TRUE - number of considered cells in intra-ring band; no overlapping / shared cells between last and second last band in each year [#]
#
#
# TO DO:
#
# Suppress warnings (suppressWarnings()) when using min/max function and getting message "No non-missing values found in at least one group. Returning '-Inf' for such groups to be consistent with base"
#
#
# v1.7, 30 October 2019
#
# (c) Georg von Arx
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#
### 1. Clean and load libraries ####
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace --
# convenient for a stand-alone script, hostile when sourced interactively.
rm(list=ls()) # clean desk
library(plyr)
library(data.table)
library(zoo)
library(dplyr)
### 2. Let user customize some options ####
### 2.1 General options ####
YR.START <- 1978 #minimum potential start year of profiles
YR.END <- 2017 #maximum potential ending year of profiles
# STATS <- c("mean","median","q25","q75") #different methods for aggregation within each intra-ring band
STATS <- c("median") #different methods for aggregation within each intra-ring band
# RESO <- c(2,5,10,20,30,40,50,60,80,100,120,160,200,300,500) #spatial resolution of profiles (micrometers), i.e. width of intra-ring bands
# RESO <- c(20) #spatial resolution of profiles (micrometers), i.e. width of intra-ring bands
RESO <- c(20) #spatial resolution of profiles (micrometers), i.e. width of intra-ring bands
SMOOTHER <- 1 #uneven numbers only! Defines the width of the rolling mean filter with respect to RESO; 1 means no smoothing
PLOT.IMG.ID <- TRUE #when this option is activated, the background of the intra-annual profiles shows periods for which data is taken from a specific image
LINE.COL <- colorRampPalette(c("red","green","blue")) #color gradient used to visualize the different RESO values in the profile plots
MORK.EWLW <- 1 #threshold of Mork's index marking the earlywood-to-latewood transition
### 2.2 Quality control (QC) options applied to intra-ring lumen area (LA) profiles ####
NCELL.MIN <- 5 #minimum number of cells per band; if below, the band will be flagged
LA.REF <- 0.9 #lumen area quantile per image (segments) for horizontal reference line
LA.THR <- 0.9 #maximum allowed deviation in lumen area quantile between neighboring image (segments); if deviating more, the reference quantile line will be highlighted
DUP.RW <- 0.05 #maximum deviation in ring width of neighboring rings to consider them in duplicate identification
DUP.KS <- 0.98 #minimum p-value of Kolmogorov-Smirnoff test between lumen area profiles of neighboring rings to highlight them as potential duplicates (cross-dating mistake!)
RM.KERNEL <- 9 #kernel size of running mean smoothing; the resulting smoothed LA profiles serves as a reference to identify (large) outliers
OUT.THR <- 1.5 #threshold factor by which LA at a specific point should be larger then the smoothed LA reference profile to flag it as a potential outlier
OUT.QU <- 0.75 #minimum quantile of smoothed LA reference profile to allow flagging potential outliers
### 3. Define directory containing all the summary data files (only on top level, no sub-directories!) ####
# NOTE(review): hard-coded per-machine path; adjust before running.
# setwd("D:/TrainingSchools_QWA/NADEF_QWA_2019_Cody/Analysis/R Analysis/Series")
setwd("C:/Users/ferriz/Desktop/roxas1/output")
homedir <- getwd()
### 4. Process data from each radial wood piece ####
# The outer parentheses auto-print the matched file lists for inspection.
(cell.files <- grep("Output_Cells.txt", list.files(path=homedir), value=TRUE)) #find the summary cell files
(ring.files <- grep("Output_Rings.txt", list.files(path=homedir), value=TRUE)) #find the summary ring files
### Get file with information about years/rings to be excluded (simple text file with 2 columns: TREE, YEAR)
exclude <- read.table("_YearsToExclude.txt", header=T)
### Define some functions
RobustMax <- function(x) {if (length(x)>0) max(x) else -Inf} #to avoid applying max to numeric of length 0 argument
RobustMin <- function(x) {if (length(x)>0) min(x) else -Inf} #to avoid applying max to numeric of length 0 argument
t <- Sys.time() #initiate keeping track of processing speed
### Start main loop through all files
for (i in c(2:length(cell.files)))
{
### 4.1 Load data ####
woodid <- sub("_Output_Cells.txt", "", cell.files[i])
print(paste("(", i, "/", length(cell.files), ") - ", "Processing wood piece: ", woodid, sep=""))
cells <- NULL
rings <- NULL
cells <- read.table(cell.files[i], header=T) #cell data
rings <- read.table(ring.files[i], header=T) #ring width data: used for standardization
cells <- cells[cells$YEAR>min(rings$YEAR,na.rm=T),] #drop innermost/first year that is not complete
rings <- rings[rings$YEAR>min(rings$YEAR,na.rm=T),] #drop innermost/first year that is not complete
rings <- rings[rings$YEAR%in%intersect(unique(rings$YEAR), unique(cells$YEAR)),] #exclude data from years that are not present in both cells and rings dataframes
cells <- cells[cells$YEAR%in%intersect(unique(rings$YEAR), unique(cells$YEAR)),] #exclude data from years that are not present in both cells and rings dataframes
### Create dataframe with WOODID - IMAGE - YEAR structure to re-install time series integrity if some rings are missing/dropped later on
ringTemplate <- rings
ringTemplate$WOODID <- woodid
colnames(ringTemplate)[1] <- "IMAGE"
ringTemplate <- ringTemplate[,c("YEAR", "WOODID","IMAGE")]
ringTemplate$WOODID <- as.factor(ringTemplate$WOODID)
dt <- NULL
dt <- cells[!duplicated(cells[,c("ID","YEAR")]),c("ID","YEAR")] #create dataframe with IMAGE-YEAR from cells file
rownames(dt) <- NULL
ringTemplate <- merge(ringTemplate,dt,"YEAR",all=TRUE)
ringTemplate$IMAGE <- ifelse(!is.na(ringTemplate$ID),as.character(ringTemplate$ID),as.character(ringTemplate$IMAGE)) #take IMAGE/ID info from cells in case there is such info
ringTemplate$IMAGE <- as.factor(ringTemplate$IMAGE)
ringTemplate$ID <- NULL
### 4.2 Prepare data frame for analysis ####
cells <- left_join(cells, rings[,c("YEAR", "MRW")], by="YEAR") #merge the two data files
cells[] <- lapply(cells, function(x){if (is.numeric(x)) replace(x, x < 0, NA) else x}) #replace error codes (negative values) by NA
cells$RW.CELL <- cells$RADDISTR / cells$RRADDISTR * 100 #get ring width at tangential position of each cell
cells$RADDISTR.ST <- cells$RADDISTR / cells$RW.CELL * cells$MRW #standardized absolute distance from ring border (accounting for wedging, curvatures and other tangential irregularities of ring width)
cells <- cells[order(cells$YEAR, cells$RADDISTR.ST),] #order cells by YEAR and standardized absolute distance from ring border
if("BEND" %in% colnames(cells)){colnames(cells)[colnames(cells)=="BEND"] <- "TB2"} #for backwards compatibility: replace BEND by TB2
cells$WOODID <- as.factor(woodid) #make sure WOODID is a factor
### 4.3 Add CWT-based density: assume a circular lumen area with homogenous cell wall thickness around it; for latewood-like cells, take overall average CWT, for earlywood-like cells, only consider CWTTAN, which avoids pit artefacts ####
cells$RADIUS <- sqrt(cells$LA / pi)
cells$WA <- ifelse(cells$RTSR < 1,
(cells$RADIUS + cells$CWTTAN)^2 * pi - cells$LA,
(cells$RADIUS + cells$CWTALL)^2 * pi - cells$LA)
cells$DCWT <- cells$WA / (cells$LA + cells$WA)
### 4.4 Add mean CWT: mean of radial and tangential CWT if Mork index latewood-like, in earlywood-like cells take CWTTAN ####
cells$CWTALL.ADJ <- ifelse(cells$RTSR < 1, cells$CWTTAN, cells$CWTALL)
### 4.5 Remove rows where the cell center falls exactly on ring border (mistake/inaccurate!) and correct small interpolation mistakes where relative intra-ring positions >100% to avoid calculations problems later on ####
cells <- cells[!(cells$RADDISTR==0),]
cells$RRADDISTR <- ifelse(cells$RRADDISTR>100, 100, cells$RRADDISTR)
### 4.6 Loop for each aggregation method ("mean","median","q25","q75") ####
for (s in c(1:length(STATS)))
{
print(paste(" (", make.unique(rep(LETTERS, length.out=100), sep='')[s], ") ", STATS[s], ": ", sep=""))
### 4.6.1 Process data for each intra-ring resolution step ####
for (r in c(1:length(RESO)))
{
print(paste(" (", make.unique(rep(letters, length.out=100), sep='')[r], ") ", "Resolution: ", RESO[r], " microns", sep=""))
### 4.6.1.1 Add column with resolution (micrometers) for each ring; insert row if one band is missing ####
cells$RADDISTR.BAND <- RESO[r]* round((cells$RADDISTR.ST-RESO[r]/2.01)/RESO[r] , 0) #add column with resolution (micrometers) for each ring; insert row if one band is missing
### 4.6.1.2 Calculate aggregates for each intra-ring resolution band (standardized absolute distance from ring border) ####
if (tolower(STATS[s])=="mean") #aggregation method = "mean"
{
dt <- NULL
dt <- data.table(cells)
setkey(dt, "YEAR", "WOODID", "ID", "RADDISTR.BAND")
dt <- dt[, list(RRADDISTR=mean(RRADDISTR, na.rm=TRUE)
, MRW=RobustMax(MRW)
, LA=as.numeric(mean(LA, na.rm=TRUE))
, TCA=mean(TCA, na.rm=TRUE)
, DRAD=mean(DRAD, na.rm=TRUE)
, DTAN=mean(DTAN, na.rm=TRUE)
, CWA=mean(CWA, na.rm=TRUE)
, CWAACC=sum(CWA, na.rm=TRUE)
, CWTALL=mean(CWTALL, na.rm=TRUE)
, CWTALL.ADJ=mean(CWTALL.ADJ, na.rm=TRUE)
, CWTTAN=mean(CWTTAN, na.rm=TRUE)
, CWTRAD=mean(CWTRAD, na.rm=TRUE)
, RTSR=mean(RTSR, na.rm=TRUE)
, CTSR=mean(CTSR, na.rm=TRUE)
, DCWT=mean(DCWT, na.rm=TRUE)
, DCWA=mean(RWD, na.rm=TRUE)
, DCWT2=mean(RWD2, na.rm=TRUE)
, TB2=mean(TB2, na.rm=TRUE)
, KH=mean(KH, na.rm=TRUE)
, N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)
, DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25),
by=key(dt)]
df0 <- as.data.frame(dt)
}
if (tolower(STATS[s])=="median") #aggregation method = "median"
{
dt <- NULL
dt <- data.table(cells)
setkey(dt, "YEAR", "WOODID", "ID", "RADDISTR.BAND")
dt <- dt[, list(RRADDISTR=median(RRADDISTR, na.rm=TRUE)
, MRW=RobustMax(MRW)
, LA=as.numeric(median(LA, na.rm=TRUE))
, TCA=median(TCA, na.rm=TRUE)
, DRAD=median(DRAD, na.rm=TRUE)
, DTAN=mean(DTAN, na.rm=TRUE)
, CWA=median(CWA, na.rm=TRUE)
, CWAACC=sum(CWA, na.rm=TRUE)
, CWTALL=median(CWTALL, na.rm=TRUE)
, CWTALL.ADJ=median(CWTALL.ADJ, na.rm=TRUE)
, CWTTAN=median(CWTTAN, na.rm=TRUE)
, CWTRAD=median(CWTRAD, na.rm=TRUE)
, RTSR=median(RTSR, na.rm=TRUE)
, CTSR=median(CTSR, na.rm=TRUE)
, DCWT=median(DCWT, na.rm=TRUE)
, DCWA=median(RWD, na.rm=TRUE)
, DCWT2=median(RWD2, na.rm=TRUE)
, TB2=median(TB2, na.rm=TRUE)
, KH=median(KH, na.rm=TRUE)
, N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)
, DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25),
by=key(dt)]
df0 <- as.data.frame(dt)
}
if (tolower(STATS[s])=="q25") #aggregation method = "25th quantile"
{
### Summarize all cell-level parameters per intra-ring band (YEAR x WOODID x ID x RADDISTR.BAND) with the 25th percentile
Q25 <- function(v) quantile(v, probs=0.25, na.rm=TRUE) #local shorthand for the NA-tolerant 25th percentile
dt <- data.table(cells)
setkeyv(dt, c("YEAR", "WOODID", "ID", "RADDISTR.BAND"))
dt <- dt[, .(RRADDISTR=Q25(RRADDISTR),
MRW=RobustMax(MRW),
LA=as.numeric(Q25(LA)),
TCA=Q25(TCA),
DRAD=Q25(DRAD),
DTAN=Q25(DTAN),
CWA=Q25(CWA),
CWAACC=sum(CWA, na.rm=TRUE), #accumulated cell-wall area is always a sum
CWTALL=Q25(CWTALL),
CWTALL.ADJ=Q25(CWTALL.ADJ),
CWTTAN=Q25(CWTTAN),
CWTRAD=Q25(CWTRAD),
RTSR=Q25(RTSR),
CTSR=Q25(CTSR),
DCWT=Q25(DCWT),
DCWA=Q25(RWD),
DCWT2=Q25(RWD2),
TB2=Q25(TB2),
KH=Q25(KH),
N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE), #sum(x)/mean(x) = number of non-missing values -> cell count per band
DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25), #4th-power mean of the individual DH values
by=key(dt)]
df0 <- as.data.frame(dt)
}
if (tolower(STATS[s])=="q75") #aggregation method = "75th quantile"
{
### Summarize all cell-level parameters per intra-ring band (YEAR x WOODID x ID x RADDISTR.BAND) with the 75th percentile
Q75 <- function(v) quantile(v, probs=0.75, na.rm=TRUE) #local shorthand for the NA-tolerant 75th percentile
dt <- data.table(cells)
setkeyv(dt, c("YEAR", "WOODID", "ID", "RADDISTR.BAND"))
dt <- dt[, .(RRADDISTR=Q75(RRADDISTR),
MRW=RobustMax(MRW),
LA=as.numeric(Q75(LA)),
TCA=Q75(TCA),
DRAD=Q75(DRAD),
DTAN=Q75(DTAN),
CWA=Q75(CWA),
CWAACC=sum(CWA, na.rm=TRUE), #accumulated cell-wall area is always a sum
CWTALL=Q75(CWTALL),
CWTALL.ADJ=Q75(CWTALL.ADJ),
CWTTAN=Q75(CWTTAN),
CWTRAD=Q75(CWTRAD),
RTSR=Q75(RTSR),
CTSR=Q75(CTSR),
DCWT=Q75(DCWT),
DCWA=Q75(RWD),
DCWT2=Q75(RWD2),
TB2=Q75(TB2),
KH=Q75(KH),
N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE), #sum(x)/mean(x) = number of non-missing values -> cell count per band
DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25), #4th-power mean of the individual DH values
by=key(dt)]
df0 <- as.data.frame(dt)
}
### 4.6.1.3 Correct for potentially reduced last band compared to the specified resolution ####
### Find out for each ring the max(RADDISTR.ST)
dt <- NULL
dt <- data.table(cells)
setkey(dt, "YEAR", "WOODID", "ID")
dt <- dt[, list(RADDISTR.ST.LAST=max(RADDISTR.ST, na.rm=TRUE), RADDISTR.BAND.MAX=max(RADDISTR.BAND, na.rm=TRUE)), by=key(dt)]
df01 <- as.data.frame(dt)
### Define for each ring the RADDISTR.ST that is max(RADDISTR.ST)-RESO; the minimum possible value is 0 (in very narrow rings and/or wide bands)
### BUGFIX: the original used df01$RADDISTR.ST, a column that does not exist in df01; it only worked
### because "$" on data frames partially matches the name to RADDISTR.ST.LAST -- spell it out explicitly
df01$RADDISTR.ST.LAST <- df01$RADDISTR.ST.LAST-RESO[r]
df01$RADDISTR.ST.LAST <- pmax(df01$RADDISTR.ST.LAST, 0) #clamp negative values to 0 (same result as the previous ifelse, incl. NA passthrough)
### Add columns to cells dataframe with information about max. abs. distance and last band
cells <- full_join(cells, df01[,c("YEAR","WOODID","ID","RADDISTR.ST.LAST","RADDISTR.BAND.MAX")], by=c("ID", "YEAR", "WOODID"))
### Calculate a summary of all relevant parameters for this updated last band
cells1 <- cells[cells$RADDISTR.ST>=cells$RADDISTR.ST.LAST,] #subset dataframe with only data used for summarizing last updated band
cells1 <- cells1[!is.na(cells1$YEAR),] #for some reason, the previous line creates NA lines...
cells$RADDISTR.ST.LAST <- NULL #reset
cells$RADDISTR.BAND.MAX <- NULL #reset
if (tolower(STATS[s])=="mean") #aggregation method = "mean"
{
### Summarize the cells of the updated last band (keyed on RADDISTR.BAND.MAX) with the arithmetic mean
Avg <- function(v) mean(v, na.rm=TRUE) #local shorthand for the NA-tolerant mean
dt <- data.table(cells1)
setkeyv(dt, c("YEAR", "WOODID", "ID", "RADDISTR.BAND.MAX"))
dt <- dt[, .(RRADDISTR=Avg(RRADDISTR),
MRW=RobustMax(MRW),
LA=as.numeric(Avg(LA)),
TCA=Avg(TCA),
DRAD=Avg(DRAD),
DTAN=Avg(DTAN),
CWA=Avg(CWA),
CWAACC=sum(CWA, na.rm=TRUE), #accumulated cell-wall area is always a sum
CWTALL=Avg(CWTALL),
CWTALL.ADJ=Avg(CWTALL.ADJ),
CWTTAN=Avg(CWTTAN),
CWTRAD=Avg(CWTRAD),
RTSR=Avg(RTSR),
CTSR=Avg(CTSR),
DCWT=Avg(DCWT),
DCWA=Avg(RWD),
DCWT2=Avg(RWD2),
TB2=Avg(TB2),
KH=Avg(KH),
N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE), #sum(x)/mean(x) = number of non-missing values -> cell count per band
DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25), #4th-power mean of the individual DH values
by=key(dt)]
df02 <- as.data.frame(dt)
}
if (tolower(STATS[s])=="median") #aggregation method = "median"
{
### Summarize the cells of the updated last band (keyed on RADDISTR.BAND.MAX) with the median
dt <- NULL
dt <- data.table(cells1)
setkey(dt, "YEAR", "WOODID", "ID", "RADDISTR.BAND.MAX")
dt <- dt[, list(RRADDISTR=median(RRADDISTR, na.rm=TRUE)
, MRW=RobustMax(MRW)
, LA=as.numeric(median(LA, na.rm=TRUE))
, TCA=median(TCA, na.rm=TRUE)
, DRAD=median(DRAD, na.rm=TRUE)
, DTAN=median(DTAN, na.rm=TRUE) #BUGFIX: was mean(); every other parameter in this median branch uses median()
, CWA=median(CWA, na.rm=TRUE)
, CWAACC=sum(CWA, na.rm=TRUE) #accumulated cell-wall area is always a sum, independent of the aggregation statistic
, CWTALL=median(CWTALL, na.rm=TRUE)
, CWTALL.ADJ=median(CWTALL.ADJ, na.rm=TRUE)
, CWTTAN=median(CWTTAN, na.rm=TRUE)
, CWTRAD=median(CWTRAD, na.rm=TRUE)
, RTSR=median(RTSR, na.rm=TRUE)
, CTSR=median(CTSR, na.rm=TRUE)
, DCWT=median(DCWT, na.rm=TRUE)
, DCWA=median(RWD, na.rm=TRUE)
, DCWT2=median(RWD2, na.rm=TRUE)
, TB2=median(TB2, na.rm=TRUE)
, KH=median(KH, na.rm=TRUE)
, N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE) #sum(x)/mean(x) equals the number of non-missing values -> cell count per band
, DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25), #4th-power mean of the individual DH values
by=key(dt)]
df02 <- as.data.frame(dt)
}
if (tolower(STATS[s])=="q25") #aggregation method = "25th quantile"
{
### Summarize the cells of the updated last band (keyed on RADDISTR.BAND.MAX) with the 25th percentile
Q25 <- function(v) quantile(v, probs=0.25, na.rm=TRUE) #local shorthand for the NA-tolerant 25th percentile
dt <- data.table(cells1)
setkeyv(dt, c("YEAR", "WOODID", "ID", "RADDISTR.BAND.MAX"))
dt <- dt[, .(RRADDISTR=Q25(RRADDISTR),
MRW=RobustMax(MRW),
LA=as.numeric(Q25(LA)),
TCA=Q25(TCA),
DRAD=Q25(DRAD),
DTAN=Q25(DTAN),
CWA=Q25(CWA),
CWAACC=sum(CWA, na.rm=TRUE), #accumulated cell-wall area is always a sum
CWTALL=Q25(CWTALL),
CWTALL.ADJ=Q25(CWTALL.ADJ),
CWTTAN=Q25(CWTTAN),
CWTRAD=Q25(CWTRAD),
RTSR=Q25(RTSR),
CTSR=Q25(CTSR),
DCWT=Q25(DCWT),
DCWA=Q25(RWD),
DCWT2=Q25(RWD2),
TB2=Q25(TB2),
KH=Q25(KH),
N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE), #sum(x)/mean(x) = number of non-missing values -> cell count per band
DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25), #4th-power mean of the individual DH values
by=key(dt)]
df02 <- as.data.frame(dt)
}
if (tolower(STATS[s])=="q75") #aggregation method = "75th quantile"
{
### Summarize the cells of the updated last band (keyed on RADDISTR.BAND.MAX) with the 75th percentile
Q75 <- function(v) quantile(v, probs=0.75, na.rm=TRUE) #local shorthand for the NA-tolerant 75th percentile
dt <- data.table(cells1)
setkeyv(dt, c("YEAR", "WOODID", "ID", "RADDISTR.BAND.MAX"))
dt <- dt[, .(RRADDISTR=Q75(RRADDISTR),
MRW=RobustMax(MRW),
LA=as.numeric(Q75(LA)),
TCA=Q75(TCA),
DRAD=Q75(DRAD),
DTAN=Q75(DTAN),
CWA=Q75(CWA),
CWAACC=sum(CWA, na.rm=TRUE), #accumulated cell-wall area is always a sum
CWTALL=Q75(CWTALL),
CWTALL.ADJ=Q75(CWTALL.ADJ),
CWTTAN=Q75(CWTTAN),
CWTRAD=Q75(CWTRAD),
RTSR=Q75(RTSR),
CTSR=Q75(CTSR),
DCWT=Q75(DCWT),
DCWA=Q75(RWD),
DCWT2=Q75(RWD2),
TB2=Q75(TB2),
KH=Q75(KH),
N.BAND=sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE), #sum(x)/mean(x) = number of non-missing values -> cell count per band
DH=(sum(DH^4)/(sum(MRW, na.rm=TRUE)/mean(MRW, na.rm=TRUE)))^0.25), #4th-power mean of the individual DH values
by=key(dt)]
df02 <- as.data.frame(dt)
}
### Replace the current last band of each ring by the recalculated one
names(df02)[4] <- "RADDISTR.BAND" #4th column (RADDISTR.BAND.MAX) now addresses the same bands as in df0
cells1 <- NULL #free the temporary last-band subset
df0 <- rbind(df02, df0) #stack the updated last-band rows on top so they win the de-duplication below
key.cols <- c("YEAR", "WOODID", "ID", "RADDISTR.BAND")
dt <- data.table(df0)
dt[, N.TRUE := RobustMin(N.BAND), by = key.cols] #keep the minimum (original, pre-update) cell count of each band
df0 <- as.data.frame(dt)
df0 <- df0[!duplicated(df0[, key.cols]), ] #drop the superseded last-band rows from the original aggregation
df0 <- arrange(df0, YEAR, RADDISTR.BAND) #restore year/band order
#### THE FOLLOWING BLOCK COMMENTED AS IT IS THE OLD WAY OF CALCULATING EWW & LWWW
# ### 4.6.1.4 Calculate EWW, LWW and position of EW-LW transition ###
# ### Create smoothed Mork's profile when using high resolution to locate EW-LW transition more confidentially
# smooth <- ifelse(RESO[r]<=2, 13, ifelse(RESO[r]<=5, 9, ifelse(RESO[r]<=10, 5, ifelse(RESO[r]<=20, 3, 1)))) #only smooth with resolution 2, 5, 10 and 20 microns
#
# ### CONSIDER TO PERFORM THIS FOR EACH RING INDIVIDUALLY!
# if (smooth>1)
# {
# margin <- c(mean(df0$RTSR.EW[1:((smooth-1)/2)], na.rm=TRUE), NA ,mean(df0$RTSR.EW[c((ln<-length(df0$RTSR.EW)):(ln-((smooth-1)/2)))], na.rm=TRUE))
# df0$RTSR.S <- rollapply(df0$RTSR.EW, smooth, fill=margin, mean, na.rm=TRUE)
# } else
# {
# df0$RTSR.S <- df0$RTSR.EW
# }
# ### If the intra-ring profile does not contain any band with Mork > 1 after smoothing, replace the values by the original values
# dt <- NULL
# dt <- data.table(df0)
# # dt <- data.table(ddply(dt, "YEAR", mutate, MAXMORK=RobustMax(RTSR.S))) #find for each year the max. Mork
# dt <- data.table(ddply(dt, "YEAR", mutate, MAXMORK=max(RTSR.S,na.rm=T))) #find for each year the max. Mork
#
# dt$RTSR.S <- ifelse(dt$MAXMORK<1,dt$RTSR.EW,dt$RTSR.S) #Replace RTSR.S by RTSR if max MAXMORK < 1
# dt$MAXMORK <- NULL
# df0 <- as.data.frame(dt)
# ### Find band with maximum Mork index excluding the very first part of the earlywood
# dt <- NULL
# # dt <- data.table(df0[df0$RRADDISTR>=50,])
# dt <- data.table(df0[(df0$RRADDISTR/100*df0$MRW>=50 | df0$RRADDISTR>33),]) #only consider bands that are at least 50 micrometer away from ring border; note that RRADDISTR is based on individual cells and only approximate
# setkey(dt, "YEAR")
# # dt <- dt[, list(BANDMXMORK=RADDISTR.BAND[which(RTSR.S==RobustMax(RTSR.S))]), by=key(dt)] #for some reason this sometimes skipped some years...
# suppressWarnings(dt <- dt[, list(BANDMXMORK=RADDISTR.BAND[which(RTSR.S==max(RTSR.S,na.rm=T))]), by=key(dt)])
# ewlw <- as.data.frame(dt)
#
# # ewlw <- distinct(ewlw, YEAR) #to make sure only the first of equal max. values within a year is taken!
# ewlw <- ewlw[!duplicated(ewlw[,c("YEAR")]),] #replace distinct like this for compatibility reasons
#
# df0 <- left_join(df0, ewlw, by="YEAR")
# ewlw <- NULL
# ### Loop from position of maximum smoothed Mork to beginning of ring and exit when Mork threshold is crossed
# df0.sub <- NULL
# df0.sub <- df0[!is.na(df0$RADDISTR.BAND) & df0$RADDISTR.BAND<=df0$BANDMXMORK,] #only get data before maximum Mork
#
# dt <- data.table(df0.sub)
# # dt <- data.table(ddply(dt, "YEAR", mutate, MAXMORK=RobustMax(RTSR.S))) #add for each year the max. Mork
# dt <- data.table(ddply(dt, "YEAR", mutate, MAXMORK=max(RTSR.S,na.rm=T))) #add for each year the max. Mork
# dt <- dt[dt$MAXMORK>=1,] #extract years with max. Mork >=1; this is required to have a LW-EW transition
# dt <- setorder(dt, YEAR, -RADDISTR.BAND)
# dt <- dt[RTSR.S<1, .SD[1], by="YEAR"] #extract band with LW-EW transition
# dt$MAXMORK <- NULL
# ### Calculate some statistics per year: mean Mork, position of max. and min.
# dt1 <- data.table(df0.sub, key="YEAR", "RADDISTR.BAND")
# dt1 <- dt1[, list(MRTSR=mean(RTSR, na.rm=TRUE)
# , MINBAND=RobustMin(RADDISTR.BAND)
# , MAXRPOS=RobustMax(RRADDISTR)
# , MINRPOS=RobustMin(RRADDISTR))
# , by=key(dt1)]
# dt1 <- dt1[!is.na(dt1$YEAR),]
# ### Add information about band of max. Mork index to dt1; relevant for large bands
# dt2 <- data.table(df0.sub, key="YEAR")
# dt2 <- dt2[, list(BANDMXMORK=RADDISTR.BAND[which(RTSR.S==RobustMax(RTSR.S))], MRW=RobustMax(MRW)), by=key(dt2)]
# dt2 <- dt2[!is.na(dt2$YEAR),]
# dt1 <- merge(dt1, dt2, "YEAR", all=TRUE)
# dt2 <- NULL
#
# dt$BANDMXMORK <- NULL #remove column to avoid doubling during merge in next block
# dt$MRW <- NULL #remove column to avoid doubling during merge in next block
# ### Add statistics to main data table and replace any NA (if no EW-LW transition found!) by sensible values
# ewlw <- as.data.frame(merge(dt, dt1, "YEAR", all=TRUE))
# # ewlw$V2 <- NULL #remove unused column created during data.table manipulation
# # ewlw$RADDISTR.BAND <- ifelse(!is.na(ewlw$RADDISTR.BAND),ewlw$RADDISTR.BAND,ifelse(ewlw$MRTSR<1,ewlw$MINBAND,ewlw$BANDMXMORK)) #if no transition found, assign all to EW or LW depending on mean Mork
# # ewlw$RRADDISTR <- ifelse(!is.na(ewlw$RRADDISTR),ewlw$RRADDISTR,ifelse(ewlw$MRTSR<1,ewlw$MINRPOS,ewlw$MAXRPOS)) #if no transition found, assign all to EW or LW depending on mean Mork
# ewlw$RADDISTR.BAND <- ifelse(!is.na(ewlw$RADDISTR.BAND),ewlw$RADDISTR.BAND,ifelse(ewlw$MRTSR<1,ewlw$BANDMXMORK,ewlw$MINBAND)) #if no transition found, assign all to EW or LW depending on mean Mork
# ewlw$RRADDISTR <- ifelse(!is.na(ewlw$RRADDISTR),ewlw$RRADDISTR,ifelse(ewlw$MRTSR<1,ewlw$MAXRPOS,ewlw$MINRPOS)) #if no transition found, assign all to EW or LW depending on mean Mork
# ### Calculate EWW and LWW and the position of the EW-LW transition
# ewlw <- ewlw[,c("YEAR","RADDISTR.BAND","RRADDISTR","MRW","BANDMXMORK")]
# ewlw$EWW <- round(ewlw$MRW * (ewlw$RRADDISTR/100))
# ewlw$LWW <- ewlw$MRW - ewlw$EWW
# ewlw <- ewlw[,c("YEAR","EWW","LWW","RADDISTR.BAND","RRADDISTR")]
# colnames(ewlw)[4] <- "EWLW.BAND"
# colnames(ewlw)[5] <- "EWLW.RPOS"
# ### Merge newly created data with overall dataframe
# df0 <- left_join(df0, ewlw, by="YEAR")
# # df0$EWLW.ID <- as.factor(ifelse(df0$RADDISTR.BAND<=df0$EWLW.BAND,"ew","lw"))
# # df0$EWLW.ID <- as.factor(ifelse(df0$MRW >= RESO[r], #
# # ifelse(df0$RADDISTR.BAND<=df0$EWLW.BAND,"ew","lw"),
# # ifelse(df0$EWW>=df0$LWW,"ew","lw")))
# # df0$EWLW.ID <- as.factor(ifelse(df0$MRW >= RESO[r], #
# # ifelse(df0$EWLW.BAND==0, ifelse(df0$RTSR.S<1,"ew","lw"),
# # ifelse(df0$RADDISTR.BAND<=df0$EWLW.BAND, "ew","lw")),
# # ifelse(df0$RTSR.S<1,"ew","lw")))
# df0$EWLW.ID <- as.factor(ifelse(df0$MRW >= RESO[r], #
# ifelse(df0$EWLW.BAND==0, ifelse(df0$RTSR.S<1,"ew","lw"),
# ifelse(df0$RADDISTR.BAND<=df0$EWLW.BAND, "ew",
# ifelse(df0$RTSR.S<1,"ew","lw"))),
# ifelse(df0$RTSR.S<1,"ew","lw")))
#
# df0 <- df0[!is.na(df0$YEAR),] #for some reasons there can be years with NA
# # d <- df0[,c(1:6,22:28)]
# # fix(d)
#### THE PREVIOUS BLOCK COMMENTED AS IT IS THE OLD WAY OF CALCULATING EWW & LWWW
### 4.6.1.4 Calculate EWW, LWW and position of EW-LW transition ####
### Only calculate EWW and LWW on first run per sample as they are independent from aggregation method (STATS) and intra-ring resolution (RESO)
### The transition is located via Mork's index (RTSR): the last band below the MORK.EWLW threshold before the maximum marks the EW-LW border.
if (s==1 & r==1)
{
### 4.6.1.4.1 Create data frame with median Mork's index (RTSR) in 10-µm steps
dt <- NULL
dt <- data.table(cells[,c("YEAR", "MRW", "RRADDISTR", "RADDISTR.ST", "RTSR")])
dt$YEAR <- as.factor(dt$YEAR)
dt$RADDISTR.BAND <- 10 * round((dt$RADDISTR.ST- 10 / 2.01) / 10 , 0) #add column where each cell is assigned to 10-µm band; insert row if one band is missing
setkey(dt, "YEAR", "RADDISTR.BAND")
dt <- dt[, list(RRADDISTR=median(RRADDISTR, na.rm=TRUE)
, MRW=median(MRW, na.rm=TRUE)
, RTSR=median(RTSR, na.rm=TRUE))
, by=key(dt)]
df01 <- as.data.frame(dt)
### 4.6.1.4.2 Smooth Mork's profile for each ring separately; fill marginal values with averages (n = (smooth-1)/2)
smooth <- 5 #define smoothing kernel size
if (smooth>1)
{
for (y in unique(df01$YEAR))
{
df02 <- df01[df01$YEAR==y,]
margin <- c(mean(df02$RTSR[1:((smooth-1)/2)], na.rm=TRUE), NA ,mean(df02$RTSR[c((ln<-length(df02$RTSR)):(ln-((smooth-1)/2)+1))], na.rm=TRUE)) #define the values at lower margin (first value), if NA (second value), and at upper margin (third value)
df02$RTSR.S <- rollapply(df02$RTSR, smooth, fill=margin, median, na.rm=TRUE) #apply rolling median
df01$RTSR.S[df01$YEAR==y] <- df02$RTSR.S
}
} else
{
df01$RTSR.S <- df01$RTSR
}
### 4.6.1.4.3 If the intra-ring profile does not contain any band with Mork > MORK.EWLW after smoothing, replace the values by the original values
dt <- NULL
dt <- data.table(df01)
dt <- data.table(ddply(dt, "YEAR", mutate, MAXMORK=max(RTSR.S,na.rm=T))) #find for each year the max. Mork
dt$RTSR.S <- ifelse(dt$MAXMORK<MORK.EWLW, dt$RTSR, dt$RTSR.S) #Replace RTSR.S by RTSR if max. Mork < MORK.EWLW
dt$MAXMORK <- NULL
df01 <- as.data.frame(dt)
### 4.6.1.4.4 Find band with maximum Mork index excluding the very first part of the earlywood
dt <- NULL
dt <- data.table(df01[(df01$RRADDISTR/100*df01$MRW>=50 | df01$RRADDISTR>33),]) #only consider bands that are at least 50 micrometer away from ring border OR later than relative intra-ring position 33%
# setkey(dt, "YEAR")
# suppressWarnings(dt <- dt[, list(BANDMXMORK=RADDISTR.BAND[which(RTSR.S==max(RTSR.S,na.rm=T))]), by=key(dt)])
dt <- dt[, list(BANDMXMORK=RADDISTR.BAND[which(RTSR.S==max(RTSR.S,na.rm=T))]), by="YEAR"]
ewlw <- as.data.frame(dt)
ewlw <- ewlw[!duplicated(ewlw$YEAR),] #to make sure only the first of equal max. values within a year is taken!
df01 <- left_join(df01, ewlw, by="YEAR")
ewlw <- NULL
### 4.6.1.4.5 Loop from position of maximum smoothed Mork to beginning of ring and exit when Mork threshold is crossed
df01.sub <- NULL
df01.sub <- df01[!is.na(df01$RADDISTR.BAND) & df01$RADDISTR.BAND<=df01$BANDMXMORK,] #only get data before maximum Mork
dt <- data.table(df01.sub)
dt <- data.table(ddply(dt, "YEAR", mutate, MAXMORK=max(RTSR.S,na.rm=T), MINMORK=min(RTSR.S,na.rm=T))) #add for each year the max. & min. Mork
dt <- dt[dt$MAXMORK>=MORK.EWLW & dt$MINMORK<MORK.EWLW,] #extract years with max. Mork >= MORK.EWLW & min. Mork < MORK.EWLW; this is required to have a LW-EW transition
dt$MAXMORK <- NULL #remove unused column
dt$MINMORK <- NULL #remove unused column
dt <- setorder(dt, YEAR, -RADDISTR.BAND) #scan from the ring border inwards
dt <- dt[RTSR.S<MORK.EWLW, .SD[1], by=YEAR] #extract band with LW-EW transition (first sub-threshold band below the maximum)
### 4.6.1.4.6 Calculate some statistics per year: mean Mork, position of max. and min.
# dt1 <- data.table(df01.sub, key="YEAR", "RADDISTR.BAND") #=> BETTER USE DF01, NOT SUBSET?
dt1 <- data.table(df01, key="YEAR") #=> BETTER USE DF01, NOT SUBSET?
dt1 <- dt1[, list(MRTSR=mean(RTSR, na.rm=TRUE))
# , MINBAND=RobustMin(RADDISTR.BAND)
# , MAXRPOS=RobustMax(RRADDISTR)
# , MINRPOS=RobustMin(RRADDISTR))
, by=key(dt1)]
dt1 <- dt1[!is.na(dt1$YEAR),]
### 4.6.1.4.7 Add information about band of max. Mork index to dt1; relevant if large intra-ring resolution was opted for
# dt2 <- data.table(df01.sub)
dt2 <- data.table(df01.sub[(df01.sub$RRADDISTR/100*df01.sub$MRW>=50 | df01.sub$RRADDISTR>33),]) #only consider bands that are at least 50 micrometer away from ring border OR later than relative intra-ring position 33%
dt2 <- dt2[, list(BANDMXMORK=RADDISTR.BAND[which(RTSR.S==RobustMax(RTSR.S))], MRW=RobustMax(MRW)), by="YEAR"]
dt2 <- dt2[!is.na(dt2$YEAR),]
dt1 <- merge(dt1, dt2, "YEAR", all=TRUE)
dt2 <- NULL
dt$BANDMXMORK <- NULL #remove column to avoid doubling during merge in next block
dt$MRW <- NULL #remove column to avoid doubling during merge in next block
### 4.6.1.4.8 Add statistics to main ewlw data table and replace any NA (if no EW-LW transition found!) by sensible values
ewlw <- as.data.frame(merge(dt, dt1, "YEAR", all=TRUE))
# ewlw$RADDISTR.BAND <- ifelse(!is.na(ewlw$RADDISTR.BAND),ewlw$RADDISTR.BAND,ifelse(ewlw$MRTSR<MORK.EWLW,ewlw$BANDMXMORK,ewlw$MINBAND)) #if no transition found, assign all to EW or LW depending on mean Mork
ewlw$RADDISTR.BAND <- ifelse(!is.na(ewlw$RADDISTR.BAND),ewlw$RADDISTR.BAND,ifelse(ewlw$MRTSR<MORK.EWLW,ewlw$BANDMXMORK,0)) #if no transition found, assign all to EW or LW depending on mean Mork
# ewlw$RRADDISTR <- ifelse(!is.na(ewlw$RRADDISTR),ewlw$RRADDISTR,ifelse(ewlw$MRTSR<MORK.EWLW,ewlw$MAXRPOS,ewlw$MINRPOS)) #if no transition found, assign all to EW or LW depending on mean Mork
ewlw$RRADDISTR <- ifelse(!is.na(ewlw$RRADDISTR),ewlw$RRADDISTR,ifelse(ewlw$MRTSR<MORK.EWLW,100,0)) #if no transition found, assign all to EW or LW depending on mean Mork
### 4.6.1.4.9 Calculate EWW and LWW and the position of the EW-LW transition
ewlw <- ewlw[,c("YEAR","RADDISTR.BAND","RRADDISTR","MRW","BANDMXMORK")]
ewlw$EWW <- round(ewlw$MRW * (ewlw$RRADDISTR/100)) #earlywood width = ring width x relative transition position
ewlw$LWW <- ewlw$MRW - ewlw$EWW #latewood width = remainder of the ring
ewlw <- ewlw[,c("YEAR","EWW","LWW","RADDISTR.BAND","RRADDISTR")]
colnames(ewlw)[4] <- "EWLW.BAND"
colnames(ewlw)[5] <- "EWLW.RPOS"
} #end of if (s==1 & r==1)
### 4.6.1.4.10 Merge newly created data with overall dataframe
df0$YEAR <- as.factor(df0$YEAR)
df0 <- left_join(df0, ewlw, by="YEAR")
#### 4.6.1.4.11 Express last EW band within each ring in target resolution
# df0$EWLW.BAND <- round(df0$EWLW.BAND / RESO[r]) * RESO[r]
df0$EWLW.BAND <- round((df0$EWLW.BAND-(RESO[r]/2-1)) / RESO[r]) * RESO[r]
### 4.6.1.4.12 Add column indicating for each band, whether it is earlywood or latewood
df0$EWLW.ID <- as.factor(ifelse(df0$MRW >= RESO[r], #
ifelse(df0$EWLW.BAND==0, ifelse(df0$RTSR<MORK.EWLW,"ew","lw"),
ifelse(df0$RADDISTR.BAND<=df0$EWLW.BAND, "ew",
ifelse(df0$RTSR<MORK.EWLW,"ew","lw"))),
# ifelse(df0$RADDISTR.BAND<=df0$EWLW.BAND, "ew", "lw")),
ifelse(df0$RTSR<MORK.EWLW,"ew","lw")))
df0 <- df0[!is.na(df0$YEAR),] #for some reasons there can be years with NA
### 4.6.1.4.13 If a ring has no latewood band based on the above (likely due to low intra-ring resolution),
### check whether last band(s) have a Mork's index > MORK.EWLW
### BUGFIX: the original condition "length(is.na(x))==0" equals "length(x)==0" and was never TRUE here,
### so this correction was dead code; the intent (per the comments and the commented-out line it replaced)
### is to skip only years containing NA in EWLW.BAND or RADDISTR.BAND -- now expressed with anyNA().
no.lw.years <- unique(df0$YEAR)[!unique(df0$YEAR) %in% unique(df0$YEAR[df0$EWLW.ID=="lw"])] #rings without a single latewood band
if (length(no.lw.years) > 0 & !"lw" %in% levels(df0$EWLW.ID))
{
levels(df0$EWLW.ID) <- c(levels(df0$EWLW.ID), "lw") #make sure the factor can hold "lw" before assigning it
}
for (y in no.lw.years)
{
if (!anyNA(df0$EWLW.BAND[df0$YEAR==y]) & !anyNA(df0$RADDISTR.BAND[df0$YEAR==y])) #exclude years with Mork's index = NA (e.g., when ring was not finished/completed at the end of the image -> stitching mistake!)
{
band.first <- unique(df0$EWLW.BAND[df0$YEAR==y])[1] #latest earlywood band (constant within a year after the join)
band.max <- max(df0$RADDISTR.BAND[df0$YEAR==y], na.rm=TRUE)
if (band.first <= band.max) #guard: seq() would fail on an empty range
{
for (j in seq(band.first, band.max, by=RESO[r])) #loop through all bands starting from latest earlywood band towards ring border
{
rtsr.j <- df0$RTSR[df0$YEAR==y & df0$RADDISTR.BAND==j]
if (length(rtsr.j)>0 && !is.na(rtsr.j[1])) #band may be absent (no cells yet at this stage) or have NA Mork's index
{
if (rtsr.j[1] >= MORK.EWLW) #is the mean Mork's index for the target band >= MORK.EWLW?
{
df0$EWLW.ID[df0$YEAR==y & df0$RADDISTR.BAND==j] <- "lw" #... if yes, assign it to latewood
}
}
}
}
}
}
# ### Optional: evaluate how many years have no latewood and plot it
# d <- df0[df0$YEAR %in% unique(df0$YEAR)[!unique(df0$YEAR) %in% unique(df0$YEAR[df0$EWLW.ID=="lw"])],]
# d <- d[,c("YEAR","WOODID","ID","RADDISTR.BAND","MRW","RTSR","EWW","LWW","EWLW.BAND","EWLW.ID")]
# fix(d)
### CONSIDER TO ADD HERE CODE TO FORCE THE LAST BAND OF EACH RING TO BE LATEWOOD
### 4.6.1.5 Add missing bands (bands containing no cells) ####
### Create data.table with number of bands for each year
dt <- NULL
dt <- data.table(df0)
setkey(dt, "YEAR")
dt <- dt[, list(MRW=RobustMax(MRW)), by=key(dt)] #one row per year with its ring width
dt$MOD <- (dt$MRW-(RESO[r]/2)) %% RESO[r] #get number of bands to be used considering the ring width of target ring
dt$NUMBAND <- ifelse(dt$MRW-(RESO[r]/2)>0, (dt$MRW-(RESO[r]/2)-dt$MOD), 0) #position of the last full band (0 for rings narrower than half a band)
dt <- dt[!is.na(dt$YEAR),] #in some cases there is a leading year with NA
### 4.6.1.6 Create data frame with completed series of intra-annual bands for each calendar year ####
### FIX: iterate over seq_len(nrow(dt)) instead of 1:length(unique(dt$YEAR)) -- dt already holds one row
### per year, and seq_len() is safe for an empty table (1:0 would iterate over c(1, 0))
allyears <- lapply(seq_len(nrow(dt)), function(y){seq(0, dt$NUMBAND[y], RESO[r])}) #complete band sequence per year
bandlist <- data.table(BANDS=unlist(allyears))
bandlist$YEAR <- NA
bandlist$YEAR[which(bandlist$BANDS==0)] <- as.character(dt$YEAR) #write calendar year at first intra-annual bands
bandlist$YEAR <- na.locf(bandlist$YEAR) #last observation carried forward
bandlist$YEAR <- as.factor(bandlist$YEAR)
df1 <- as.data.frame(merge(bandlist,dt,"YEAR"))
colnames(df1)[2] <- "RADDISTR.BAND" #2nd column (BANDS) gets its final name
df1[3:5] <- list(NULL) #drop the helper columns MRW, MOD and NUMBAND
# ### Remove first bands that are not included in the first ring (partial ring) #THIS IS PROBABLY NO MORE NEEDED!
# # min.year <- min(df0$YEAR, na.rm=TRUE)
# first.row <- df0$RADDISTR.BAND[1] / RESO[r] + 1
# df1 <- df1[c(first.row:nrow(df1)),]
### 4.6.1.7 Merge with master data frame to fill gaps of bands ####
df2 <- full_join(df0,df1, by=c("YEAR", "RADDISTR.BAND")) #full join keeps bands without cells (present only in df1)
df2 <- arrange(df2, YEAR, RADDISTR.BAND) #bring data frame into original order: year-band
### Some dataframe housekeeping
setDT(df2)[, MRW:= MRW[!is.na(MRW)][1L], by=YEAR] #if NA-line, copy MRW from first MRW-value of respective YEAR
setDT(df2)[, WOODID:=WOODID[!is.na(WOODID)][1L], by=YEAR] #if NA-line, copy WOODID from first WOODID-value of respective YEAR
setDT(df2)[, ID:=ID[!is.na(ID)][1L], by=YEAR] #if NA-line, copy ID from first ID-value of respective YEAR
df2$N.BAND <- ifelse(is.na(df2$N.BAND), 0, df2$N.BAND) #replace NA by 0 for number of cells in target band
df2$N.TRUE <- ifelse(is.na(df2$N.TRUE), 0, df2$N.TRUE) #replace NA by 0 for number of cells in target band
df2 <- df2[!is.na(df2$RADDISTR.BAND),] #remove rows with NA at RADDISTR.BAND (possible if no overlapping rings and ring at lower image edge not finished)
### Create first version of intra-annual profiles dataframe
iap <- as.data.frame(df2)
iap$EWLW.ID <- as.factor(iap$EWLW.ID)
iap$EWLW.ID[iap$EWLW.ID==""] <- NA #replace empty-string entries by proper NA
iap$YEAR <- as.numeric(as.character(iap$YEAR)) #convert the factor back to a numeric calendar year
colnames(iap)[3] <- "IMAGE" #3rd column (the image identifier) gets its final name
### 4.6.1.8 Add column with the relative position of each band in each year (for plotting keeping constant ring widths) ####
### Encoded as YEAR.PPPP where PPPP is the relative position within the ring in 1/100 of a percent
iap$YR.RRADDISTR.BAND <- iap$RADDISTR.BAND / iap$MRW * 100 #express each band as a percentage of entire ring width
iap$YR.RRADDISTR.BAND <- ifelse(iap$YR.RRADDISTR.BAND >= 100, 99.99, iap$YR.RRADDISTR.BAND) #replace any 100 positions by 99.99 (should not be required any more)
iap$YR.RRADDISTR.BAND <- round(100*iap$YR.RRADDISTR.BAND, 0)
iap$YR.RRADDISTR.BAND <- sprintf("%04d", iap$YR.RRADDISTR.BAND) #convert to text
iap$YR.RRADDISTR.BAND <- as.numeric(paste(as.character(iap$YEAR), as.character(iap$YR.RRADDISTR.BAND), sep=".")) #merge year and rel. position information
### 4.6.1.9 Add column with continuous absolute distance based on standardized bands (for plotting considering different ring widths) ####
### If using the "special" last band, make sure it is done correctly without creating artifacts!
### Builds one cumulative radial axis over all years by shifting each year's band distances behind the end of the previous year.
df5 <- NULL
offset <- RESO[r] #gap inserted between consecutive years on the cumulative axis
for (y in unique(iap$YEAR))
{
df4 <- iap$RADDISTR.BAND[iap$YEAR==y] #extract data for target year
df4[length(df4)] <- iap$MRW[iap$YEAR==y][1] - RESO[r] #replace the last (possibly shortened) band by its true position within the ring
# combine data frames from all years
if (y==iap$YEAR[1])
{
df5 <- df4 #first year starts the axis at 0
} else
{
df5 <- c(df5, max(df5)+offset+df4) #append this year's bands shifted behind the end of the series so far
}
}
df5 <- df5 + RESO[r]/2 #center in band
iap$RADDIST.CONT <- df5
### 4.6.1.20 Add label for x-axis (only at beginning of each calendar year) ####
iap$X.LABEL <- ifelse(iap$RADDISTR.BAND==0, iap$YEAR, NA)
### 4.6.1.21 Insert line with NA where there are missing rings ####
### Repeatedly scan the year sequence for gaps and insert one all-NA placeholder row per missing year;
### after each insertion the scan restarts from the top until a full pass finds no further gap.
repeat
{
missingrow <- FALSE
for (h in 2:(nrow(iap)-1)) #NOTE(review): assumes nrow(iap) >= 3; with fewer rows 2:(nrow(iap)-1) would count backwards
{
if (iap$YEAR[h] > iap$YEAR[h-1] + 1) #gap of at least one calendar year between consecutive rows
{
newrow <- as.data.frame(t(rep(as.numeric(NA),ncol(iap)))) #one all-NA row with the right number of columns
colnames(newrow) <- colnames(iap)
newrow$YEAR <- as.numeric(iap$YEAR[h-1] + 1) #fill in the first missing year
newrow$WOODID <- as.factor(as.character(iap$WOODID[h-1])) #carry the sample ID over from the previous row
newrow$IMAGE <- as.factor(as.character(iap$IMAGE[h-1])) #carry the image ID over from the previous row
iap <- rbind(iap[1:(h-1),], newrow, iap[-(1:(h-1)),]) #insert the placeholder at position h
newrow <- NULL
missingrow <- TRUE
break #restart the scan after every insertion (indices shifted)
}
}
if (missingrow == FALSE)
{
break #no more gaps -> done
}
}
row.names(iap) <- NULL #renumber rows after the insertions
iap$RADDISTR.BAND <- as.numeric(iap$RADDISTR.BAND)
### 4.6.1.22 Remove years with uncertain dating based on file _YearsToExclude.txt ####
if (woodid %in% exclude$WOODID)
{
bad.years <- exclude$YEAR[exclude$WOODID==woodid] #years flagged as uncertain for this sample
iap[iap$YEAR %in% bad.years, 4:(ncol(iap)-3)] <- NA #blank all measurement columns; keep identifiers and the last three plotting helpers
}
outfile <- paste0(woodid, "_IntraannualProfiles_", STATS[s], "_", RESO[r], "mu.txt")
write.table(iap, file=outfile, row.names = FALSE)
### 4.6.1.23 Create new dataframe excluding rows that have NA for the continuous radial distance ####
# iap2 <- iap[!is.na(iap$RADDIST.CONT),]
iap2 <- iap
### 4.6.1.24 Smooth the data by a rolling mean ####
if (SMOOTHER > 1)
{
### Centered rolling mean of width SMOOTHER; the margins are filled with the mean of the first
### (SMOOTHER-1)/2 values (lower edge) resp. the last (SMOOTHER-1)/2+1 values (upper edge),
### exactly as the original per-column code did.
### BUGFIX: RTSR previously reused the margin values computed for CWTRAD (its own margin line was
### missing); with the shared helper every series now gets its own margins.
RollSmooth <- function(v)
{
half <- (SMOOTHER-1)/2
ln <- length(v)
margin <- c(mean(v[1:half], na.rm=TRUE), NA, mean(v[ln:(ln-half)], na.rm=TRUE)) #lower-edge fill, NA fill, upper-edge fill
rollapply(v, SMOOTHER, fill=margin, mean, na.rm=TRUE)
}
### Apply the same smoothing to every anatomical series (same set and order as the original code)
for (v.name in c("LA","DRAD","DTAN","TCA","CWA","CWAACC","CWTALL","CWTALL.ADJ","CWTTAN","CWTRAD",
"RTSR","CTSR","DCWT2","DCWT","DCWA","TB2","KH","DH"))
{
iap2[[v.name]] <- RollSmooth(iap2[[v.name]])
}
}
### 4.6.1.25 Define some general options for plotting intra-annual profiles ####
### 4.6.1.25.1 Image dimensions ####
im.width <- round(sum(rings$MRW) * 0.3) #scale image width by the total ring width: 0.3 pixels per micrometer
im.height <- 1200 #fixed plot height in pixels
### 4.6.1.25.2 If Quality check mode is activated, perform several quality control operations ####
# Builds several helper objects (iap3..iap9) that the LA plot below uses as QC overlays:
#   iap3: start position of each contiguous same-image data block (for background shading/captions)
#   iap4: bands with suspiciously few cells
#   iap5: per-image-segment lumen-area quantile (LA.REF, e.g. 90th percentile)
#   iap7: first band after each gap (NA run) in the LA profile
#   iap8: calendar years of potentially duplicated rings (cross-dating check)
#   iap9: heavily smoothed LA reference profile for outlier detection
if (PLOT.IMG.ID==TRUE)
{
### 4.6.1.25.2.1 Define position of grey-white background rectangles indicating changes in data source from one image to the next, and get image caption of each data block ####
iap3 <- iap2[,c("YEAR","IMAGE","RADDIST.CONT","X.LABEL")] #subset intra-annual dataframe
iap3 <- iap3[!is.na(iap3$X.LABEL),] #extract rows at the beginning of each ring
iap3$BLOCK <- 1 #initialize grouping column
for (n in c(2:nrow(iap3))) #give same index to rows getting data from the same image; if data source flips between two images where they overlap, treat each segment as independent group
{
if (iap3$IMAGE[n]!=iap3$IMAGE[n-1])
{
iap3$BLOCK[n] <- iap3$BLOCK[n-1] + 1
}else
{
iap3$BLOCK[n] <- iap3$BLOCK[n-1]
}
}
iap3$BLOCK <- as.factor(iap3$BLOCK) #convert grouping index to factor
iap3 <- iap3[!duplicated(iap3[c("BLOCK")]),] #extract the first row of each group
if (nrow(iap3)>1) #if there is more than one group, define corner coordinates of grouping background rectangles
{
# Alternate blocks get shaded: rectangles span from every 2nd block start (topleft)
# to the following block start (bottomright).
topleft <- iap3$RADDIST.CONT[seq(2,nrow(iap3),2)] #coordinate of every other group at top left corner
bottomright <- iap3$RADDIST.CONT[seq(3,nrow(iap3)+1,2)] #coordinate of every other group at bottom right corner
if (is.na(bottomright[length(bottomright)])) #if number of groups is even, the last coordinate at the bottom right corner is NA, but needs to get the outmost coordinate in the series
{
bottomright[length(bottomright)] <- max(iap2$RADDIST.CONT,na.rm=TRUE)
}
}else #if there is only one group, set coordinates to 0
{
topleft <- 0
bottomright <- 0
}
### 4.6.1.25.2.2 Check for too few cells in the bands ####
iap4 <- NULL
if (length(which(iap2$N.BAND < NCELL.MIN & !is.na(iap2$LA))) > 0) #only extract bands if there are any with too few cells that are not NA
{
iap4 <- iap2[which(iap2$N.BAND < NCELL.MIN & !is.na(iap2$LA)),c("RADDIST.CONT","N.BAND","LA")]
}
### 4.6.1.25.2.3 Calculate nth (e.g., 90th) percentile of lumen area per image or contiguous image segment, respectively ####
iap5 <- NULL
dt <- NULL
dt <- data.table(iap2)
dt$BLOCK <- as.numeric(match(dt$IMAGE, levels(dt$IMAGE))) #assign numeric index of unique source image to grouping column
setDT(dt)[ , BLOCK2:=cumsum(c(1L, (BLOCK!=shift(BLOCK,1L))[-1L]))] #create second grouping column that holds the image; if data source flips between two images where they overlap, treat each segment as independent group
dt$BLOCK2 <- as.factor(dt$BLOCK2)
setkey(dt, "BLOCK2")
dt <- dt[, list(IMAGE=unique(IMAGE) #aggregate per image segment and calculate lumen area quantile
, LA90=quantile(LA, LA.REF, na.rm=TRUE)
, X.START=min(RADDIST.CONT, na.rm=TRUE)
, X.END=max(RADDIST.CONT, na.rm=TRUE)+RESO[r]),
by=key(dt)]
iap5 <- as.data.frame(setorder(dt, -BLOCK2))
### 4.6.1.25.2.4 Find first value after a (block) of NAs to identify gaps in lumen area profile ####
iap7 <- NULL
if (sum(is.na(iap2$LA)) > 0) #check whether there is any NA
{
iap7 <- iap2[which(is.na(iap2$LA)), c("RADDIST.CONT","LA")] #extract rows with NA for lumen area
iap7$ID <- as.numeric(row.names(iap7))
if (nrow(iap7) > 1) #check whether there is more than 1 NA
{
for (n in 1:c(nrow(iap7)-1))
{
if (iap7$ID[n]+1 == iap7$ID[n+1]) #assign -999 if NA-row is following another directly adjacent NA-row
{
iap7$ID[n] <- -999
}
}
}
# Only the LAST NA of each run keeps a positive ID; +1 points to the first valid band after the gap.
iap7 <- iap7[which(iap7$ID>0), "ID"] + 1 #extract NA-rows that are not flagged -999
iap7 <- iap2$RADDIST.CONT[iap7] #extract radial distance of first bands after a gap
iap7 <- iap7[!is.na(iap7)]
}
### 4.6.1.25.2.5 Check for potential duplicated rings due to wrong visual cross-dating ####
# Neighboring rings with near-identical width (diff < DUP.RW) AND statistically similar LA
# distributions (KS-test p-value > DUP.KS) are flagged as possible duplicates.
iap8 <- NULL
k <- 0
for (n in c(2:length(unique(iap2$YEAR)))) #loop through all years
{
if (sum(!is.na(iap2$LA[iap2$YEAR==unique(iap2$YEAR)[n]]),na.rm=TRUE) > 0 & sum(!is.na(iap2$LA[iap2$YEAR==unique(iap2$YEAR)[n-1]]),na.rm=TRUE) > 0) #exclude years with only NA
{
if (abs(1-(unique(iap2$MRW[iap2$YEAR==unique(iap2$YEAR)[n]]) / unique(iap2$MRW[iap2$YEAR==unique(iap2$YEAR)[n-1]]))) < DUP.RW) #only evaluate if ring width of neighboring rings differ by <n% (e.g., 5%)
{
if (ks.test(iap2$LA[iap2$YEAR==unique(iap2$YEAR)[n]], iap2$LA[iap2$YEAR==unique(iap2$YEAR)[n-1]])$p.value > DUP.KS) #perform Kolmogorov-Smirnoff test and compare p-value against threshold
{
k <- k+1
iap8[k] <- unique(iap2$YEAR)[n] - 1 #add calendar year to list
k <- k+1
iap8[k] <- unique(iap2$YEAR)[n] #add calendar year to list
}
}
}
}
# iap8 <- unique(iap2$YEAR)[c(which(iap8 > DUP.KS)-1, which(iap8 > DUP.KS))] #assume potential duplicate ring if similar ring width and similar shape (p-vaue of ks > n (e.g., 0.95))
### 4.6.1.25.2.6 Calculate smoothed profile serving as a reference to identify (large) outliers ####
iap9 <- iap2[,c("RADDIST.CONT","LA")]
margin <- c(mean(iap9$LA[1:((RM.KERNEL-1)/2)], na.rm=TRUE), NA ,mean(iap9$LA[c((ln<-length(iap9$LA)):(ln-((RM.KERNEL-1)/2)))], na.rm=TRUE))
iap9$LA <- rollapply(iap9$LA, RM.KERNEL, fill=margin, mean, na.rm=TRUE)
}
### 4.6.1.26 Plot and save intra-annual profile of LA (lumen area); add additional QC features if opted for ####
# Opens a PNG device, draws the LA profile for the YR.START..YR.END window, optionally
# overlays all QC features prepared above (image shading, captions, segment quantiles,
# low-cell-count bands, gap markers, duplicate-ring highlights, outlier triangles), then
# draws the profile line on top and adds ring-boundary axes before closing the device.
png(file=paste(woodid, "_Intraprofile_LA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
# Initial plot with lwd=0 only sets up the coordinate system; the visible line is drawn
# later so it sits above the QC background rectangles.
plot(y=iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Lumen Area (LA) (Ám2)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(25,1500))
cex.axis=3, cex.lab=3, ylim=c(25,max(iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
### add QC features
if (PLOT.IMG.ID==TRUE)
{
### Grey-white background and image captions
lim <- par("usr")
if (length(bottomright)>1)
{
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray95", col = "gray95")
}
text(x=iap3$RADDIST.CONT,y=max(iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T),iap3$IMAGE,adj=c(0,0),cex=3)
### Highlight image-segment level lumen area quantiles if deviating strongly from neighboring segments
if (nrow(iap5) >=2) #only apply if minimum 2 segments
{
# if (!is.na(iap5$LA90[1]) & !is.na(iap5$LA90[2]) & (iap5$LA90[2]/iap5$LA90[1] < (1-2*(1-LA.THR)) | iap5$LA90[1]/iap5$LA90[2] < (1-2*(1-LA.THR)))) #at beginning: if 1st segments is much smaller than 2nd segment; verify also that lumen area quantiles are not NA
if (!is.na(iap5$LA90[1]) & !is.na(iap5$LA90[2]) & iap5$LA90[1]/iap5$LA90[2] < (1-2*(1-LA.THR))) #at beginning: if 1st segments is much smaller than 2nd segment; verify also that lumen area quantiles are not NA
{
lines(x=c(iap5$X.START[1], iap5$X.END[1]), y=c(iap5$LA90[1], iap5$LA90[1]), lty=1, lwd=60, col="yellow")
}
# if (!is.na(iap5$LA90[nrow(iap5)]) & !is.na(iap5$LA90[nrow(iap5)-1]) & (iap5$LA90[nrow(iap5)-1]/iap5$LA90[nrow(iap5)] < (1-2*(1-LA.THR)) | iap5$LA90[nrow(iap5)]/iap5$LA90[nrow(iap5)-1] < (1-2*(1-LA.THR)))) #at end: if last segments is much smaller than 2nd last segment; verify also that lumen area quantiles are not NA
if (!is.na(iap5$LA90[nrow(iap5)]) & !is.na(iap5$LA90[nrow(iap5)-1]) & iap5$LA90[nrow(iap5)]/iap5$LA90[nrow(iap5)-1] < (1-2*(1-LA.THR))) #at end: if last segments is much smaller than 2nd last segment; verify also that lumen area quantiles are not NA
{
lines(x=c(iap5$X.START[nrow(iap5)], iap5$X.END[nrow(iap5)]), y=c(iap5$LA90[nrow(iap5)], iap5$LA90[nrow(iap5)]), lty=1, lwd=60, col="yellow")
}
}
if (nrow(iap5) >= 3) #for the non-marginal segments
{
for (n in c(2:(nrow(iap5)-1)))
{
if(!is.na(iap5$LA90[n]) & !is.na(iap5$LA90[n-1]) & !is.na(iap5$LA90[n+1])) #verify that the considered lumen area quantiles are not NA
{
# if (((2 * iap5$LA90[n]) / (iap5$LA90[n-1] + iap5$LA90[n+1])) < LA.THR | ((iap5$LA90[n-1] + iap5$LA90[n+1]) / (2 * iap5$LA90[n])) < LA.THR) #highlight segment if considerable smaller or larger lumen area quantile than for the average of its neighboring segments
if ((2 * iap5$LA90[n] / (iap5$LA90[n-1] + iap5$LA90[n+1])) < LA.THR) #highlight segment if considerable smaller or larger lumen area quantile than for the average of its neighboring segments
{
lines(x=c(iap5$X.START[n], iap5$X.END[n]), y=c(iap5$LA90[n], iap5$LA90[n]), lty=1, lwd=60, col="yellow")
}
}
}
}
### Add horizontal lines for all image-segments with their average lumen area percentile
for (n in c(1:nrow(iap5)))
{
lines(x=c(iap5$X.START[n], iap5$X.END[n]), y=c(iap5$LA90[n], iap5$LA90[n]), lty=2, lwd=6)
}
### Add yellow circle for bands based on very few individual cells, positioned at y = corresponding lumen area
if (length(iap4) > 0)
{
points(iap4$RADDIST.CONT, iap4$LA, pch=21, col="black", bg="yellow", lwd=5, cex=40)
}
### Add red circle at beginning of previously interrupted (gap) profile at y = 0
if (length(iap7) > 0)
{
points(iap7, rep(lim[3]+10,length(iap7)), pch=21, col="black", bg="red", lwd=5, cex=40)
}
### Highlight neighboring rings that are potential duplicated due to a visual cross-dating mistake
if (length(iap8) > 0)
{
for (n in c(1:length(iap8)))
{
lines(y=iap2$LA[iap2$YEAR==iap8[n]], x=iap2$RADDIST.CONT[iap2$YEAR==iap8[n]], lwd=60, col="yellow")
}
}
### Add green triangles to mark potential (large) outliers
lines(y=iap9$LA, x=iap9$RADDIST.CONT, lwd=3) #add smoothed reference line
# iap10 <- which(iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]/iap9$LA > OUT.THR & iap9$LA > quantile(iap9$LA, OUT.QU, na.rm=TRUE)) #extract (large) outliers
# iap10 <- which(iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]-iap9$LA > max(iap9$LA,na.rm=TRUE)/3 & iap9$LA > quantile(iap9$LA, OUT.QU, na.rm=TRUE)) #extract (large) outliers
# NOTE(review): the LA vector here is subset by YR.START/YR.END while iap9$LA spans all
# years; the elementwise comparison only lines up when the year window covers the whole
# profile — confirm that YR.START/YR.END are set accordingly.
iap10 <- which((iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]/iap9$LA > OUT.THR | iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]-iap9$LA > quantile(iap9$LA,0.99,na.rm=TRUE)/2) & iap9$LA > quantile(iap9$LA, OUT.QU, na.rm=TRUE)) #extract (large) outliers
points(x=iap2$RADDIST.CONT[iap10], y=iap2$LA[iap10], pch=25, col="black", bg="green", lwd=5, cex=30) #plot outliers
}
# Draw the actual LA profile line on top of all QC overlays.
lines(y=iap2$LA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Lumen Area (LA) (Ám2)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
# Custom x-axis: ticks and labels at ring boundaries (rows where X.LABEL is set),
# mirrored on the top axis; dotted vertical lines mark the same boundaries.
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.27 Plot and save intra-annual profile of DRAD (radial lumen diameter) ####
# Same template as the LA plot, without the extended QC overlays: set up axes with an
# invisible line (lwd=0), shade image blocks if QC mode is on, then draw the profile.
png(file=paste(woodid, "_Intraprofile_DRAD_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$DRAD[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Radial lumen diameter (DRAD) (Ám)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(25,1500))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$DRAD[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$DRAD[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Radial lumen diameter (DRAD) (Ám)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
# Ring-boundary ticks/labels on bottom and top axes; dotted verticals at boundaries.
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.28 Plot and save intra-annual profile of DTAN (tangential lumen diameter) ####
# Same plotting template as DRAD, applied to the tangential lumen diameter column.
png(file=paste(woodid, "_Intraprofile_DTAN_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$DTAN[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Tangential lumen diameter (DTAN) (Ám)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(25,1500))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$DTAN[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$DTAN[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Tangential lumen diameter (DTAN) (Ám)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.29 Plot and save intra-annual profile of TCA (total cell area) ####
# Same plotting template, applied to the total cell area column.
png(file=paste(woodid, "_Intraprofile_TCA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$TCA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Cell Area (TCA) (Ám2)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0,4000))
cex.axis=3, cex.lab=3, ylim=c(25,max(iap2$TCA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$TCA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Cell Area (TCA) (Ám2)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.30 Plot and save intra-annual profile of CWA (cell wall area) ####
# Same plotting template, applied to the cell wall area column.
png(file=paste(woodid, "_Intraprofile_CWA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$CWA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Cell Wall Area (CWA) (Ám2)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0,2000))
cex.axis=3, cex.lab=3, ylim=c(25,max(iap2$CWA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$CWA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Cell Wall Area (CWA) (Ám2)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.31 Plot and save intra-annual profile of CWAACC (accumulated cell wall area per band) ####
# Same plotting template, applied to the accumulated cell wall area column.
png(file=paste(woodid, "_Intraprofile_CWAACC_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$CWAACC[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Accumulated Cell Wall Area (CWAACC) (Ám2)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0,2000))
cex.axis=3, cex.lab=3, ylim=c(25,max(iap2$CWAACC[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$CWAACC[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Accumulated Cell Wall Area (CWAACC) (Ám2)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.32 Plot and save intra-annual profile of CWTALL (mean cell wall thickness) ####
# Same plotting template, applied to the mean cell wall thickness column.
png(file=paste(woodid, "_Intraprofile_CWTALL_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$CWTALL[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Overall Cell Wall thickness (CWTALL) (Ám)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(1.5,10.0))
cex.axis=3, cex.lab=3, ylim=c(1.5,max(iap2$CWTALL[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$CWTALL[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Overall Cell Wall thickness (CWTALL) (Ám)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.33 Plot and save intra-annual profile of CWTALL.ADJ (adjusted mean cell wall thickness) ####
# Same plotting template, applied to the adjusted mean cell wall thickness column.
png(file=paste(woodid, "_Intraprofile_CWTALL.ADJ_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$CWTALL.ADJ[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Overall adjusted Cell Wall thickness (CWTALL.ADJ) (Ám)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(1.5,10.0))
cex.axis=3, cex.lab=3, ylim=c(1.5,max(iap2$CWTALL.ADJ[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
# Bug fix: previously referenced the nonexistent column iap2$CWTALL.ADJ.ADJ (copy-paste
# typo), so the visible profile line was never drawn; use CWTALL.ADJ like the rest of
# this section.
lines(y=iap2$CWTALL.ADJ[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Overall adjusted Cell Wall thickness (CWTALL.ADJ) (Ám)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.34 Plot and save intra-annual profile of CWTRAD (cell wall thickness of radial wall) ####
# Same plotting template, applied to the radial cell wall thickness column.
png(file=paste(woodid, "_Intraprofile_CWTRAD_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$CWTRAD[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Radial Cell Wall thickness (CWTRAD) (Ám)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(1.5,10.0))
cex.axis=3, cex.lab=3, ylim=c(1.5,max(iap2$CWTRAD[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$CWTRAD[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Radial Cell Wall thickness (CWTRAD) (Ám)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.35 Plot and save intra-annual profile of CWTTAN (cell wall thickness of tangential wall) ####
# Same plotting template, applied to the tangential cell wall thickness column.
png(file=paste(woodid, "_Intraprofile_CWTTAN_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$CWTTAN[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Tangential Cell Wall thickness (CWTTAN) (Ám)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(1.5,9.0))
# Bug fix: ylim was computed from iap2$CWTRAD (copy-paste from the previous section);
# scale the y-axis by the variable actually plotted here, CWTTAN.
cex.axis=3, cex.lab=3, ylim=c(1.5,max(iap2$CWTTAN[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$CWTTAN[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Tangential Cell Wall thickness (CWTTAN) (Ám)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.36 Plot and save intra-annual profile of RTSR (Mork's index) ####
# Same plotting template, applied to Mork's index (RTSR) to separate earlywood/latewood.
png(file=paste(woodid, "_Intraprofile_Mork_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$RTSR[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Mork's index", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.2,7))
cex.axis=3, cex.lab=3, ylim=c(0.2,max(iap2$RTSR[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$RTSR[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Mork's index", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.37 Plot and save intra-annual profile of CTSR (adjusted Mork's index) ####
# Same plotting template, applied to the adjusted Mork's index (4*CWTALL/D).
png(file=paste(woodid, "_Intraprofile_Mork.ADJ_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$CTSR[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Adjusted Mork's index (4*CWTALL/D)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.2,7))
cex.axis=3, cex.lab=3, ylim=c(0.2,max(iap2$CTSR[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$CTSR[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Adjusted Mork's index (4*CWTALL/D)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.38 Plot and save intra-annual profile of DCWT (CWT-based relative anatomical density) ####
# Same plotting template, applied to the CWT-based relative anatomical density column.
png(file=paste(woodid, "_Intraprofile_DCWT_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$DCWT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Rel. Anatomical Density (CWT based)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.2,0.9))
cex.axis=3, cex.lab=3, ylim=c(0.1,max(iap2$DCWT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$DCWT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Rel. Anatomical Density (CWT based)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.39 Plot and save intra-annual profile of DCWT2 (special relative anatomical density: CWTRAD/DRAD) ####
# Same plotting template, applied to the CWTRAD/DRAD-based relative density column.
png(file=paste(woodid, "_Intraprofile_DCWT2_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$DCWT2[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Rel. Anatomical Density (CWTRAD/DRAD)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.2,0.9))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$DCWT2[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$DCWT2[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Rel. Anatomical Density (CWTRAD/DRAD)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.40 Plot and save intra-annual profile of DCWA (CWA-based relative anatomical density=best estimate!) ####
png(file=paste(woodid, "_Intraprofile_DCWA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$DCWA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Rel. Anatomical Density (CWA based)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.1,0.95))
cex.axis=3, cex.lab=3, ylim=c(0.1,max(iap2$DCWA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$DCWA[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Rel. Anatomical Density (CWA based)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.41 Plot and save intra-annual profile of TB2 (cell wall reinforcement index) ####
png(file=paste(woodid, "_Intraprofile_TB2_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$TB2[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Cell wall reinforcement index", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.1,0.95))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$TB2[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$TB2[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Cell wall reinforcement index", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.42 Plot and save intra-annual profile of KH (theoretical hydraulic conductivity) ####
png(file=paste(woodid, "_Intraprofile_KH_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$KH[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Theor. hydraul. conductivity (m4*s-1*MPa-1)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.1,0.95))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$KH[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$KH[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Theor. hydraul. conductivity (m4*s-1*MPa-1)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.43 Plot and save intra-annual profile of DH (mean hydraulic diameter) ####
## Same panel template as the other intra-annual profiles: PNG device, invisible
## first plot (lwd=0) to set up the coordinate system, optional image-ID shading,
## visible line (lwd=4), custom year axes and ring-boundary gridlines.
## FIX: the y-axis label read "(Ám)" -- mojibake (latin-1 0xB5 decoded as CP850)
## for the micrometre unit "(µm)". Also na.rm=T spelled out as na.rm=TRUE.
png(file=paste(woodid, "_Intraprofile_DH_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$DH[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Mean hydraulic diameter (µm)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.1,0.95))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$DH[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=TRUE)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
# NOTE(review): 'bottomright'/'topleft' are presumably x-extents of the image-ID
# band set earlier in the file -- confirm they are in user coordinates.
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$DH[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Mean hydraulic diameter (µm)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
## Two more panels from the shared template (see preceding sections): cell
## counts per radial band, with (N.BAND) and without (N.TRUE) duplicates in the
## last band of each ring.
### 4.6.1.44 Plot and save intra-annual profile of N.BAND (Number of cells per band, allowing duplicates) ####
png(file=paste(woodid, "_Intraprofile_N.BAND_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$N.BAND[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Number of cells per band (N.BAND)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.1,0.95))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$N.BAND[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$N.BAND[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Number of cells per band (N.BAND)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.45 Plot and save intra-annual profile of N.TRUE (Number of cells per band, no duplicates for last band) ####
png(file=paste(woodid, "_Intraprofile_N.TRUE_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=im.width,height=im.height)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=iap2$N.TRUE[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Number of cells per band (N.TRUE)", type="l", lwd=0, col=LINE.COL(length(RESO))[r],
# cex.axis=3, cex.lab=3, ylim=c(0.1,0.95))
cex.axis=3, cex.lab=3, ylim=c(0,max(iap2$N.TRUE[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],na.rm=T)))
if (PLOT.IMG.ID==TRUE)
{
lim <- par("usr")
rect(bottomright, lim[3]-1, topleft, lim[4]+1, border = "gray90", col = "gray90")
}
lines(y=iap2$N.TRUE[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
x=iap2$RADDIST.CONT[iap2$YEAR>=YR.START & iap2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="", ylab="Number of cells per band (N.TRUE)", type="l", lwd=4, col=LINE.COL(length(RESO))[r],
cex.axis=3, cex.lab=3)
par(xaxt="s")
axis(1, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, cex.axis=2, labels=iap2$X.LABEL[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END])
axis(3, at=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END]
, tck=0.015, lwd.ticks=2, labels=FALSE)
axis(2, tck=0.015, lwd.ticks=2, labels=TRUE, cex.axis=2)
axis(4, tck=0.015, lwd.ticks=2, labels=FALSE)
abline(v=iap2$RADDIST.CONT[!is.na(iap2$X.LABEL) & iap2$YEAR>=YR.START & iap2$YEAR<=YR.END], lwd=2, lty=3)
dev.off()
### 4.6.1.45 Prepare calculations of annual statistics ####
## NOTE(review): section number 4.6.1.45 is also used by the N.TRUE panel above.
### Make sure NaNs are replaced by NAs
iap2 <- data.frame(lapply(iap2, function(x){replace(x, is.infinite(x), NA)}))
iap2 <- data.frame(lapply(iap2, function(x){replace(x, is.nan(x), NA)})) #replace NaN by NA
### Add column that gives information whether ring includes only earlwood or only latewood bands
## RobustMax/RobustMin are helpers defined elsewhere in the file (presumably
## NA-tolerant max/min -- confirm). Per ring: if the ring's max RTSR < 1 it has
## only earlywood bands; if its min RTSR > 1 it has only latewood bands.
iap2 <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), mutate, ONLYEW=RobustMax(RTSR))
iap2 <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), mutate, ONLYLW=RobustMin(RTSR))
# NOTE(review): the fallback is the character string "NA", not the NA value --
# downstream comparisons with == will work, but is.na() will not; confirm intended.
iap2$EWLW.ID2 <- ifelse(iap2$ONLYEW<1,"only.ew", ifelse(iap2$ONLYLW>1,"only.lw","NA"))
iap2$ONLYEW <- NULL
iap2$ONLYLW <- NULL
### 4.6.1.46 Calculate annual ring width (MRW), earlywood width (EWW), latewood width(LWW) ####
## MRW/EWW/LWW are constant within a ring, so the per-ring mean just collapses
## them to one row per YEAR x WOODID x IMAGE.
mrw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MRW=mean(MRW, na.rm=T))
eww <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, EWW=mean(EWW, na.rm=T))
lww <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, LWW=mean(LWW, na.rm=T))
## Per-ring (YEAR x WOODID x IMAGE) statistics for lumen area (LA) and total
## cell area (TCA): mean overall and per earlywood/latewood subset, plus the
## ring max/min together with its relative position (RRADDISTR, % of ring
## width), absolute position (mean ring width * relative position / 100) and
## band index. The earlywood mean falls back to the whole-ring mean when the
## ring has no earlywood cells; the latewood fallback is deliberately disabled
## (commented out). merge() with ringTemplate re-inserts missing rings as NA.
### 4.6.1.47 Calculate annual cell lumen area (LA) statistics ####
mla <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MLA=mean(LA, na.rm=TRUE))
mla <- data.frame(lapply(mla, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mla.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MLA.EW=mean(LA[EWLW.ID=="ew"], na.rm=TRUE))
mla.ew <- data.frame(lapply(mla.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mla.ew$MLA.EW <- ifelse(is.na(mla.ew$MLA.EW),mla$MLA,mla.ew$MLA.EW)
mla.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MLA.LW=mean(LA[EWLW.ID=="lw"], na.rm=TRUE))
mla.lw <- data.frame(lapply(mla.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mla.lw$MLA.LW <- ifelse(is.na(mla.lw$MLA.LW),mla$MLA,mla.lw$MLA.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$LA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXLA=RobustMax(LA),
RPOS.MXLA=RRADDISTR[which.max(LA)],
APOS.MXLA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(LA)],
BAND.MXLA=round(RADDISTR.BAND[which.max(LA)])),
by=key(dt)]
mxla <- as.data.frame(dt)
mxla <- merge(mxla, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$LA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNLA=RobustMin(LA),
RPOS.MNLA=RRADDISTR[which.min(LA)],
APOS.MNLA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(LA)],
BAND.MNLA=round(RADDISTR.BAND[which.min(LA)])),
by=key(dt)]
mnla <- as.data.frame(dt)
mnla <- merge(mnla, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.48 Calculate annual total cell area (TCA) statistics ####
mtca <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MTCA=mean(TCA, na.rm=TRUE))
mtca <- data.frame(lapply(mtca, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mtca.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MTCA.EW=mean(TCA[EWLW.ID=="ew"], na.rm=TRUE))
mtca.ew <- data.frame(lapply(mtca.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mtca.ew$MTCA.EW <- ifelse(is.na(mtca.ew$MTCA.EW),mtca$MTCA, mtca.ew$MTCA.EW)
mtca.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MTCA.LW=mean(TCA[EWLW.ID=="lw"], na.rm=TRUE))
mtca.lw <- data.frame(lapply(mtca.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mtca.lw$MTCA.LW <- ifelse(is.na(mtca.lw$MTCA.LW),mtca$MTCA, mtca.lw$MTCA.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$TCA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXTCA=RobustMax(TCA),
RPOS.MXTCA=RRADDISTR[which.max(TCA)],
APOS.MXTCA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(TCA)],
BAND.MXTCA=round(RADDISTR.BAND[which.max(TCA)])),
by=key(dt)]
mxtca <- as.data.frame(dt)
mxtca <- merge(mxtca, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$TCA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNTCA=RobustMin(TCA),
RPOS.MNTCA=RRADDISTR[which.min(TCA)],
APOS.MNTCA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(TCA)],
BAND.MNTCA=round(RADDISTR.BAND[which.min(TCA)])),
by=key(dt)]
mntca <- as.data.frame(dt)
mntca <- merge(mntca, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring statistics for radial (DRAD) and tangential (DTAN) lumen diameters,
## following the same pattern as the LA/TCA sections: overall/EW/LW means (EW
## falls back to whole-ring mean, LW fallback disabled), and ring max/min with
## relative, absolute and band positions. Note the mean columns here are named
## DRAD/DTAN (not MDRAD/MDTAN).
### 4.6.1.49 Calculate annual radial cell lumen diameter (DRAD) statistics ####
mdrad <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, DRAD=mean(DRAD, na.rm=TRUE))
mdrad <- data.frame(lapply(mdrad, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mdrad.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, DRAD.EW=mean(DRAD[EWLW.ID=="ew"], na.rm=TRUE))
mdrad.ew <- data.frame(lapply(mdrad.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mdrad.ew$DRAD.EW <- ifelse(is.na(mdrad.ew$DRAD.EW),mdrad$DRAD,mdrad.ew$DRAD.EW)
mdrad.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, DRAD.LW=mean(DRAD[EWLW.ID=="lw"], na.rm=TRUE))
mdrad.lw <- data.frame(lapply(mdrad.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mdrad.lw$DRAD.LW <- ifelse(is.na(mdrad.lw$DRAD.LW),mdrad$DRAD,mdrad.lw$DRAD.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DRAD),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXDRAD=RobustMax(DRAD),
RPOS.MXDRAD=RRADDISTR[which.max(DRAD)],
APOS.MXDRAD=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(DRAD)],
BAND.MXDRAD=round(RADDISTR.BAND[which.max(DRAD)])),
by=key(dt)]
mxdrad <- as.data.frame(dt)
mxdrad <- merge(mxdrad, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DRAD),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNDRAD=RobustMin(DRAD),
RPOS.MNDRAD=RRADDISTR[which.min(DRAD)],
APOS.MNDRAD=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(DRAD)],
BAND.MNDRAD=round(RADDISTR.BAND[which.min(DRAD)])),
by=key(dt)]
mndrad <- as.data.frame(dt)
mndrad <- merge(mndrad, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.50 Calculate annual tangential cell lumen diameter (DTAN) statistics ####
mdtan <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, DTAN=mean(DTAN, na.rm=TRUE))
mdtan <- data.frame(lapply(mdtan, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mdtan.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, DTAN.EW=mean(DTAN[EWLW.ID=="ew"], na.rm=TRUE))
mdtan.ew <- data.frame(lapply(mdtan.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mdtan.ew$DTAN.EW <- ifelse(is.na(mdtan.ew$DTAN.EW),mdtan$DTAN,mdtan.ew$DTAN.EW)
mdtan.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, DTAN.LW=mean(DTAN[EWLW.ID=="lw"], na.rm=TRUE))
mdtan.lw <- data.frame(lapply(mdtan.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mdtan.lw$DTAN.LW <- ifelse(is.na(mdtan.lw$DTAN.LW),mdtan$DTAN,mdtan.lw$DTAN.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DTAN),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXDTAN=RobustMax(DTAN),
RPOS.MXDTAN=RRADDISTR[which.max(DTAN)],
APOS.MXDTAN=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(DTAN)],
BAND.MXDTAN=round(RADDISTR.BAND[which.max(DTAN)])),
by=key(dt)]
mxdtan <- as.data.frame(dt)
mxdtan <- merge(mxdtan, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DTAN),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNDTAN=RobustMin(DTAN),
RPOS.MNDTAN=RRADDISTR[which.min(DTAN)],
APOS.MNDTAN=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(DTAN)],
BAND.MNDTAN=round(RADDISTR.BAND[which.min(DTAN)])),
by=key(dt)]
mndtan <- as.data.frame(dt)
mndtan <- merge(mndtan, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring statistics for cell wall area (CWA, per-band mean) and accumulated
## cell wall area (CWAACC, per-ring SUM -- the only section here that sums
## rather than averages). Same overall/EW/LW pattern as the sections above;
## for CWAACC an extra step turns LW sums of exactly 0 (sum over an empty
## subset with na.rm=TRUE) into NA.
### 4.6.1.51 Calculate annual cell wall area (CWA) statistics ####
mcwa <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWA=mean(CWA, na.rm=TRUE))
mcwa <- data.frame(lapply(mcwa, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwa.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWA.EW=mean(CWA[EWLW.ID=="ew"], na.rm=TRUE))
mcwa.ew <- data.frame(lapply(mcwa.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwa.ew$MCWA.EW <- ifelse(is.na(mcwa.ew$MCWA.EW),mcwa$MCWA, mcwa.ew$MCWA.EW)
mcwa.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWA.LW=mean(CWA[EWLW.ID=="lw"], na.rm=TRUE))
mcwa.lw <- data.frame(lapply(mcwa.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mcwa.lw$MCWA.LW <- ifelse(is.na(mcwa.lw$MCWA.LW),mcwa$MCWA, mcwa.lw$MCWA.LW)
dt <- NULL
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXCWA=RobustMax(CWA),
RPOS.MXCWA=RRADDISTR[which.max(CWA)],
APOS.MXCWA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(CWA)],
BAND.MXCWA=round(RADDISTR.BAND[which.max(CWA)])),
by=key(dt)]
mxcwa <- as.data.frame(dt)
mxcwa <- merge(mxcwa, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNCWA=RobustMin(CWA),
RPOS.MNCWA=RRADDISTR[which.min(CWA)],
APOS.MNCWA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(CWA)],
BAND.MNCWA=round(RADDISTR.BAND[which.min(CWA)])),
by=key(dt)]
mncwa <- as.data.frame(dt)
mncwa <- merge(mncwa, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.52 Calculate annual accumulated cell wall area (CWAACC) statistics ####
cwaacc <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, CWAACC=sum(CWAACC, na.rm=TRUE))
cwaacc <- data.frame(lapply(cwaacc, function(x){replace(x, is.nan(x), NA)})) #replace NaN by NA
cwaacc.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, CWAACC.EW=sum(CWAACC[EWLW.ID=="ew"], na.rm=TRUE))
cwaacc.ew <- data.frame(lapply(cwaacc.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN by NA
cwaacc.ew$CWAACC.EW <- ifelse(is.na(cwaacc.ew$CWAACC.EW),cwaacc$CWAACC,cwaacc.ew$CWAACC.EW)
cwaacc.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, CWAACC.LW=sum(CWAACC[EWLW.ID=="lw"], na.rm=TRUE))
cwaacc.lw <- data.frame(lapply(cwaacc.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN by NA
## sum() over an empty LW subset yields 0, not NaN -- map those zeros to NA.
cwaacc.lw$CWAACC.LW <- ifelse(cwaacc.lw$CWAACC.LW==0,NA,cwaacc.lw$CWAACC.LW)
# cwaacc.lw$CWAACC.LW <- ifelse(is.na(cwaacc.lw$CWAACC.LW),cwaacc$CWAACC,cwaacc.lw$CWAACC.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWAACC),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXCWAACC=RobustMax(CWAACC),
RPOS.MXCWAACC=RRADDISTR[which.max(CWAACC)],
APOS.MXCWAACC=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(CWAACC)],
BAND.MXCWAACC=round(RADDISTR.BAND[which.max(CWAACC)])),
by=key(dt)]
mxcwaacc <- as.data.frame(dt)
mxcwaacc <- merge(mxcwaacc, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWAACC),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNCWAACC=RobustMin(CWAACC),
RPOS.MNCWAACC=RRADDISTR[which.min(CWAACC)],
APOS.MNCWAACC=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(CWAACC)],
BAND.MNCWAACC=round(RADDISTR.BAND[which.min(CWAACC)])),
by=key(dt)]
mncwaacc <- as.data.frame(dt)
mncwaacc <- merge(mncwaacc, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring statistics for mean cell wall thickness (CWTALL) and its adjusted
## variant (CWTALL.ADJ), same overall/EW/LW + max/min pattern as above.
## NOTE(review): in these two sections the commented-out filter above the MIN
## computation still reads ew/"only.lw" (copied from the MAX side), whereas
## earlier sections' MIN side used lw/"only.ew" -- dead code, but inconsistent.
### 4.6.1.53 Calculate annual mean cell wall thickness (CWTALL) statistics ####
mcwtall <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTALL=mean(CWTALL, na.rm=TRUE))
mcwtall <- data.frame(lapply(mcwtall, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwtall.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTALL.EW=mean(CWTALL[EWLW.ID=="ew"], na.rm=TRUE))
mcwtall.ew <- data.frame(lapply(mcwtall.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwtall.ew$MCWTALL.EW <- ifelse(is.na(mcwtall.ew$MCWTALL.EW),mcwtall$MCWTALL, mcwtall.ew$MCWTALL.EW)
mcwtall.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTALL.LW=mean(CWTALL[EWLW.ID=="lw"], na.rm=TRUE))
mcwtall.lw <- data.frame(lapply(mcwtall.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mcwtall.lw$MCWTALL.LW <- ifelse(is.na(mcwtall.lw$MCWTALL.LW),mcwtall$MCWTALL, mcwtall.lw$MCWTALL.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTALL),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXCWTALL=RobustMax(CWTALL),
RPOS.MXCWTALL=RRADDISTR[which.max(CWTALL)],
APOS.MXCWTALL=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(CWTALL)],
BAND.MXCWTALL=round(RADDISTR.BAND[which.max(CWTALL)])),
by=key(dt)]
mxcwtall <- as.data.frame(dt)
mxcwtall <- merge(mxcwtall, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTALL),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNCWTALL=RobustMin(CWTALL),
RPOS.MNCWTALL=RRADDISTR[which.min(CWTALL)],
APOS.MNCWTALL=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(CWTALL)],
BAND.MNCWTALL=round(RADDISTR.BAND[which.min(CWTALL)])),
by=key(dt)]
mncwtall <- as.data.frame(dt)
mncwtall <- merge(mncwtall, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.54 Calcuate annual mean cell wall thickness adjusted (CWTALL.ADJ) statistics ####
mcwtalladj <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTALL.ADJ=mean(CWTALL.ADJ, na.rm=TRUE))
mcwtalladj <- data.frame(lapply(mcwtalladj, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwtalladj.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTALL.ADJ.EW=mean(CWTALL.ADJ[EWLW.ID=="ew"], na.rm=TRUE))
mcwtalladj.ew <- data.frame(lapply(mcwtalladj.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwtalladj.ew$MCWTALL.ADJ.EW <- ifelse(is.na(mcwtalladj.ew$MCWTALL.ADJ.EW),mcwtalladj$MCWTALL.ADJ, mcwtalladj.ew$MCWTALL.ADJ.EW)
mcwtalladj.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTALL.ADJ.LW=mean(CWTALL.ADJ[EWLW.ID=="lw"], na.rm=TRUE))
mcwtalladj.lw <- data.frame(lapply(mcwtalladj.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mcwtalladj.lw$MCWTALL.ADJ.LW <- ifelse(is.na(mcwtalladj.lw$MCWTALL.ADJ.LW),mcwtalladj$MCWTALL.ADJ, mcwtalladj.lw$MCWTALL.ADJ.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTALL.ADJ),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXCWTALL.ADJ=RobustMax(CWTALL.ADJ),
RPOS.MXCWTALL.ADJ=RRADDISTR[which.max(CWTALL.ADJ)],
APOS.MXCWTALL.ADJ=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(CWTALL.ADJ)],
BAND.MXCWTALL.ADJ=round(RADDISTR.BAND[which.max(CWTALL.ADJ)])),
by=key(dt)]
mxcwtalladj <- as.data.frame(dt)
mxcwtalladj <- merge(mxcwtalladj, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTALL.ADJ),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNCWTALL.ADJ=RobustMin(CWTALL.ADJ),
RPOS.MNCWTALL.ADJ=RRADDISTR[which.min(CWTALL.ADJ)],
APOS.MNCWTALL.ADJ=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(CWTALL.ADJ)],
BAND.MNCWTALL.ADJ=round(RADDISTR.BAND[which.min(CWTALL.ADJ)])),
by=key(dt)]
mncwtalladj <- as.data.frame(dt)
mncwtalladj <- merge(mncwtalladj, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring statistics for tangential (CWTTAN) and radial (CWTRAD) cell wall
## thickness, same overall/EW/LW + max/min pattern as the sections above.
### 4.6.1.55 Calculate annual tangential cell wall thickness (CWTTAN) statistics ####
mcwttan <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTTAN=mean(CWTTAN, na.rm=TRUE))
mcwttan <- data.frame(lapply(mcwttan, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwttan.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTTAN.EW=mean(CWTTAN[EWLW.ID=="ew"], na.rm=TRUE))
mcwttan.ew <- data.frame(lapply(mcwttan.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwttan.ew$MCWTTAN.EW <- ifelse(is.na(mcwttan.ew$MCWTTAN.EW),mcwttan$MCWTTAN, mcwttan.ew$MCWTTAN.EW)
mcwttan.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTTAN.LW=mean(CWTTAN[EWLW.ID=="lw"], na.rm=TRUE))
mcwttan.lw <- data.frame(lapply(mcwttan.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mcwttan.lw$MCWTTAN.LW <- ifelse(is.na(mcwttan.lw$MCWTTAN.LW),mcwttan$MCWTTAN, mcwttan.lw$MCWTTAN.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTTAN),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXCWTTAN=RobustMax(CWTTAN),
RPOS.MXCWTTAN=RRADDISTR[which.max(CWTTAN)],
APOS.MXCWTTAN=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(CWTTAN)],
BAND.MXCWTTAN=round(RADDISTR.BAND[which.max(CWTTAN)])),
by=key(dt)]
mxcwttan <- as.data.frame(dt)
mxcwttan <- merge(mxcwttan, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTTAN),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNCWTTAN=RobustMin(CWTTAN),
RPOS.MNCWTTAN=RRADDISTR[which.min(CWTTAN)],
APOS.MNCWTTAN=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(CWTTAN)],
BAND.MNCWTTAN=round(RADDISTR.BAND[which.min(CWTTAN)])),
by=key(dt)]
mncwttan <- as.data.frame(dt)
mncwttan <- merge(mncwttan, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.56 Calculate annual radial cell wall thickness (CWTRAD) statistics ####
mcwtrad <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTRAD=mean(CWTRAD, na.rm=TRUE))
mcwtrad <- data.frame(lapply(mcwtrad, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwtrad.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTRAD.EW=mean(CWTRAD[EWLW.ID=="ew"], na.rm=TRUE))
mcwtrad.ew <- data.frame(lapply(mcwtrad.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
mcwtrad.ew$MCWTRAD.EW <- ifelse(is.na(mcwtrad.ew$MCWTRAD.EW),mcwtrad$MCWTRAD, mcwtrad.ew$MCWTRAD.EW)
mcwtrad.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCWTRAD.LW=mean(CWTRAD[EWLW.ID=="lw"], na.rm=TRUE))
mcwtrad.lw <- data.frame(lapply(mcwtrad.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty-group mean) by NA
# mcwtrad.lw$MCWTRAD.LW <- ifelse(is.na(mcwtrad.lw$MCWTRAD.LW),mcwtrad$MCWTRAD, mcwtrad.lw$MCWTRAD.LW)
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTRAD),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXCWTRAD=RobustMax(CWTRAD),
RPOS.MXCWTRAD=RRADDISTR[which.max(CWTRAD)],
APOS.MXCWTRAD=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(CWTRAD)],
BAND.MXCWTRAD=round(RADDISTR.BAND[which.max(CWTRAD)])),
by=key(dt)]
mxcwtrad <- as.data.frame(dt)
mxcwtrad <- merge(mxcwtrad, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CWTRAD),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNCWTRAD=RobustMin(CWTRAD),
RPOS.MNCWTRAD=RRADDISTR[which.min(CWTRAD)],
APOS.MNCWTRAD=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(CWTRAD)],
BAND.MNCWTRAD=round(RADDISTR.BAND[which.min(CWTRAD)])),
by=key(dt)]
mncwtrad <- as.data.frame(dt)
mncwtrad <- merge(mncwtrad, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.57 Calculate annual Mork's index (RTSR) statistics ####
## Annual means of Mork's index per ring: whole ring, earlywood (EW) and latewood (LW).
## mean() of an empty/all-NA subset yields NaN, which is normalized to NA below.
mmork <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MRTSR=mean(RTSR, na.rm=TRUE))
mmork <- data.frame(lapply(mmork, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mmork.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MRTSR.EW=mean(RTSR[EWLW.ID=="ew"], na.rm=TRUE))
mmork.ew <- data.frame(lapply(mmork.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## If a ring has no earlywood cells, fall back to the whole-ring mean.
mmork.ew$MRTSR.EW <- ifelse(is.na(mmork.ew$MRTSR.EW),mmork$MRTSR, mmork.ew$MRTSR.EW)
mmork.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MRTSR.LW=mean(RTSR[EWLW.ID=="lw"], na.rm=TRUE))
mmork.lw <- data.frame(lapply(mmork.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mmork.lw$MRTSR.LW <- ifelse(is.na(mmork.lw$MRTSR.LW),mmork$MRTSR, mmork.lw$MRTSR.LW)
## Per-ring MAXIMUM of RTSR plus its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$RTSR),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXRTSR=RobustMax(RTSR),
                RPOS.MXRTSR=RRADDISTR[which.max(RTSR)],
                APOS.MXRTSR=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(RTSR)],
                BAND.MXRTSR=round(RADDISTR.BAND[which.max(RTSR)])),
         by=key(dt)]
mxmork <- as.data.frame(dt)
mxmork <- merge(mxmork, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM of RTSR, same layout.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$RTSR),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNRTSR=RobustMin(RTSR),
                RPOS.MNRTSR=RRADDISTR[which.min(RTSR)],
                APOS.MNRTSR=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(RTSR)],
                BAND.MNRTSR=round(RADDISTR.BAND[which.min(RTSR)])),
         by=key(dt)]
mnmork <- as.data.frame(dt)
mnmork <- merge(mnmork, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.58 Calculate annual circular thickness-to-span ratio (CTSR) statistics ####
## Annual means of CTSR per ring: whole ring, earlywood and latewood (same pattern as RTSR above).
mctsr <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCTSR=mean(CTSR, na.rm=TRUE))
mctsr <- data.frame(lapply(mctsr, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mctsr.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCTSR.EW=mean(CTSR[EWLW.ID=="ew"], na.rm=TRUE))
mctsr.ew <- data.frame(lapply(mctsr.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood value falls back to the whole-ring mean where missing.
mctsr.ew$MCTSR.EW <- ifelse(is.na(mctsr.ew$MCTSR.EW),mctsr$MCTSR, mctsr.ew$MCTSR.EW)
mctsr.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MCTSR.LW=mean(CTSR[EWLW.ID=="lw"], na.rm=TRUE))
mctsr.lw <- data.frame(lapply(mctsr.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mctsr.lw$MCTSR.LW <- ifelse(is.na(mctsr.lw$MCTSR.LW),mctsr$MCTSR, mctsr.lw$MCTSR.LW)
## Per-ring MAXIMUM of CTSR with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CTSR),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXCTSR=RobustMax(CTSR),
                RPOS.MXCTSR=RRADDISTR[which.max(CTSR)],
                APOS.MXCTSR=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(CTSR)],
                BAND.MXCTSR=round(RADDISTR.BAND[which.max(CTSR)])),
         by=key(dt)]
mxctsr <- as.data.frame(dt)
mxctsr <- merge(mxctsr, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM of CTSR, same layout.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$CTSR),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNCTSR=RobustMin(CTSR),
                RPOS.MNCTSR=RRADDISTR[which.min(CTSR)],
                APOS.MNCTSR=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(CTSR)],
                BAND.MNCTSR=round(RADDISTR.BAND[which.min(CTSR)])),
         by=key(dt)]
mnctsr <- as.data.frame(dt)
mnctsr <- merge(mnctsr, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.59 Calculate annual relative anatomical density based on CWT (DCWT) statistics ####
## Annual means of DCWT per ring: whole ring, earlywood and latewood.
mdcwt <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWT=mean(DCWT, na.rm=TRUE))
mdcwt <- data.frame(lapply(mdcwt, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mdcwt.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWT.EW=mean(DCWT[EWLW.ID=="ew"], na.rm=TRUE))
mdcwt.ew <- data.frame(lapply(mdcwt.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood value falls back to the whole-ring mean where missing.
mdcwt.ew$MDCWT.EW <- ifelse(is.na(mdcwt.ew$MDCWT.EW),mdcwt$MDCWT, mdcwt.ew$MDCWT.EW)
mdcwt.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWT.LW=mean(DCWT[EWLW.ID=="lw"], na.rm=TRUE))
mdcwt.lw <- data.frame(lapply(mdcwt.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mdcwt.lw$MDCWT.LW <- ifelse(is.na(mdcwt.lw$MDCWT.LW),mdcwt$MDCWT, mdcwt.lw$MDCWT.LW)
## Per-ring MAXIMUM of DCWT with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DCWT),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXDCWT=RobustMax(DCWT),
                RPOS.MXDCWT=RRADDISTR[which.max(DCWT)],
                APOS.MXDCWT=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(DCWT)],
                BAND.MXDCWT=round(RADDISTR.BAND[which.max(DCWT)])),
         by=key(dt)]
mxdcwt <- as.data.frame(dt)
mxdcwt <- merge(mxdcwt, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM of DCWT, same layout.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DCWT),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNDCWT=RobustMin(DCWT),
                RPOS.MNDCWT=RRADDISTR[which.min(DCWT)],
                APOS.MNDCWT=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(DCWT)],
                BAND.MNDCWT=round(RADDISTR.BAND[which.min(DCWT)])),
         by=key(dt)]
mndcwt <- as.data.frame(dt)
mndcwt <- merge(mndcwt, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.60 Calculate annual special relative anatomical density (DCWT2=CWTRAD/DRAD) statistics ####
## Annual means of DCWT2 per ring: whole ring, earlywood and latewood.
mdcwt2 <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWT2=mean(DCWT2, na.rm=TRUE))
mdcwt2 <- data.frame(lapply(mdcwt2, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mdcwt2.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWT2.EW=mean(DCWT2[EWLW.ID=="ew"], na.rm=TRUE))
mdcwt2.ew <- data.frame(lapply(mdcwt2.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood value falls back to the whole-ring mean where missing.
mdcwt2.ew$MDCWT2.EW <- ifelse(is.na(mdcwt2.ew$MDCWT2.EW),mdcwt2$MDCWT2, mdcwt2.ew$MDCWT2.EW)
mdcwt2.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWT2.LW=mean(DCWT2[EWLW.ID=="lw"], na.rm=TRUE))
mdcwt2.lw <- data.frame(lapply(mdcwt2.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mdcwt2.lw$MDCWT2.LW <- ifelse(is.na(mdcwt2.lw$MDCWT2.LW),mdcwt2$MDCWT2, mdcwt2.lw$MDCWT2.LW)
## NOTE(review): unlike the other sections, the EW/LW subset filters are ACTIVE here:
## the MAXIMUM of DCWT2 is searched in latewood (plus unidentified / ew-only rings) and
## the MINIMUM in earlywood (plus unidentified / lw-only rings). Presumably intentional,
## since CWTRAD/DRAD peaks in latewood and bottoms out in earlywood -- confirm.
dt <- NULL
dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- dt[!is.na(dt$DCWT2),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXDCWT2=RobustMax(DCWT2),
                RPOS.MXDCWT2=RRADDISTR[which.max(DCWT2)],
                APOS.MXDCWT2=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(DCWT2)],
                BAND.MXDCWT2=round(RADDISTR.BAND[which.max(DCWT2)])),
         by=key(dt)]
mxdcwt2 <- as.data.frame(dt)
mxdcwt2 <- merge(mxdcwt2, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
dt <- NULL
dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- dt[!is.na(dt$DCWT2),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNDCWT2=RobustMin(DCWT2),
                RPOS.MNDCWT2=RRADDISTR[which.min(DCWT2)],
                APOS.MNDCWT2=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(DCWT2)],
                BAND.MNDCWT2=round(RADDISTR.BAND[which.min(DCWT2)])),
         by=key(dt)]
mndcwt2 <- as.data.frame(dt)
mndcwt2 <- merge(mndcwt2, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.61 Calculate annual relative anatomical density based on CWA (DCWA) statistics ####
## Annual means of DCWA per ring: whole ring, earlywood and latewood.
mdcwa <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWA=mean(DCWA, na.rm=TRUE))
mdcwa <- data.frame(lapply(mdcwa, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mdcwa.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWA.EW=mean(DCWA[EWLW.ID=="ew"], na.rm=TRUE))
mdcwa.ew <- data.frame(lapply(mdcwa.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood value falls back to the whole-ring mean where missing.
mdcwa.ew$MDCWA.EW <- ifelse(is.na(mdcwa.ew$MDCWA.EW),mdcwa$MDCWA,mdcwa.ew$MDCWA.EW)
mdcwa.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDCWA.LW=mean(DCWA[EWLW.ID=="lw"], na.rm=TRUE))
mdcwa.lw <- data.frame(lapply(mdcwa.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mdcwa.lw$MDCWA.LW <- ifelse(is.na(mdcwa.lw$MDCWA.LW),mdcwa$MDCWA,mdcwa.lw$MDCWA.LW)
## Per-ring MAXIMUM of DCWA with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DCWA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXDCWA=RobustMax(DCWA),
                RPOS.MXDCWA=RRADDISTR[which.max(DCWA)],
                APOS.MXDCWA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(DCWA)],
                BAND.MXDCWA=round(RADDISTR.BAND[which.max(DCWA)])),
         by=key(dt)]
mxdcwa <- as.data.frame(dt)
mxdcwa <- merge(mxdcwa, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM of DCWA, same layout.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DCWA),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNDCWA=RobustMin(DCWA),
                RPOS.MNDCWA=RRADDISTR[which.min(DCWA)],
                APOS.MNDCWA=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(DCWA)],
                BAND.MNDCWA=round(RADDISTR.BAND[which.min(DCWA)])),
         by=key(dt)]
mndcwa <- as.data.frame(dt)
mndcwa <- merge(mndcwa, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.62 Calculate annual cell reinforcement index, (t/b)2 (TB2) statistics ####
## Annual means of TB2 per ring: whole ring, earlywood and latewood.
mtb2 <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MTB2=mean(TB2, na.rm=TRUE))
mtb2 <- data.frame(lapply(mtb2, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mtb2.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MTB2.EW=mean(TB2[EWLW.ID=="ew"], na.rm=TRUE))
mtb2.ew <- data.frame(lapply(mtb2.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood value falls back to the whole-ring mean where missing.
mtb2.ew$MTB2.EW <- ifelse(is.na(mtb2.ew$MTB2.EW),mtb2$MTB2,mtb2.ew$MTB2.EW)
mtb2.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MTB2.LW=mean(TB2[EWLW.ID=="lw"], na.rm=TRUE))
mtb2.lw <- data.frame(lapply(mtb2.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mtb2.lw$MTB2.LW <- ifelse(is.na(mtb2.lw$MTB2.LW),mtb2$MTB2,mtb2.lw$MTB2.LW)
## Per-ring MAXIMUM of TB2 with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$TB2),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXTB2=RobustMax(TB2),
                RPOS.MXTB2=RRADDISTR[which.max(TB2)],
                APOS.MXTB2=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(TB2)],
                BAND.MXTB2=round(RADDISTR.BAND[which.max(TB2)])),
         by=key(dt)]
mxtb2 <- as.data.frame(dt)
mxtb2 <- merge(mxtb2, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM of TB2, same layout.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$TB2),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNTB2=RobustMin(TB2),
                RPOS.MNTB2=RRADDISTR[which.min(TB2)],
                APOS.MNTB2=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(TB2)],
                BAND.MNTB2=round(RADDISTR.BAND[which.min(TB2)])),
         by=key(dt)]
mntb2 <- as.data.frame(dt)
mntb2 <- merge(mntb2, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.63 Calculate annual theoretical hydraulic conductivity (KH) statistics ####
## Annual means of KH per ring: whole ring, earlywood and latewood.
mkh <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MKH=mean(KH, na.rm=TRUE))
mkh <- data.frame(lapply(mkh, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mkh.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MKH.EW=mean(KH[EWLW.ID=="ew"], na.rm=TRUE))
mkh.ew <- data.frame(lapply(mkh.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood value falls back to the whole-ring mean where missing.
mkh.ew$MKH.EW <- ifelse(is.na(mkh.ew$MKH.EW),mkh$MKH,mkh.ew$MKH.EW)
mkh.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MKH.LW=mean(KH[EWLW.ID=="lw"], na.rm=TRUE))
mkh.lw <- data.frame(lapply(mkh.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mkh.lw$MKH.LW <- ifelse(is.na(mkh.lw$MKH.LW),mkh$MKH,mkh.lw$MKH.LW)
## Per-ring MAXIMUM of KH with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$KH),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXKH=RobustMax(KH),
                RPOS.MXKH=RRADDISTR[which.max(KH)],
                APOS.MXKH=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(KH)],
                BAND.MXKH=round(RADDISTR.BAND[which.max(KH)])),
         by=key(dt)]
mxkh <- as.data.frame(dt)
mxkh <- merge(mxkh, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM of KH, same layout.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$KH),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNKH=RobustMin(KH),
                RPOS.MNKH=RRADDISTR[which.min(KH)],
                APOS.MNKH=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(KH)],
                BAND.MNKH=round(RADDISTR.BAND[which.min(KH)])),
         by=key(dt)]
mnkh <- as.data.frame(dt)
mnkh <- merge(mnkh, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.64 Calculate annual mean hydraulic diameter (DH) statistics ####
## Annual means of DH per ring: whole ring, earlywood and latewood.
mdh <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDH=mean(DH, na.rm=TRUE))
mdh <- data.frame(lapply(mdh, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
mdh.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDH.EW=mean(DH[EWLW.ID=="ew"], na.rm=TRUE))
mdh.ew <- data.frame(lapply(mdh.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood value falls back to the whole-ring mean where missing.
mdh.ew$MDH.EW <- ifelse(is.na(mdh.ew$MDH.EW),mdh$MDH,mdh.ew$MDH.EW)
mdh.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, MDH.LW=mean(DH[EWLW.ID=="lw"], na.rm=TRUE))
mdh.lw <- data.frame(lapply(mdh.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Latewood fallback deliberately disabled: rings without latewood keep NA.
# mdh.lw$MDH.LW <- ifelse(is.na(mdh.lw$MDH.LW),mdh$MDH,mdh.lw$MDH.LW)
## Per-ring MAXIMUM of DH with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DH),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXDH=RobustMax(DH),
                RPOS.MXDH=RRADDISTR[which.max(DH)],
                APOS.MXDH=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(DH)],
                BAND.MXDH=round(RADDISTR.BAND[which.max(DH)])),
         by=key(dt)]
mxdh <- as.data.frame(dt)
mxdh <- merge(mxdh, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM of DH, same layout.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$DH),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNDH=RobustMin(DH),
                RPOS.MNDH=RRADDISTR[which.min(DH)],
                APOS.MNDH=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(DH)],
                BAND.MNDH=round(RADDISTR.BAND[which.min(DH)])),
         by=key(dt)]
mndh <- as.data.frame(dt)
mndh <- merge(mndh, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.65 Calculate annual number of cells (N.BAND) statistics ####
# ncells.band <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, NCELLS.BAND=sum(N.BAND, na.rm=TRUE))
# ncells.true <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, NCELLS.TRUE=sum(N.TRUE, na.rm=TRUE))
## Annual cell counts per ring (sums, not means): whole ring, earlywood and latewood.
ncells.band <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, N.BAND=sum(N.BAND, na.rm=TRUE))
ncells.band <- data.frame(lapply(ncells.band, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
ncells.band.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, N.BAND.EW=sum(N.BAND[EWLW.ID=="ew"], na.rm=TRUE))
ncells.band.ew <- data.frame(lapply(ncells.band.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood count falls back to the whole-ring count where missing.
ncells.band.ew$N.BAND.EW <- ifelse(is.na(ncells.band.ew$N.BAND.EW),ncells.band$N.BAND,ncells.band.ew$N.BAND.EW)
ncells.band.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, N.BAND.LW=sum(N.BAND[EWLW.ID=="lw"], na.rm=TRUE))
ncells.band.lw <- data.frame(lapply(ncells.band.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## sum() over an empty latewood subset returns 0, not NaN -- convert that 0 to NA.
ncells.band.lw$N.BAND.LW <- ifelse(ncells.band.lw$N.BAND.LW==0,NA,ncells.band.lw$N.BAND.LW)
# ncells.band.lw$N.BAND.LW <- ifelse(is.na(ncells.band.lw$N.BAND.LW),ncells.band$N.BAND,ncells.band.lw$N.BAND.LW)
## Per-ring MAXIMUM band cell count with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$N.BAND),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MXN.BAND=RobustMax(N.BAND),
                RPOS.MXN.BAND=RRADDISTR[which.max(N.BAND)],
                APOS.MXN.BAND=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(N.BAND)],
                BAND.MXN.BAND=round(RADDISTR.BAND[which.max(N.BAND)])),
         by=key(dt)]
mxncells.band <- as.data.frame(dt)
mxncells.band <- merge(mxncells.band, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM band cell count. MRW is carried along temporarily so the
## NA-position fallback below can be computed, then dropped again.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$N.BAND),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNN.BAND=RobustMin(N.BAND),
                RPOS.MNN.BAND=RRADDISTR[which.min(N.BAND)],
                APOS.MNN.BAND=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(N.BAND)],
                BAND.MNN.BAND=round(RADDISTR.BAND[which.min(N.BAND)]),
                MRW=mean(MRW,na.rm=TRUE)),
         by=key(dt)]
## When the minimum count is 0 there is no RRADDISTR for that band; substitute the
## band centre. NOTE(review): MNN.BAND is 0 in exactly that case, so the expression
## reduces to RESO[r]/2 (centre of the first band) -- confirm that is the intent.
dt$APOS.MNN.BAND <- ifelse(!is.na(dt$APOS.MNN.BAND), dt$APOS.MNN.BAND, (dt$MNN.BAND+RESO[r]/2) ) #if there minimum number of cells is 0, there is no RRADDISTR; assume center position of band instead
dt$RPOS.MNN.BAND <- ifelse(!is.na(dt$RPOS.MNN.BAND), dt$RPOS.MNN.BAND, (dt$APOS.MNN.BAND*100/dt$MRW)) #if there minimum number of cells is 0, there is no RRADDISTR; assume center position of band instead
dt$MRW <- NULL #remove temporary column that was only used to troubleshoot NAs in RPOS.MNN.BAND
mnncells.band <- as.data.frame(dt)
mnncells.band <- merge(mnncells.band, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.66 Calculate annual number of cells (N.TRUE) statistics ####
## Same layout as the N.BAND section above, for the "true" cell counts.
ncells.true <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, N.TRUE=sum(N.TRUE, na.rm=TRUE))
ncells.true <- data.frame(lapply(ncells.true, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
ncells.true.ew <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, N.TRUE.EW=sum(N.TRUE[EWLW.ID=="ew"], na.rm=TRUE))
ncells.true.ew <- data.frame(lapply(ncells.true.ew, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## Earlywood count falls back to the whole-ring count where missing.
ncells.true.ew$N.TRUE.EW <- ifelse(is.na(ncells.true.ew$N.TRUE.EW),ncells.true$N.TRUE,ncells.true.ew$N.TRUE.EW)
ncells.true.lw <- ddply(iap2, c("YEAR", "WOODID", "IMAGE"), summarise, N.TRUE.LW=sum(N.TRUE[EWLW.ID=="lw"], na.rm=TRUE))
ncells.true.lw <- data.frame(lapply(ncells.true.lw, function(x){replace(x, is.nan(x), NA)})) #replace NaN (empty subset) by NA
## sum() over an empty latewood subset returns 0, not NaN -- convert that 0 to NA.
ncells.true.lw$N.TRUE.LW <- ifelse(ncells.true.lw$N.TRUE.LW==0,NA,ncells.true.lw$N.TRUE.LW)
# ncells.true.lw$N.TRUE.LW <- ifelse(is.na(ncells.true.lw$N.TRUE.LW),ncells.true$N.TRUE,ncells.true.lw$N.TRUE.LW)
## Per-ring MAXIMUM true cell count with its relative/absolute position and band index.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="ew"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.lw",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$N.TRUE),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
## NOTE(review): the column is named TRUE.MXN.TRUE, whereas every other section
## names the band index BAND.* (cf. BAND.MXN.BAND). This looks like a find/replace
## slip ("BAND" -> "TRUE") and probably was meant to be BAND.MXN.TRUE. Left
## unchanged because the column name flows into the written AnnualStats file and
## downstream consumers may already rely on it -- verify before renaming.
dt <- dt[, list(MXN.TRUE=RobustMax(N.TRUE),
                RPOS.MXN.TRUE=RRADDISTR[which.max(N.TRUE)],
                APOS.MXN.TRUE=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.max(N.TRUE)],
                TRUE.MXN.TRUE=round(RADDISTR.BAND[which.max(N.TRUE)])),
         by=key(dt)]
mxncells.true <- as.data.frame(dt)
mxncells.true <- merge(mxncells.true, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
## Per-ring MINIMUM true cell count; MRW carried along temporarily for the fallback below.
dt <- NULL
# dt <- data.table(iap2[iap2$EWLW.ID=="lw"|is.na(iap2$EWLW.ID)|iap2$EWLW.ID2=="only.ew",])
dt <- data.table(iap2)
dt <- dt[!is.na(dt$N.TRUE),]
setkey(dt, "YEAR", "WOODID", "IMAGE")
dt <- dt[, list(MNN.TRUE=RobustMin(N.TRUE),
                RPOS.MNN.TRUE=RRADDISTR[which.min(N.TRUE)],
                APOS.MNN.TRUE=mean(MRW,na.rm=TRUE)/100*RRADDISTR[which.min(N.TRUE)],
                TRUE.MNN.TRUE=round(RADDISTR.BAND[which.min(N.TRUE)]),
                MRW=mean(MRW,na.rm=TRUE)),
         by=key(dt)]
## When the minimum count is 0 there is no RRADDISTR; substitute the band centre
## (MNN.TRUE is 0 in that case, so this reduces to RESO[r]/2 -- see N.BAND note).
dt$APOS.MNN.TRUE <- ifelse(!is.na(dt$APOS.MNN.TRUE), dt$APOS.MNN.TRUE, (dt$MNN.TRUE+RESO[r]/2)) #if there minimum number of cells is 0, there is no RRADDISTR; assume center position of band instead
dt$RPOS.MNN.TRUE <- ifelse(!is.na(dt$RPOS.MNN.TRUE), dt$RPOS.MNN.TRUE, (dt$APOS.MNN.TRUE*100/dt$MRW)) #if there minimum number of cells is 0, there is no RRADDISTR; assume center position of band instead
dt$MRW <- NULL #remove temporary column that was only used to troubleshoot NAs in RPOS.MNN.TRUE
mnncells.true <- as.data.frame(dt)
mnncells.true <- merge(mnncells.true, ringTemplate, all=TRUE) #to make sure any missing ring gets NA
### 4.6.1.67 Summarize all annual statistics and save them to file ####
## Full outer join of every per-ring summary frame computed above (all share the
## YEAR/WOODID/IMAGE key via ringTemplate), producing one wide annual table.
## Frames listed here (mrw, eww, lww, mla, mdrad, ... cwaacc, mcwtall, ...) are
## built earlier in the file, partly outside this section.
df.ann <- Reduce(function(x, y) merge(x, y, all=TRUE),
                 list(mrw, eww, lww,
                      ncells.band, ncells.band.ew, ncells.band.lw, mxncells.band, mnncells.band,
                      ncells.true, ncells.true.ew, ncells.true.lw, mxncells.true, mnncells.true,
                      mla, mla.ew, mla.lw, mxla, mnla,
                      mdrad, mdrad.ew, mdrad.lw, mxdrad, mndrad,
                      mdtan, mdtan.ew, mdtan.lw, mxdtan, mndtan,
                      mtca, mtca.ew, mtca.lw, mxtca, mntca,
                      mcwa, mcwa.ew, mcwa.lw, mxcwa, mncwa,
                      cwaacc, cwaacc.ew, cwaacc.lw, mxcwaacc, mncwaacc,
                      mcwtall, mcwtall.ew, mcwtall.lw, mxcwtall, mncwtall,
                      mcwtalladj, mcwtalladj.ew, mcwtalladj.lw, mxcwtalladj, mncwtalladj,
                      mcwttan, mcwttan.ew, mcwttan.lw, mxcwttan, mncwttan,
                      mcwtrad, mcwtrad.ew, mcwtrad.lw, mxcwtrad, mncwtrad,
                      mmork, mmork.ew, mmork.lw, mxmork, mnmork,
                      mctsr, mctsr.ew, mctsr.lw, mxctsr, mnctsr,
                      mdcwt, mdcwt.ew, mdcwt.lw, mxdcwt, mndcwt,
                      mdcwt2, mdcwt2.ew, mdcwt2.lw, mxdcwt2, mndcwt2,
                      mdcwa, mdcwa.ew, mdcwa.lw, mxdcwa, mndcwa,
                      mtb2, mtb2.ew, mtb2.lw, mxtb2, mntb2,
                      mkh, mkh.ew, mkh.lw, mxkh, mnkh,
                      mdh, mdh.ew, mdh.lw, mxdh, mndh))
## Normalize Inf/-Inf (from empty max/min) and NaN (from empty means) to NA.
df.ann <- data.frame(lapply(df.ann, function(x){replace(x, is.infinite(x), NA)})) #replace +/-Inf by NA
df.ann <- data.frame(lapply(df.ann, function(x){replace(x, is.nan(x), NA)})) #replace NaN by NA
## One whitespace-separated table per wood sample / stats mode / resolution.
write.table(df.ann, file=paste(woodid, "_AnnualStats_", STATS[s], "_", RESO[r], "mu.txt", sep=""), row.names = FALSE)
# ### Just for fun...
# cor.test(mxdcwt$MXDCWT, mxdcwa$MXDCWA)
# cor.test(mndcwt$MNDCWT, mndcwa$MNDCWA)
# cor.test(mxdcwt$MXDCWT, mxcwtrad$MXCWTRAD)
# cor.test(mxcwtrad$MXCWTRAD, mxdcwa$MXDCWA)
# cor.test(mxcwtrad$MXCWTRAD, mxcwttan$MXCWTTAN)
### 4.6.1.68 Plot and save annual ring width (MRW) ####
## Each block below writes one wide PNG time series for YR.START..YR.END.
## Fix: axis labels contained mojibake ("Ám") for the micro sign; corrected to "µm".
png(file=paste(woodid, "_MRW_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mrw$MRW[mrw$YEAR>=YR.START & mrw$YEAR<=YR.END],
     x=mrw$YEAR[mrw$YEAR>=YR.START & mrw$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Ring width (MRW) (µm)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #ylim=c(150,1500))
par(xaxt="s")
axis(1, at=mrw$YEAR[mrw$YEAR>=YR.START & mrw$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mrw$YEAR[mrw$YEAR>=YR.START & mrw$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mrw$YEAR[mrw$YEAR>=YR.START & mrw$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.69 Plot and save annual earlywood width (EWW) ####
png(file=paste(woodid, "_EWW_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=eww$EWW[eww$YEAR>=YR.START & eww$YEAR<=YR.END],
     x=eww$YEAR[eww$YEAR>=YR.START & eww$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Earlywood width (EWW) (µm)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #ylim=c(150,1500))
par(xaxt="s")
axis(1, at=eww$YEAR[eww$YEAR>=YR.START & eww$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=eww$YEAR[eww$YEAR>=YR.START & eww$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=eww$YEAR[eww$YEAR>=YR.START & eww$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.70 Plot and save annual latewood width (LWW) ####
png(file=paste(woodid, "_LWW_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=lww$LWW[lww$YEAR>=YR.START & lww$YEAR<=YR.END],
     x=lww$YEAR[lww$YEAR>=YR.START & lww$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Latewood width (LWW) (µm)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #ylim=c(150,1500))
par(xaxt="s")
axis(1, at=lww$YEAR[lww$YEAR>=YR.START & lww$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=lww$YEAR[lww$YEAR>=YR.START & lww$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=lww$YEAR[lww$YEAR>=YR.START & lww$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.71 Plot and save annual maximum lumen area (MXLA) ####
png(file=paste(woodid, "_MXLA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxla$MXLA[mxla$YEAR>=YR.START & mxla$YEAR<=YR.END],
     x=mxla$YEAR[mxla$YEAR>=YR.START & mxla$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Max. Lumen area (MXLA) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #, ylim=c(150,1500))
par(xaxt="s")
axis(1, at=mxla$YEAR[mxla$YEAR>=YR.START & mxla$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mxla$YEAR[mxla$YEAR>=YR.START & mxla$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxla$YEAR[mxla$YEAR>=YR.START & mxla$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.72 Plot and save annual minimum lumen area (MNLA) ####
png(file=paste(woodid, "_MNLA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mnla$MNLA[mnla$YEAR>=YR.START & mnla$YEAR<=YR.END],
     x=mnla$YEAR[mnla$YEAR>=YR.START & mnla$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Min. Lumen area (MNLA) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #, ylim=c(5,700))
par(xaxt="s")
axis(1, at=mnla$YEAR[mnla$YEAR>=YR.START & mnla$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mnla$YEAR[mnla$YEAR>=YR.START & mnla$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mnla$YEAR[mnla$YEAR>=YR.START & mnla$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.73 Plot and save annual maximum cell area (MXTCA) ####
png(file=paste(woodid, "_MXTCA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxtca$MXTCA[mxtca$YEAR>=YR.START & mxtca$YEAR<=YR.END],
     x=mxtca$YEAR[mxtca$YEAR>=YR.START & mxtca$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Max. Cell area (MXTCA) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #, ylim=c(200,5000))
par(xaxt="s")
axis(1, at=mxtca$YEAR[mxtca$YEAR>=YR.START & mxtca$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mxtca$YEAR[mxtca$YEAR>=YR.START & mxtca$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxtca$YEAR[mxtca$YEAR>=YR.START & mxtca$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.74 Plot and save annual minimum cell area (MNTCA) ####
png(file=paste(woodid, "_MNTCA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mntca$MNTCA[mntca$YEAR>=YR.START & mntca$YEAR<=YR.END],
     x=mntca$YEAR[mntca$YEAR>=YR.START & mntca$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Min. Cell area (MNTCA) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #, ylim=c(50,1000))
par(xaxt="s")
axis(1, at=mntca$YEAR[mntca$YEAR>=YR.START & mntca$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mntca$YEAR[mntca$YEAR>=YR.START & mntca$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mntca$YEAR[mntca$YEAR>=YR.START & mntca$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.75 Plot and save annual maximum accumulated cell wall area (MXCWAACC) ####
## FIX: this block was labelled/filenamed MXCWAACC but plotted mxcwa$MXCWA (copy-paste
## from the plain MXCWA block further down). It now plots mxcwaacc$MXCWAACC, the frame
## actually built for accumulated cell wall area (see the df.ann merge list).
## Also fixed the mojibake micro sign ("Ám2" -> "µm2") in the axis label.
png(file=paste(woodid, "_MXCWAACC_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxcwaacc$MXCWAACC[mxcwaacc$YEAR>=YR.START & mxcwaacc$YEAR<=YR.END],
     x=mxcwaacc$YEAR[mxcwaacc$YEAR>=YR.START & mxcwaacc$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Max. Acc. Cell wall area (MXCWAACC) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     # cex.axis=1.3, cex.lab=2, ylim=c(100,4000))
     cex.axis=1.3, cex.lab=2)
par(xaxt="s")
axis(1, at=mxcwaacc$YEAR[mxcwaacc$YEAR>=YR.START & mxcwaacc$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mxcwaacc$YEAR[mxcwaacc$YEAR>=YR.START & mxcwaacc$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxcwaacc$YEAR[mxcwaacc$YEAR>=YR.START & mxcwaacc$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.76 Plot and save annual minimum accumulated cell wall area (MNCWAACC) ####
## FIX: same copy-paste defect as above -- was plotting mncwa$MNCWA; now mncwaacc$MNCWAACC.
png(file=paste(woodid, "_MNCWAACC_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mncwaacc$MNCWAACC[mncwaacc$YEAR>=YR.START & mncwaacc$YEAR<=YR.END],
     x=mncwaacc$YEAR[mncwaacc$YEAR>=YR.START & mncwaacc$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Min. Acc. Cell wall area (MNCWAACC) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     # cex.axis=1.3, cex.lab=2, ylim=c(20,500))
     cex.axis=1.3, cex.lab=2)
par(xaxt="s")
axis(1, at=mncwaacc$YEAR[mncwaacc$YEAR>=YR.START & mncwaacc$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mncwaacc$YEAR[mncwaacc$YEAR>=YR.START & mncwaacc$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mncwaacc$YEAR[mncwaacc$YEAR>=YR.START & mncwaacc$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.77 Plot and save annual maximum cell wall area (MXCWA) ####
## Fix: axis labels contained mojibake ("Ám") for the micro sign; corrected to "µm".
png(file=paste(woodid, "_MXCWA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxcwa$MXCWA[mxcwa$YEAR>=YR.START & mxcwa$YEAR<=YR.END],
     x=mxcwa$YEAR[mxcwa$YEAR>=YR.START & mxcwa$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Max. Cell wall area (MXCWA) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #, ylim=c(100,4000))
par(xaxt="s")
axis(1, at=mxcwa$YEAR[mxcwa$YEAR>=YR.START & mxcwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mxcwa$YEAR[mxcwa$YEAR>=YR.START & mxcwa$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxcwa$YEAR[mxcwa$YEAR>=YR.START & mxcwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.78 Plot and save annual minimum cell wall area (MNCWA) ####
png(file=paste(woodid, "_MNCWA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mncwa$MNCWA[mncwa$YEAR>=YR.START & mncwa$YEAR<=YR.END],
     x=mncwa$YEAR[mncwa$YEAR>=YR.START & mncwa$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Min. Cell wall area (MNCWA) (µm2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #, ylim=c(20,500))
par(xaxt="s")
axis(1, at=mncwa$YEAR[mncwa$YEAR>=YR.START & mncwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mncwa$YEAR[mncwa$YEAR>=YR.START & mncwa$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mncwa$YEAR[mncwa$YEAR>=YR.START & mncwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.79 Plot and save annual maximum tangential cell wall thickness (MXCWTTAN) ####
png(file=paste(woodid, "_MXCWTTAN_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxcwttan$MXCWTTAN[mxcwttan$YEAR>=YR.START & mxcwttan$YEAR<=YR.END],
     x=mxcwttan$YEAR[mxcwttan$YEAR>=YR.START & mxcwttan$YEAR<=YR.END],
     xaxt="n", yaxt="n", xlab="",ylab="Max. tangential cell wall thickness (MXCWTTAN) (µm)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
     cex.axis=1.3, cex.lab=2) #, ylim=c(1.5,10))
par(xaxt="s")
axis(1, at=mxcwttan$YEAR[mxcwttan$YEAR>=YR.START & mxcwttan$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
     labels=mxcwttan$YEAR[mxcwttan$YEAR>=YR.START & mxcwttan$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxcwttan$YEAR[mxcwttan$YEAR>=YR.START & mxcwttan$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.80 Plot and save annual minimum tangential cell wall thickness (MNCWTTAN) ####
png(file=paste(woodid, "_MNCWTTAN_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mncwttan$MNCWTTAN[mncwttan$YEAR>=YR.START & mncwttan$YEAR<=YR.END],
x=mncwttan$YEAR[mncwttan$YEAR>=YR.START & mncwttan$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. tangential cell wall thickness (MNCWTTAN) (Ám)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(1,6))
par(xaxt="s")
axis(1, at=mncwttan$YEAR[mncwttan$YEAR>=YR.START & mncwttan$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mncwttan$YEAR[mncwttan$YEAR>=YR.START & mncwttan$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mncwttan$YEAR[mncwttan$YEAR>=YR.START & mncwttan$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.81 Plot and save annual maximum radial cell wall thickness (MXCWTRAD) ####
png(file=paste(woodid, "_MXCWTRAD_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxcwtrad$MXCWTRAD[mxcwtrad$YEAR>=YR.START & mxcwtrad$YEAR<=YR.END],
x=mxcwtrad$YEAR[mxcwtrad$YEAR>=YR.START & mxcwtrad$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. radial cell wall thickness (MXCWTRAD) (Ám)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(1.5,10))
par(xaxt="s")
axis(1, at=mxcwtrad$YEAR[mxcwtrad$YEAR>=YR.START & mxcwtrad$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxcwtrad$YEAR[mxcwtrad$YEAR>=YR.START & mxcwtrad$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxcwtrad$YEAR[mxcwtrad$YEAR>=YR.START & mxcwtrad$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.82 Plot and save annual minimum radial cell wall thickness (MNCWTRAD) ####
png(file=paste(woodid, "_MNCWTRAD_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mncwtrad$MNCWTRAD[mncwtrad$YEAR>=YR.START & mncwtrad$YEAR<=YR.END],
x=mncwtrad$YEAR[mncwtrad$YEAR>=YR.START & mncwtrad$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. radial cell wall thickness (MNCWTRAD) (Ám)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(1,6))
par(xaxt="s")
axis(1, at=mncwtrad$YEAR[mncwtrad$YEAR>=YR.START & mncwtrad$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mncwtrad$YEAR[mncwtrad$YEAR>=YR.START & mncwtrad$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mncwtrad$YEAR[mncwtrad$YEAR>=YR.START & mncwtrad$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.83 Plot and save annual maximum Mork's index (MXRTSR) ####
png(file=paste(woodid, "_MXRTSR_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxmork$MXRTSR[mxmork$YEAR>=YR.START & mxmork$YEAR<=YR.END],
x=mxmork$YEAR[mxmork$YEAR>=YR.START & mxmork$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. Mork's index (MXRTSR)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(1.5,10))
par(xaxt="s")
axis(1, at=mxmork$YEAR[mxmork$YEAR>=YR.START & mxmork$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxmork$YEAR[mxmork$YEAR>=YR.START & mxmork$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxmork$YEAR[mxmork$YEAR>=YR.START & mxmork$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.84 Plot and save annual minimum Mork's index (MNRTSR) ####
png(file=paste(woodid, "_MNRTSR_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mnmork$MNRTSR[mnmork$YEAR>=YR.START & mnmork$YEAR<=YR.END],
x=mnmork$YEAR[mnmork$YEAR>=YR.START & mnmork$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. Mork's index (MNRTSR)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(1,6))
par(xaxt="s")
axis(1, at=mnmork$YEAR[mnmork$YEAR>=YR.START & mnmork$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mnmork$YEAR[mnmork$YEAR>=YR.START & mnmork$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mnmork$YEAR[mnmork$YEAR>=YR.START & mnmork$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.85 Plot and save annual maximum relative anatomical density based on CWT (DCWT) ####
png(file=paste(woodid, "_MXDCWT_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxdcwt$MXDCWT[mxdcwt$YEAR>=YR.START & mxdcwt$YEAR<=YR.END],
x=mxdcwt$YEAR[mxdcwt$YEAR>=YR.START & mxdcwt$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. Anatomical density (MXDCWT)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.3,1))
par(xaxt="s")
axis(1, at=mxdcwt$YEAR[mxdcwt$YEAR>=YR.START & mxdcwt$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxdcwt$YEAR[mxdcwt$YEAR>=YR.START & mxdcwt$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxdcwt$YEAR[mxdcwt$YEAR>=YR.START & mxdcwt$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.86 Plot and save annual minimum relative anatomical density based on CWT (DCWT) ####
png(file=paste(woodid, "_MNDCWT_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mndcwt$MNDCWT[mndcwt$YEAR>=YR.START & mndcwt$YEAR<=YR.END],
x=mndcwt$YEAR[mndcwt$YEAR>=YR.START & mndcwt$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. Anatomical density (MNDCWT)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.1,0.7))
par(xaxt="s")
axis(1, at=mndcwt$YEAR[mndcwt$YEAR>=YR.START & mndcwt$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mndcwt$YEAR[mndcwt$YEAR>=YR.START & mndcwt$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mndcwt$YEAR[mndcwt$YEAR>=YR.START & mndcwt$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.87 Plot and save annual maximum special anatomical density (DCWT2): CWTRAD/DRAD ####
png(file=paste(woodid, "_MXDCWT2_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxdcwt2$MXDCWT2[mxdcwt2$YEAR>=YR.START & mxdcwt2$YEAR<=YR.END],
x=mxdcwt2$YEAR[mxdcwt2$YEAR>=YR.START & mxdcwt2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. Anatomical density (MXDCWT2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
# cex.axis=1.3, cex.lab=2, ylim=c(0.3,1))
cex.axis=1.3, cex.lab=2)
par(xaxt="s")
axis(1, at=mxdcwt2$YEAR[mxdcwt2$YEAR>=YR.START & mxdcwt2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxdcwt2$YEAR[mxdcwt2$YEAR>=YR.START & mxdcwt2$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxdcwt2$YEAR[mxdcwt2$YEAR>=YR.START & mxdcwt2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.88 Plot and save annual minimum special anatomical density (DCWT2): CWTRAD/DRAD ####
png(file=paste(woodid, "_MNDCWT2_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mndcwt2$MNDCWT2[mndcwt2$YEAR>=YR.START & mndcwt2$YEAR<=YR.END],
x=mndcwt2$YEAR[mndcwt2$YEAR>=YR.START & mndcwt2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. Anatomical density (MNDCWT2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
# cex.axis=1.3, cex.lab=2, ylim=c(0.1,0.7))
cex.axis=1.3, cex.lab=2)
par(xaxt="s")
axis(1, at=mndcwt2$YEAR[mndcwt2$YEAR>=YR.START & mndcwt2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mndcwt2$YEAR[mndcwt2$YEAR>=YR.START & mndcwt2$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mndcwt2$YEAR[mndcwt2$YEAR>=YR.START & mndcwt2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.89 Plot and save annual maximum relative anatomical density based on CWA (DCWA) ####
png(file=paste(woodid, "_MXDCWA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxdcwa$MXDCWA[mxdcwa$YEAR>=YR.START & mxdcwa$YEAR<=YR.END],
x=mxdcwa$YEAR[mxdcwa$YEAR>=YR.START & mxdcwa$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. Anatomical density (MXDCWA)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.3,1))
par(xaxt="s")
axis(1, at=mxdcwa$YEAR[mxdcwa$YEAR>=YR.START & mxdcwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxdcwa$YEAR[mxdcwa$YEAR>=YR.START & mxdcwa$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxdcwa$YEAR[mxdcwa$YEAR>=YR.START & mxdcwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.90 Plot and save annual minimum relative anatomical density based on CWA (DCWA) ####
png(file=paste(woodid, "_MNDCWA_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mndcwa$MNDCWA[mndcwa$YEAR>=YR.START & mndcwa$YEAR<=YR.END],
x=mndcwa$YEAR[mndcwa$YEAR>=YR.START & mndcwa$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. Anatomical density (MNDCWA)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.1,0.7))
par(xaxt="s")
axis(1, at=mndcwa$YEAR[mndcwa$YEAR>=YR.START & mndcwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mndcwa$YEAR[mndcwa$YEAR>=YR.START & mndcwa$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mndcwa$YEAR[mndcwa$YEAR>=YR.START & mndcwa$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.91 Plot and save annual maximum cell wall reinforcement index (TB2) ####
png(file=paste(woodid, "_MXTB2_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxtb2$MXTB2[mxtb2$YEAR>=YR.START & mxtb2$YEAR<=YR.END],
x=mxtb2$YEAR[mxtb2$YEAR>=YR.START & mxtb2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. cell wall reinforcement index (MXTB2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.3,1))
par(xaxt="s")
axis(1, at=mxtb2$YEAR[mxtb2$YEAR>=YR.START & mxtb2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxtb2$YEAR[mxtb2$YEAR>=YR.START & mxtb2$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxtb2$YEAR[mxtb2$YEAR>=YR.START & mxtb2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.92 Plot and save annual minimum cell wall reinforcement index (TB2) ####
png(file=paste(woodid, "_MNTB2_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mntb2$MNTB2[mntb2$YEAR>=YR.START & mntb2$YEAR<=YR.END],
x=mntb2$YEAR[mntb2$YEAR>=YR.START & mntb2$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. cell wall reinforcement index (MNTB2)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.1,0.7))
par(xaxt="s")
axis(1, at=mntb2$YEAR[mntb2$YEAR>=YR.START & mntb2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mntb2$YEAR[mntb2$YEAR>=YR.START & mntb2$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mntb2$YEAR[mntb2$YEAR>=YR.START & mntb2$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.93 Plot and save annual maximum mean hydraulic diameter (DH) ####
png(file=paste(woodid, "_MXDH_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxdh$MXDH[mxdh$YEAR>=YR.START & mxdh$YEAR<=YR.END],
x=mxdh$YEAR[mxdh$YEAR>=YR.START & mxdh$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. mean hydraulic diameter (MXDH) (Ám)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.3,1))
par(xaxt="s")
axis(1, at=mxdh$YEAR[mxdh$YEAR>=YR.START & mxdh$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxdh$YEAR[mxdh$YEAR>=YR.START & mxdh$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxdh$YEAR[mxdh$YEAR>=YR.START & mxdh$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.94 Plot and save annual minimum mean hydraulic diameter (DH) ####
png(file=paste(woodid, "_MNDH_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mndh$MNDH[mndh$YEAR>=YR.START & mndh$YEAR<=YR.END],
x=mndh$YEAR[mndh$YEAR>=YR.START & mndh$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. mean hydraulic diameter (MNDH) (Ám)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.1,0.7))
par(xaxt="s")
axis(1, at=mndh$YEAR[mndh$YEAR>=YR.START & mndh$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mndh$YEAR[mndh$YEAR>=YR.START & mndh$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mndh$YEAR[mndh$YEAR>=YR.START & mndh$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.95 Plot and save annual maximum number of cells (N.BAND) ####
png(file=paste(woodid, "_MXN.BAND_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxncells.band$MXN.BAND[mxncells.band$YEAR>=YR.START & mxncells.band$YEAR<=YR.END],
x=mxncells.band$YEAR[mxncells.band$YEAR>=YR.START & mxncells.band$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. number of cells (MXN.BAND)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.3,1))
par(xaxt="s")
axis(1, at=mxncells.band$YEAR[mxncells.band$YEAR>=YR.START & mxncells.band$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxncells.band$YEAR[mxncells.band$YEAR>=YR.START & mxncells.band$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxncells.band$YEAR[mxncells.band$YEAR>=YR.START & mxncells.band$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.96 Plot and save annual minimum number of cells (N.BAND) ####
png(file=paste(woodid, "_MNN.BAND_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mnncells.band$MNN.BAND[mnncells.band$YEAR>=YR.START & mnncells.band$YEAR<=YR.END],
x=mnncells.band$YEAR[mnncells.band$YEAR>=YR.START & mnncells.band$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. number of cells (MNN.BAND)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.1,0.7))
par(xaxt="s")
axis(1, at=mnncells.band$YEAR[mnncells.band$YEAR>=YR.START & mnncells.band$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mnncells.band$YEAR[mnncells.band$YEAR>=YR.START & mnncells.band$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mnncells.band$YEAR[mnncells.band$YEAR>=YR.START & mnncells.band$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.97 Plot and save annual maximum number of cells (N.TRUE) ####
png(file=paste(woodid, "_MXN.TRUE_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mxncells.true$MXN.TRUE[mxncells.true$YEAR>=YR.START & mxncells.true$YEAR<=YR.END],
x=mxncells.true$YEAR[mxncells.true$YEAR>=YR.START & mxncells.true$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Max. number of cells (MXN.TRUE)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.3,1))
par(xaxt="s")
axis(1, at=mxncells.true$YEAR[mxncells.true$YEAR>=YR.START & mxncells.true$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mxncells.true$YEAR[mxncells.true$YEAR>=YR.START & mxncells.true$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mxncells.true$YEAR[mxncells.true$YEAR>=YR.START & mxncells.true$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
### 4.6.1.98 Plot and save annual minimum number of cells (N.TRUE) ####
png(file=paste(woodid, "_MNN.TRUE_", STATS[s], "_", RESO[r], "mu_", SMOOTHER, "rm", ".png", sep=""),width=3000,height=600)
par(mar=c(3,5.5,2.1,2.1)) #c(bottom, left, top, right)
plot(y=mnncells.true$MNN.TRUE[mnncells.true$YEAR>=YR.START & mnncells.true$YEAR<=YR.END],
x=mnncells.true$YEAR[mnncells.true$YEAR>=YR.START & mnncells.true$YEAR<=YR.END],
xaxt="n", yaxt="n", xlab="",ylab="Min. number of cells (MNN.TRUE)", type="l", lwd=3, col=LINE.COL(length(RESO))[r],
cex.axis=1.3, cex.lab=2) #, ylim=c(0.1,0.7))
par(xaxt="s")
axis(1, at=mnncells.true$YEAR[mnncells.true$YEAR>=YR.START & mnncells.true$YEAR<=YR.END], tck=0.02, lwd.ticks=2, cex.axis=1.3,
labels=mnncells.true$YEAR[mnncells.true$YEAR>=YR.START & mnncells.true$YEAR<=YR.END])
axis(2, tck=0.02, lwd.ticks=2, cex.axis=1.3, labels=TRUE)
axis(3, at=mnncells.true$YEAR[mnncells.true$YEAR>=YR.START & mnncells.true$YEAR<=YR.END], tck=0.02, lwd.ticks=2, labels=FALSE)
axis(4, tck=0.02, lwd.ticks=2, labels=FALSE)
dev.off()
}
}
}
Sys.time() - t  # report elapsed wall-clock time since `t` was recorded earlier in the script
### 5. clean up ####
# NOTE(review): rm(list = ls(all = TRUE)) wipes the user's ENTIRE workspace,
# not just this script's objects — generally discouraged in shared scripts.
rm(list=ls(all=TRUE))
|
9af564bc4ab18631b3522f5c1d05cf1273153397
|
08fd1da1a01bf9b5f0af39cee5c18f96c0964d69
|
/Rscripts/12_population_trajectories.R
|
26679851307535a66b5f61e6c7cdac4cd01e593d
|
[] |
no_license
|
OConnor-Lab-UBC/J-TEMP
|
9e917efabb410f1aa43f5d1c3465fdb53d4dcba1
|
74548ab187cdfc70d868f838704317ae664b8eeb
|
refs/heads/master
| 2021-01-11T08:28:33.966999
| 2018-08-02T11:08:45
| 2018-08-02T11:08:45
| 72,261,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,415
|
r
|
12_population_trajectories.R
|
# Plot observed vs. simulated population trajectories from fitted r-K models.
# NOTE(review): both input paths are absolute, machine-specific paths
# ("/Users/Joey/...") — the script only runs on the original author's machine.
library(tidyverse)
library(stringr)
data <- read_csv("/Users/Joey/Documents/J-TEMP/data-processed/output_rK_TT_cell_abundance.csv")
obs_sim_data <- read_csv("/Users/Joey/Documents/J-TEMP/data-processed/TT_obs_sim_data_cell_abundance.csv")
# Observed points for one example experimental unit (ID "16_4").
obs <- obs_sim_data %>%
filter(ID == "16_4") %>%
filter(type == "observed")
# Model (simulated) trajectory for the same unit, relabelled for the legend.
model <- obs_sim_data %>%
filter(ID == "16_4") %>%
filter(type == "simulated") %>%
mutate(type = str_replace(type, "simulated","model"))
# Single-unit figure: observed cells as large points, model as a line.
obs_sim_data %>%
filter(ID == "16_4") %>%
mutate(type = str_replace(type, "simulated","model")) %>%
ggplot(aes(x = time, y = P, color = type)) + geom_point(size = 2) +
scale_color_manual(values = c("orange", "cadetblue")) +
geom_point(aes(x = time, y = P), data = obs, size = 6, color = "cadetblue") +
geom_line(aes(x = time, y = P), data = model, size = 2, color = "orange") +
theme_bw() + theme(axis.text=element_text(size=16), axis.title=element_text(size=16,face="bold")) +
ylab("population abundance (cells/ml)") + xlab("time (days)") +
theme(legend.title=element_blank(), legend.text = element_text(size = 18))
# ggsave() saves the most recently displayed plot (the single-unit figure).
ggsave("figures/k-temp-time-series-plot.pdf")
# Faceted overview: every experimental unit, observed + simulated.
ggplot() +
geom_point(data = obs_sim_data, aes(x = time, y = P, color = type)) +
facet_wrap( ~ ID) + theme_bw() + ylab("population abundance (cells/ml)") + xlab("days")
# ggsave("figures/time_series_fits_TT.png", width = 12, height = 10)
# NOTE(review): leftover interactive help lookup; harmless but should be removed.
?scale_alpha_manual
|
32e93ed83aae1708e6290e1e402581e5deeba7ca
|
f8af4d6f0e26d19dc2d956f8397d0896b8b961ba
|
/ch09/sec1_covariance.R
|
91bf78b8676e2a7849373271e69c2cabb8e0cbce
|
[] |
no_license
|
JadenChoi94/R_Stactistics
|
767b30b9096fd3021923172bb9c1917d683e7042
|
4dc5b10e0b794d60a8974bd9f35ffabe826b6e3d
|
refs/heads/master
| 2020-06-05T12:57:03.123781
| 2019-08-24T13:02:30
| 2019-08-24T13:02:30
| 192,445,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 982
|
r
|
sec1_covariance.R
|
# Example 9-1. Covariance and correlation coefficient of father and son heights.
# Downloads the Galton height data set over HTTP and compares the manual
# covariance/correlation computation against R's built-in cov()/cor().
hf <- read.table("http://www.randomservices.org/random/data/Galton.txt", header=T, stringsAsFactors = FALSE)
str(hf)
hf$Gender <- factor(hf$Gender, levels=c("M", "F"))
# Keep sons only, and only the father/son height columns.
hf.son <- subset(hf, Gender=="M")
hf.son <- hf.son[c("Father", "Height")]
str(hf.son)
f.mean <- mean(hf.son$Father)
s.mean <- mean(hf.son$Height)
cov.num <- sum( (hf.son$Father-f.mean) * (hf.son$Height - s.mean) )# covariance numerator — see the formula in the textbook
(cov.xy <- cov.num / (nrow(hf.son) - 1))
# Covariance via the built-in R function (matches the manual value above)
cov(hf.son$Father, hf.son$Height)
(r.xy <- cov.xy / (sd(hf.son$Father) * sd(hf.son$Height)))
# Correlation coefficient via the built-in R function
cor(hf.son$Father, hf.son$Height)# same value as the manual computation above
# Figure 9-2: scatter plot with fitted regression line.
# (Axis labels are Korean for "Father's height (inches)" / "Son's height (inches)".)
par(mfrow=c(1, 1), mar=c(4, 4, 1, 1))
plot(Height~Father, pch=16, data=hf.son, xlab="아버지의 키(인치)", ylab="아들의 키(인치)")
abline(lm(Height~Father, data=hf.son), col="red", lwd=2)
|
b3a14a1c6335d247997bb83cc6b3d685e8bd370e
|
1ba5b7c213871eb2b9aa5d194fa403f87d728193
|
/R/delete.R
|
2d68c617a0daf8e70a5c925381893aadd14c89f5
|
[
"MIT"
] |
permissive
|
noelnamai/RNeo4j
|
6a0c42ffe5a6f3f9ffc19d15ad25453696ea3760
|
4af57a9b00593109155e9f2c55108fe8b94c8f0b
|
refs/heads/master
| 2020-04-01T23:02:26.894316
| 2015-04-17T18:03:26
| 2015-04-17T18:03:26
| 34,324,382
| 1
| 0
| null | 2015-04-21T12:02:52
| 2015-04-21T12:02:51
|
R
|
UTF-8
|
R
| false
| false
| 496
|
r
|
delete.R
|
# S3 generic: delete one or more graph entities; dispatches on the first argument.
delete = function(...) UseMethod("delete")
# Default method: delete one or more graph entities via HTTP DELETE requests.
#
# Every argument must be an object whose class vector contains "entity"
# (i.e. it carries a "self" URL attribute). Each entity's "self" URL
# receives a DELETE request expecting a "No Content" response.
# Returns NULL invisibly.
delete.default <- function(...) {
  entities <- list(...)
  classes <- lapply(entities, class)
  stopifnot(all(vapply(classes, function(c) "entity" %in% c, logical(1))))
  headers <- setHeaders(entities[[1]])
  urls <- vapply(entities, function(x) attr(x, "self"), character(1))
  # Iterate over the elements directly: unlike `1:length(urls)`, this loops
  # zero times when `urls` is empty instead of iterating over c(1, 0).
  for (url in urls) {
    http_request(url,
                 "DELETE",
                 "No Content",
                 httpheader = headers)
  }
  invisible(NULL)
}
|
b368bec8ebbb6efd3d1921961cdcc244f16b1ea0
|
c32c54f47c35737ea4ba3a026c81b594fd02b1cf
|
/man/nbaSetFactorsForProj.Rd
|
2243e41286514d206bc59de9628cdf5f08313206
|
[] |
no_license
|
quinnpertuit/rDailyFantasy
|
cd46596122d979b5c389d67b19bc354109fa0722
|
fb00d802573c855f58d5b7b4d84f96d6724a66a6
|
refs/heads/master
| 2022-10-24T06:04:42.025973
| 2020-01-11T15:12:36
| 2020-01-11T15:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 552
|
rd
|
nbaSetFactorsForProj.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nbaSetFactorsForProjFunction.R
\name{nbaSetFactorsForProj}
\alias{nbaSetFactorsForProj}
\title{nba Factors Setting Function for Projections}
\usage{
nbaSetFactorsForProj(train = train, target = "ActualPoints")
}
\arguments{
\item{train}{dataframe}
\item{target}{target variable for machine learning}
}
\value{
Data frame with its factor columns correctly set for use in the projection models.
}
\description{
nba Factors Setting Function for Projections
}
\examples{
nbaSetFactorsForProj(train=train,target="ActualPoints")
}
|
087e1fd318f6cc2834610751ade97fc3cd1d1708
|
7a2f2e124856390022fee7019cf484f819f0d36d
|
/code/test-lassosum2-auto.R
|
d1e9d36a16468b73a770fd30ce9f36c2e8858ad1
|
[] |
no_license
|
privefl/paper-lassosum2
|
1e214eafbba03f93c908a53997635d341ca6341e
|
420343346e867823f4091d4358fda8c53d05afe3
|
refs/heads/main
| 2023-04-22T23:29:53.222346
| 2021-04-29T07:41:57
| 2021-04-29T07:41:57
| 346,615,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,801
|
r
|
test-lassosum2-auto.R
|
# Compare lassosum2 validation scores against two pseudo-validation scores
# (lfdr shrinkage based on correlations vs. on p-values) using UKBB chr 22.
library(bigsnpr)
ukb <- snp_attach("data/UKBB_imp_HM3.rds")
load("data/ind_gwas_val_test.RData")
(NCORES <- parallelly::availableCores() - 1L)
chr <- 22
ind.chr <- which(ukb$map$chromosome == chr)
corr0 <- readRDS(paste0("data/corr/chr", chr, ".rds"))
# Cache the on-disk sparse LD matrix: computed once, re-read on later runs.
corr <- runonce::save_run(as_SFBM(corr0, "tmp-data/corr_chr22"),
"tmp-data/corr_chr22.rds")
stopifnot(length(ind.chr) == ncol(corr))
dim(corr) # 16410 x 16410
# big_copy(ukb$genotypes, ind.col = ind.chr,
# backingfile = "tmp-data/dosage_chr22")$save()
G <- big_attach("tmp-data/dosage_chr22.rds")
# Simulate a phenotype with h2 = 0.2 driven by 2000 causal variants.
y <- snp_simuPheno(G, h2 = 0.2, M = 2000, ncores = NCORES)$pheno
# NOTE(review): sample() without a prior set.seed() makes runs non-reproducible.
ind.gwas <- sample(ind.gwas, 20e3)
# GWAS to get sumstats
gwas <- big_univLinReg(G, y[ind.gwas], ind.train = ind.gwas, ncores = NCORES)
library(dplyr)
df_beta <- gwas %>%
transmute(beta = estim, beta_se = std.err, n_eff = length(ind.gwas))
# lassosum2
beta_lassosum <- snp_lassosum2(corr, df_beta, ncores = NCORES)
(params <- attr(beta_lassosum, "grid_param"))
# validation: score each grid point on the held-out validation individuals
ind <- which(rowSums(beta_lassosum != 0) > 0)
pred_lassosum <- big_prodMat(G, beta_lassosum[ind, ], ind.col = ind, ncores = NCORES)
params$score <- big_univLinReg(as_FBM(pred_lassosum[ind.val, ]), y[ind.val])$score
# pseudo-validation: shrink standardized effects by the local FDR (fdrtool)
scale <- with(df_beta, sqrt(n_eff * beta_se^2 + beta^2))
beta_hat <- df_beta$beta / scale
fdr <- fdrtool::fdrtool(beta_hat, statistic = "correlation", plot = FALSE)
beta_hat_shrunk <- round(beta_hat * (1 - fdr$lfdr), 16)
params$auto_score <- apply(beta_lassosum, 2, function(beta) {
cat(".")
beta <- beta / scale
bRb <- crossprod(beta, bigsparser::sp_prodVec(corr, beta))
crossprod(beta, beta_hat_shrunk) / sqrt(bRb)
})
library(ggplot2)
qplot(auto_score, score, color = s, data = params) +
theme_bw(15) +
scale_color_viridis_c() +
labs(x = "Score from pseudo-validation", y = "Score from validation")
# Alternative shrinkage: local FDR estimated from the GWAS p-values instead.
pval <- predict(gwas, log10 = FALSE)
fdr2 <- fdrtool::fdrtool(pval, statistic = "pvalue", plot = FALSE)
beta_hat_shrunk2 <- beta_hat * (1 - fdr2$lfdr)
params$auto_score2 <- apply(beta_lassosum, 2, function(beta) {
cat(".")
beta <- beta / scale
bRb <- crossprod(beta, bigsparser::sp_prodVec(corr, beta))
crossprod(beta, beta_hat_shrunk2) / sqrt(bRb)
})
# NOTE(review): plot_grid() comes from the cowplot package, which is never
# loaded in this script — confirm cowplot is attached elsewhere before running.
plot_grid(
qplot(auto_score, score, color = s, data = params) +
theme_bw(15) +
scale_color_viridis_c() +
labs(x = "Score from pseudo-validation (using correlations)",
y = "Score from validation"),
qplot(auto_score2, score, color = s, data = params) +
theme_bw(15) +
scale_color_viridis_c() +
labs(x = "Score from pseudo-validation (using p-values)",
y = "Score from validation"),
scale = 0.95, labels = c("A", "B"), label_size = 16, ncol = 1
)
# ggsave("figures/pseudoval.pdf", width = 8, height = 9)
|
d9e5380765fa1164d1cccc00edc0dc8fae7a13bc
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ebGenotyping/R/rlogit.R
|
15badbe7ee7b9b9de2f43f6bd74360ff369a1724
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
rlogit.R
|
# Logistic (inverse-logit) function: maps any real x to (0, 1).
#
# The original hand-rolled exp(x)/(1 + exp(x)) needed a magic `x > 100` guard
# to dodge Inf/Inf overflow; stats::plogis computes the same quantity in a
# numerically stable, vectorized way (plogis(x) = 1 for large x, 0 at -Inf,
# NA propagates), so the guard and the superfluous return() are dropped.
rlogit <- function(x) {
  stats::plogis(x)
}
|
bd5b26787c75c187deefafff3da79426d0d455d1
|
faa40c8b2cef4c58b652d1f22d8493e82941d266
|
/R/Createsubset.R
|
38062befc6d8f3300cc00443e58c6510dcc8a55b
|
[] |
no_license
|
AVJdataminer/Squeaky
|
5d4c311964de758ebadbfbf06d1add37d187dbf4
|
b3fda5491338c466e407deabfb0b1ffab3488c5c
|
refs/heads/master
| 2021-03-27T09:39:04.963702
| 2018-08-24T17:32:40
| 2018-08-24T17:32:40
| 102,994,776
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
Createsubset.R
|
# Create and save a stratified random subset of a data frame.
#
# Args:
#   df:       data frame to subset.
#   response: name (string) of the column used to stratify the partition.
#   percent:  proportion of rows to keep (passed to caret as `p`).
#   pathout:  directory in which "df_subset.csv" is written.
# Returns the subset data frame.
# Example: test2 <- Createsubset(test1, 'TOBACCO_RATING_APPLIED_N', .3, path)
Createsubset <- function(df, response, percent, pathout) {
  # BUG FIX: the original indexed df[['response']] (the literal string
  # "response") instead of the column named by the `response` argument.
  in_train <- caret::createDataPartition(df[[response]], p = percent, list = FALSE)
  out <- df[in_train, ]
  # Write directly into `pathout` instead of calling setwd(), which
  # permanently changed the caller's working directory (an un-restored
  # side effect).
  write.csv(out, file.path(pathout, "df_subset.csv"), row.names = FALSE)
  out
}
|
29f461dffa18d066f6877a7e5ffbe13f2709c8a2
|
023e03b73d8279bb3debffdf26e3bf071dce407a
|
/application/ui.R
|
6960803857c14a3aa8b35a8577a6dbdbcabba2d3
|
[] |
no_license
|
WEILU-ZHAO/marital
|
5caed707d0cc9d11d48a4195e7195fdb9b8f1650
|
cc4e2b6ebe0f626d14fd4ca93a4a6522e8e301ed
|
refs/heads/master
| 2020-03-19T10:10:14.082582
| 2018-06-06T15:31:18
| 2018-06-06T15:31:18
| 136,349,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,009
|
r
|
ui.R
|
library(shiny)
library(shinythemes)
library(readr)
library(ggplot2)
library(stringr)
library(dplyr)
library(DT)
library(tools)
library(marital)
library(ggpubr)
data("floridaCountyMarital")
countyNames<-levels(as.factor(floridaCountyMarital@geography$NAME))
codebookColon<-str_locate(countyNames, "County")[,1]
codebook<- data.frame(cbind(str_sub(countyNames, 0, codebookColon - 2),
floridaCountyMarital@geography$county))
colnames(codebook)<-c("NAME","county")
fluidPage(
theme=shinytheme("flatly"),
titlePanel("Marital by sex, 2009 - 2016", windowTitle = "Marital by sex"),
# Sidebar layout with a input and output definitions
sidebarLayout(
# Inputs
sidebarPanel(
h3("County"), # Third level header: County
# Select County for agePlot
selectInput(inputId = "county",
label = "Choose a County:",
choices = c(as.character(codebook$NAME)),
selected = "Miami-Dade"),
hr(),
# Show data table
checkboxInput(inputId = "show_data",
label = "Show data table",
value = FALSE),
br(),
# Built with Shiny by RStudio
br(),
h5("Built with",
img(src = "https://www.rstudio.com/wp-content/uploads/2014/04/shiny.png", height = "30px"),
"by",
img(src = "https://www.rstudio.com/wp-content/uploads/2014/07/RStudio-Logo-Blue-Gray.png", height = "30px"),
"."),width=3
),
# Output:
mainPanel(
tabsetPanel(id = "tabspanel", type = "tabs",
tabPanel(title = "Plot",
fluidRow(
column(6, plotOutput("maleNeverPlot", height = 250)), # notice the ,
column(6, plotOutput("femaleNeverPlot", height = 250))
), #end of this fluidRow notice comma needed before the next fluidRow()
fluidRow(
column(6, plotOutput("maleMarriedPlot", height = 250)), # notice the ,
column(6, plotOutput("femaleMarriedPlot", height = 250))
), #end of this fluidRow notice comma needed before the next fluidRow()
fluidRow(
column(6, plotOutput("maleabsentPlot", height = 250)), # notice the ,
column(6, plotOutput("femaleabsentPlot", height = 250))
), #end of this fluidRow notice comma needed before the next fluidRow()
fluidRow(
column(6, plotOutput("maleotherPlot", height = 250)), # notice the ,
column(6, plotOutput("femaleotherPlot", height = 250))
), #end of this fluidRow notice comma needed before the next fluidRow()
fluidRow(
column(6, plotOutput("maleWidowedPlot", height = 250)), # notice the ,
column(6, plotOutput("femaleWidowedPlot", height = 250))
), #end of this fluidRow notice comma needed before the next fluidRow()
fluidRow(
column(6, plotOutput("maleDivorcedPlot", height = 250)), # notice the ,
column(6, plotOutput("femaleDivorcedPlot", height = 250))
) #end of this fluidRow notice comma needed before the next fluidRow()
),
tabPanel(title = "Data",
br(),
DT::dataTableOutput(outputId = "Demotable")),
# New tab panel for Codebook
tabPanel("Codebook",
br(),
DT::dataTableOutput("codebook"))
)
)
))
|
727cdea384ae1e7f3bc90e3c62c31353d0915143
|
7b74f00cd80694634e6925067aaeb6572b09aef8
|
/2020/notes-2020/session_files/week5.R
|
47812f5e7cb26f46ea1b9595ca4554064a11c778
|
[] |
no_license
|
leafyoung/fe8828
|
64c3c52f1587a8e55ef404e8cedacbb28dd10f3f
|
ccd569c1caed8baae8680731d4ff89699405b0f9
|
refs/heads/master
| 2023-01-13T00:08:13.213027
| 2020-11-08T14:08:10
| 2020-11-08T14:08:10
| 107,782,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
week5.R
|
library(conflicted)
library(quantmod)
conflict_prefer('lag', 'dplyr')
conflict_prefer('filter', 'dplyr')
getSymbols("SPY", src = "yahoo", adjusted = TRUE, output.size = "full")
SPY
str(SPY)
head(SPY)
tail(SPY)
SPY['2009-11']
SPY['2009-11/']
tibble(Date = index(SPY['2009-11']), as_tibble(coredata(SPY['2009-11'])))
|
7c5ef93cc5aae2271a4918cef46f9e377bfee4e6
|
b84d89b3f67fbd57e2d41f42c23c1f82fe7ba9fd
|
/R/tef_fitAll2brms.R
|
384ac2087fe0a58f139cca29e78f3bc2e0dc2f34
|
[
"MIT"
] |
permissive
|
akcochrane/TEfits
|
04305849bd8393c9e816312085a228ccdbd621e3
|
e11b07b2d9fed9eb6e8221c8cfdd86b9e287180e
|
refs/heads/master
| 2023-06-08T00:04:21.025346
| 2023-06-07T22:20:11
| 2023-06-07T22:20:11
| 225,967,950
| 1
| 0
|
MIT
| 2023-06-07T22:20:13
| 2019-12-04T22:22:41
|
HTML
|
UTF-8
|
R
| false
| false
| 6,651
|
r
|
tef_fitAll2brms.R
|
#' Refit a TEfitAll model with brms
#'
#' \emph{This method has been superceded by} \code{\link{TEbrm}}. \emph{Please use
#' that method instead.}
#'
#' Passes a \code{\link{TEfitAll}} model to [nonlinear mixed-effects Bayesian] fitting using
#' \code{\link[brms]{brms-package}}. Note that, due to the extensive time needed to
#' fit \code{\link[brms]{brms-package}} models,
#' this function is less tested than most functions in the \code{TEfits} package. Functionality is
#' \strong{experimental}.
#'
#' Priors for nonlinear parameters are informed by the distributions of parameters in the \code{TEfitAll} object [models].
#' However, any fixed effects should be minimally influenced by these priors
#'
#' \code{TEfitAll} \code{bernoulli} models are fit using either \code{bernoulli} or \code{Beta} response
#' distributions in \code{brms} depending on whether the \code{TEfitAll} distrIibution is
#' binary. \code{TEfitAll} \code{logcosh} models are fit using a \code{asym_laplace} response distribution
#' in brms predicting the .5 quantile.
#'
#' If sampling issues occur, increased number of iterations are recommended. Also, running one chain at a time
#' may help; these models should later be merged using \code{brms::combine_models()}.
#'
#' @param TEs3s TEfitAll model
#' @param fixef Parameters vary as random effects by the TEs3s grouping variable. However, if you have main effects (e.g., group differences), enter them \emph{as a data frame} here.
#' @param nIter number of iterations
#' @param nChains number of chains
#' @param nCores number of cores
#' @param errFun the error function to use. Defaults to the same as the TEfitAll model, if possible.
#' @param prior_dispersion This number, multiplied by the SD of each TEfitAll parameter, is used as the prior SD for that parameter.
#'
#' @note
#' Under development. Partial functionality.
#'
#' @return A \code{\link[brms]{brms-package}} nonlinear mixed-effects model object.
#'
#' @examples
#' \dontrun{
#' dat <- anstrain
#' dat$condition <- rep(c('A','B'),each=500)
#'
#' # Model with time and one categorical fixed effect
#' mod_tef <- TEfitAll(dat[,c('acc','trialNum')], groupingVar = dat$subID)
#' mod_brm <- tef_fitAll2brms(mod_tef,nChains=1,fixef=data.frame(condition=dat$condition))
#'
#' # Model with time, one categorical fixed effect, and one by-groupingVar (subID) random slope
#' dat$absRat <- scale(abs(dat$ratio))
#' mod_tef <- TEfitAll(dat[,c('acc','trialNum',"absRat")], groupingVar = dat$subID,covarTerms=list(pRate=c(F)))
#' mod_brm <- tef_fitAll2brms(mod_tef,nChains=1,fixef=data.frame(condition=dat$condition))
#' }
#'
#' @export
tef_fitAll2brms <- function(TEs3s,fixef=NA,nIter= 2000,nChains=3,nCores=2,errFun=NA,prior_dispersion=2){
## To do:
##
## ## fit in a loop that keeps trying until a fit with samples, so that the user won't get something out with no samples
##
## ## should improve the docs, expecially around cores and chains. Someone who's never used brms should be able to get the broad strokes here.
## ## Also, seriously think about defaulting to one core and one chain. Given the likelihood of pathological sampling (the need for looping to sample), it's better.
require(brms)
# par_lims <- TEs3s$allFitList[[1]]$modList$parLims
pars_orig <- TEs3s$allFitList[[1]]$modList$pNames
pars <- gsub('_','',pars_orig)
groupingVarName <- attr(TEs3s$fitSummary,'grouping_var')
varIn <- data.frame(); for(curGroup in 1:length(TEs3s$allFitList)){
subDat <- data.frame(TEs3s$allFitList[[curGroup]]$data)
subDat[,groupingVarName] <-
rownames(TEs3s$allFits)[curGroup]
varIn <- rbind(varIn,subDat)}
if(!is.data.frame(fixef)){fixefNames <- '1'}else{
fixefNames <- names(fixef)
varIn <- cbind(varIn,fixef)
}
fixefs <- paste(paste0(fixefNames,'+'),collapse='')
parForm <- as.formula(paste(paste(pars,collapse='+'),'~',fixefs,'(1||',groupingVarName,')'))
brmForm <- brmsformula(as.formula(paste(
TEs3s$allFitList[[1]]$modList$respVar,'~',
gsub('_','',as.character(TEs3s$allFitList[[1]]$modList$modl_fun)[[3]])
))
,parForm
,nl=T)
## make priors (better to have normal guided by TEfit result and bounded by par_lims)
se2sd <- sqrt(length(TEs3s$allFitList))
brmPriors <- set_prior(paste0('normal(',TEs3s$fitSummary['mean',pars_orig[1]],',',
TEs3s$fitSummary['stdErr',pars_orig[1]]*se2sd*prior_dispersion,')'),
nlpar=pars[1] ## ,ub=par_lims$parMax[1],lb=par_lims$parMin[1]
) ; if(length(pars)>1){
for(curPar in 2:length(pars)){
brmPriors <- brmPriors+
set_prior(paste0('normal(',TEs3s$fitSummary['mean',pars_orig[curPar]],',',
TEs3s$fitSummary['stdErr',pars_orig[curPar]]*se2sd*prior_dispersion,')'),
nlpar=pars[curPar] ## ,ub=par_lims$parMax[curPar],lb=par_lims$parMin[curPar]
)
}}
## NEED TO ALSO HAVE VARIANCE PRIORS; HERE AND EVERYWHERE, HAVE VARIANCE PRIORS BE LOGNORMAL, WITH
## -3 ALWAYS BEING AT -4 SD AND +4SD BE AN UNLIKELY LARGE NUMBER
## WHEN IN DOUBT, DEFAULT TO LOGNORMAL(1,1)
if(is.na(errFun)){errFun <- as.character(unique(TEs3s$fitSummary$errFun))}
# Transform errFun into link functions
errorFun <- switch(errFun,
'rmse' = gaussian(),
'exGauss_mu' = exgaussian(),
'ols' = gaussian(),
'bernoulli' = bernoulli(link='identity'),
'logcosh' = asym_laplace()
)
if(errFun =='bernoulli'){
if(min(varIn[,1],na.rm=T)<0 || max(varIn[,1],na.rm=T) > 1){
cat('The response variable is outside [0,1].')}
else{
if(!all(unique(na.omit(varIn[,1]))[1:2]==c(0,1))){
cat('edge correction [.0001] was applied and a beta response distribution was used.')
varIn[,1] <- (varIn[,1]*.9998)+.0001
errorFun <- Beta(link = "identity")
}
}
}
## fit model
modSuccess <- F
while(!modSuccess){
brmModel <- brm(brmForm,
varIn,
prior = brmPriors,
chains = nChains,
family = errorFun,
iter = nIter,
thin=max(c(1,floor(nIter/4000))),
cores = getOption("mc.cores",nCores),
control = list(adapt_delta = .95,
max_treedepth = 50))
if(nChains==1){ ## currently tests for sampling only exist if only one chain is run
try({
. <- posterior_summary(brmModel )
modelSuccess <- T
})
}else{
modelSuccess <- T
}
}
return(brmModel)
}
|
09d944f103403cfbded460328be0bef5935ccd6a
|
a34b4aa795fbfe55d30cc5f26deeb7fb25b6edcc
|
/run_analysis.R
|
7e61b7dfb0333a3d6b6a4818dfa05319e76aecc7
|
[] |
no_license
|
Kuladeep27/Data-cleaning
|
615b601e850a5b825160436f5ca8379b94156c9b
|
cb2e2c6aa51a4548df72b34362bc19fcca9d3839
|
refs/heads/master
| 2022-12-08T07:58:23.727005
| 2020-08-31T04:38:43
| 2020-08-31T04:38:43
| 291,609,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,988
|
r
|
run_analysis.R
|
library(tidyverse)
# download the zip file
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
destfile = "C:/Users/kulad/Documents/getdata_projectfiles_UCI HAR Dataset.zip")
# Features and activity labels
features <- read.table("~/UCI HAR Dataset/features.txt", stringsAsFactors = F)
featurenames <- c(features$V2) # Assigning the names for the final dataset
activityLabels <- read.table("~/UCI HAR Dataset/activity_labels.txt", stringsAsFactors = F)
# Read the training data sets
trainData_x <- read.table("~/UCI HAR Dataset/train/X_train.txt")
trainData_Y <- read.table("~/UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("~/UCI HAR Dataset/train/subject_train.txt")
names(trainSubjects) <- "SubjectID"
# Combine the columns
Tab0_train <- bind_cols(trainSubjects,trainData_Y,trainData_x)
testData_x <- read.table("~/unzip/UCI HAR Dataset/test/X_test.txt")#[featuresonly_MeanSTD]
testData_Y <- read.table("~/unzip/UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("~/unzip/UCI HAR Dataset/test/subject_test.txt")
names(testSubjects) <- "SubjectID"
Tab0_test <- bind_cols(testSubjects,testData_Y,testData_x)
# COmbine test and train datasets
CombinedData <- bind_rows(Tab0_train, Tab0_test)
names(CombinedData) <- c('SubjectID', "Activity", featurenames)
CombinedData$Activity <- factor(CombinedData$Activity,
levels = activityLabels[,1],
labels = activityLabels[,2])
# Extract mean and std columns only
CombinedData <- CombinedData[,grep("^Subject|^Activity|.*mean.*|.*std.*", colnames(CombinedData))]
# Transpose long to wide and calculate the mean
CombinedDataMean <- melt(CombinedData, id = c("SubjectID", "Activity")) %>%
dcast(SubjectID + Activity ~ variable, mean)
write.table(CombinedDataMean, "tidyData.txt", row.names = FALSE)
|
1c936d2647c2f9d81b63e90447b3e5045dc8afa0
|
739f4e16cd01f5d870551d0e1fe3e2e4c0c69b69
|
/Day 15 Classification/20170211_Batch24_CSE7405c_Clustering_PCA_LabActivity/Cereals.R
|
0a9adb43423c734bf3d7659f1f2971e422608b16
|
[] |
no_license
|
AyubQuadri/Backup
|
bff0365da7dd2a099cd946827860506d337cb88c
|
c6e2c1bf2719fb4271ce43268b3d9645f5187206
|
refs/heads/master
| 2021-01-20T10:32:15.022115
| 2017-08-28T13:19:53
| 2017-08-28T13:19:53
| 101,644,051
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,174
|
r
|
Cereals.R
|
###Author: Ayub Quadri ###
###Problem: Clustering Activity
rm(list = ls())
#set working directory
setwd("C:/Users/quadris/Desktop/My Learning/INSOFE CPEE/Day 15 Classification/20170211_Batch24_CSE7405c_Clustering_PCA_LabActivity")
#Read the data
cereals <- read.csv("Cereals.csv",header = T,sep = ",")
str(cereals)
#Pre-Processing
#remove name column
cereals <- cereals[-1]
#check for null values
sapply(cereals,function(x)sum(is.na(x)))
#replace NA with 0
cereals[is.na(cereals)] <- 0
##scaling
cereals <-scale(cereals)
str(cereals)
###------------------------- Hierarchical Clustering ------------------------###
# Ward's method
# distance matrix euclidean
d <- dist(cereals,method = "euclidean")
d
fit <- hclust(d, method="ward.D2")
plot(fit) # display dendogram
groups <- cutree(fit, k=6) # cut tree into 6 clusters
groups
# draw dendogram with red borders around the 6 clusters
rect.hclust(fit, k=6, border="red")
mydata_clusters=data.frame(cereals,groups)
###------------------------- K- means Clustering ------------------------###
# K-Means Cluster Analysis with k = 5
fit <- kmeans(cereals, 5) # 5 cluster solution
fit$withinss
sum(fit$withinss)
fit$betweenss
#study the mdoel
fit$cluster
fit$tot.withinss
# get cluster means
aggregate(cereals,by=list(fit$cluster),
FUN=mean)
# append cluster label to the actual data frame
cereals <- data.frame(cereals,
fit$cluster)
write.csv(cereals,"kmeans_2.csv")
head(cereals)
# K-means: Determine number of clusters by considering the withinness measure
wss <- 0
for (i in 1:15) {
wss[i] <- sum(kmeans(cereals,centers=i)$withinss)
}
# Ploting the within sum of square error for different clusters
plot(1:15, wss,
type="b",
xlab="Number of Clusters",
ylab="Within groups sum of squares")
# For unseen data, we compute its distance from all the cluster centroids
# and assigns it to that cluster that is nearest to it
test_datapoint <- cereals[sample(1:nrow(cereals),1),]
closest.cluster <- function(x) {
cluster.dist <- apply(fit$centers, 1, function(y) sqrt(sum((x-y)^2)))
print(cluster.dist)
return(which.min(cluster.dist)[1])
}
# Predicting which cluster the new data point belongs to based on the distance.
closest.cluster(test_datapoint)
# Checking the cluster stability
# Building the clusters on all data
fit1 <- kmeans(cereals, 10)
# Getting the cluster numbers
x <- fit1$cluster
# Building the clusters on 90 % of data
fit2 <- kmeans(cereals[1:74,], 10)
# Getting the cluster numbers
y <- fit2$cluster
unique(y)
# Loading the required libraries
install.packages('dtwclust')
library(dtwclust)
library(flexclust)
# Checking whether the same data points are falling into same cluster
# when we cluster on all the data or 90% of data.
randIndex(x[1:74], y)
library(mclust)
# Checking whether the same data points are falling into same cluster
# when we cluster on all the data or 90% of data.
adjustedRandIndex(x[1:74], y)
|
dd867cdcbbe1f14a8d16704199899596ebcc7f2c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NORMA/examples/ILF_cost_der.Rd.R
|
38cba16d571380b04d7f932b8e85f2db8ad556ea
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,218
|
r
|
ILF_cost_der.Rd.R
|
library(NORMA)
### Name: ILF_cost_der
### Title: Cost Functions Derivatives
### Aliases: ILF_cost_der beta_cost_der general_gaussian_cost_der
### general_laplace_cost_der moge_cost_der weibull_cost_der
### zero_gaussian_cost_der zero_laplace_cost_der
### ** Examples
# ILF derivative value at point phi=1 with default epsilon.
ILF_cost_der(1)
# ILF derivative value at point phi=1 with epsilon=2.
ILF_cost_der(1,2)
# Zero-mean Laplace loss function derivative value at point phi=1 with sigma=1.
zero_laplace_cost_der(1,1)
# General Laplace loss function derivative value at point phi=1 with mu=0 and sigma=1.
general_laplace_cost_der(1,1,0)
# Zero-mean Gaussian loss function derivative value at point phi=1 with sigma_cuad=1.
zero_gaussian_cost_der(1,1)
# General Gaussian loss function derivative value at point phi=1 with mu=0 and sigma_cuad=1.
general_gaussian_cost_der(1,1,0)
# Beta loss function derivative value at point phi=1 with alpha=2 and beta=3.
beta_cost_der(1,2,3)
# Weibull loss function derivative value at point phi=1 with lambda=2 and kappa=3.
weibull_cost_der(1,2,3)
# MOGE loss function derivative value at point phi=1 with lambda=2 ,alpha=3 and theta=4.
moge_cost_der(1,2,3,4)
|
b207454313453832edb6f0a590e146fbde5b82c8
|
3cd3f6c838790c492c9d027051f374f4ad5759f4
|
/man/cheating.Rd
|
d7f891960eb7c5e157e25d8c25a650c8fe1ff9d3
|
[] |
no_license
|
cran/rockchalk
|
0814a7d5c6d057ff67a3ddd87b878b1832fde166
|
b5ee84d33158b428b2fe423ac99aa3609ad09e2e
|
refs/heads/master
| 2022-09-01T08:14:20.829386
| 2022-08-06T16:00:06
| 2022-08-06T16:00:06
| 17,699,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,118
|
rd
|
cheating.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rockchalk-package.R
\docType{data}
\name{cheating}
\alias{cheating}
\title{Cheating and Looting in Japanese Electoral Politics}
\format{
data.frame: 16623 obs. on 27 variables
}
\source{
\url{https://bnyblade.com/research/publications/}.
}
\usage{
data(cheating)
}
\description{
Extracted from the "cheating-replication.dta" data file
with permission by the authors, Benjamin Nyblade and Steven
Reed. The Stata data file provided by the authors included
many constructed variables that have been omitted. Within
R, these can be easily re-contructed by users.
}
\details{
Special thanks to NyBlade and Reed for permission to repackage
this data. Also special thanks to them for creating an especially
transparent variable naming scheme.
The data set includes many columns for variables that can easily
be re-constructed from the columns that are provided here. While
Stata users might need to manually create 'dummy variables' and
interactions, R users generally do not do that manually.
These variables from the original data set were omitted:
Dummy variables for the year variable: c("yrd1", "yrd2", ...,
"yrd17", "yrd18")
Dummy variables for the ku variable:
c("ku1", "ku2", ..., "ku141", "ku142")
Constructed product variables: c("actualratiosq", "viabsq",
"viab_candcamp_divm", "viab_candothercamp_divm",
"viabsq_candcamp_divm", "viabsq_candothercamp_divm",
"absviab_candcamp", "absviab_candothercamp",
"absviab_candcamp_divm", "absviab_candothercamp_divm",
"viabsq_candcamp", "viabsq_candothercamp", "viab_candcamp",
"viab_candothercamp", "candothercamp_divm", "candcamp_divm",
"candcampminusm", "candothercampminusm", "predratiosq", "absviab")
Mean centered variables: constr2 <- c("viab_candcampminusm",
"viab_candothercampminusm", "viabsq_candothercampminusm",
"viabsq_candcampminusm")
In the end, we are left with these variables:
[1] "ku"
[2] "prefecture"
[3] "dist"
[4] "year"
[5] "yr"
[6] "cdnr"
[7] "jiban"
[8] "cheating"
[9] "looting"
[10] "actualratio"
[11] "viab"
[12] "inc"
[13] "cons"
[14] "ur"
[15] "newcand"
[16] "jwins"
[17] "cons_cwins"
[18] "oth_cwins"
[19] "camp"
[20] "fleader"
[21] "incablast"
[22] "predratio"
[23] "m"
[24] "candcamp"
[25] "candothercamp"
[26] "kunocheat"
[27] "kunoloot"
}
\examples{
require(rockchalk)
data(cheating)
table1model2 <- glm(cheating ~ viab + I(viab^2) + inc + cons + ur
+ newcand + jwins + cons_cwins + oth_cwins, family = binomial(link
= "logit"), data = cheating)
predictOMatic(table1model2)
predictOMatic(table1model2, interval = "confidence")
## The publication used "rare events logistic", which I'm not bothering
## with here because I don't want to invoke additional imported packages.
## But the ordinary logit results are proof of concept.
}
\references{
Benjamin Nyblade and Steven Reed, "Who Cheats? Who
Loots? Political Competition and Corruption in Japan, 1947-1993."
American Journal of Political Science 52(4): 926-41. October 2008.
}
\author{
Paul E. Johnson \email{pauljohn@ku.edu}, on behalf of Benjamin Nyblade and Steven Reed
}
\keyword{datasets}
|
b1b1b2960a15673e02bd9fb2297dd2322ff22741
|
3c5e3f9a4c5db7b26a0a2cf4c286aa8fd4edd051
|
/man/chi_sqQ.Rd
|
85da9cb633dd997d1196ce4a43ba5bcc7752af3d
|
[
"MIT"
] |
permissive
|
leahpom/MATH5793POMERANTZ
|
f8f8f639e1f522da200f57cd242173e58d11ab1d
|
b8357d19142f15c9d0d7fa27c03af4a64083a352
|
refs/heads/master
| 2023-04-20T06:21:21.447810
| 2021-05-04T04:37:17
| 2021-05-04T04:37:17
| 335,433,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 737
|
rd
|
chi_sqQ.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chi_sqQ.R
\name{chi_sqQ}
\alias{chi_sqQ}
\title{Values for Chi-Square Plot}
\usage{
chi_sqQ(df)
}
\arguments{
\item{df}{a bivariate data.frame}
}
\value{
j the observation number, ordered by the squared distance
d_jsq the squared distance
q_j the chi-square quantiles from the chi-square distribution
}
\description{
This function is for use in my shiny_MVN app to calculate the
values for the chi-square plot for assessing normality
}
\examples{
x1 <- c(108.28, 152.36, 95.04, 65.45, 62.97, 263.99, 265.19, 285.06, 92.01, 165.68)
x2 <- c(17.05, 16.59, 10.91, 14.14, 9.52, 25.33, 18.54, 15.73, 8.10, 11.13)
data <- data.frame(cbind(x1, x2))
chi_sqQ(data)
}
|
eac46b2c09284d41be950ae9e4f35f185b6a605c
|
a86f32451020888a23ea99ad4770fe6ec3a2aaaa
|
/R/zzz.R
|
3fd5332175abe5be6ce2a76dfabd21b9c90ec660
|
[] |
no_license
|
Geoany/spatstat.sphere
|
2d806b3a2ee78f83743c404be780fec3dcb5562f
|
9131f1e8c48d44ec97320d73f5949d7ac4473f98
|
refs/heads/master
| 2021-01-23T04:29:11.326783
| 2017-07-05T09:11:16
| 2017-07-05T09:11:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 650
|
r
|
zzz.R
|
#' Package documentation for spatstat.sphere
#'
#' The package `spatstat.sphere` extends the `spatstat` package to work with
#' point pattens on the sphere.
#'
#' @docType package
#' @import globe
#' @import s2
#' @import spatstat
#' @importFrom spatstat.utils splat resolve.defaults do.call.matched %orifnull% check.1.integer
#' @importFrom grDevices dev.off png
#' @importFrom graphics abline hist par plot rasterImage segments
#' @importFrom stats rbinom runif
#' @importFrom utils tail
### #' @importFrom jpeg readJPEG
### #' @importFrom png readPNG
### #' @importFrom Rcpp evalCpp
### #' @useDynLib spatstat.sphere
#' @name spatstat.sphere
NULL
|
4c28a5ddeb4f98ca9d1a24a1a4a0102189faa961
|
52c876f9d074ab0d4fb5dd569b338ce1595c1b3d
|
/project_code/Other source/dportal.R
|
9083f53f843e15853541d911b296b4839ee93c1d
|
[] |
no_license
|
danjwalton/covid_funding
|
50966c323f06a0c81e1a15ca02c9f42e9d8283d7
|
20e7624f4f2c1ebb8f8c1bb062d9e738c3441f10
|
refs/heads/master
| 2023-03-12T00:02:29.671886
| 2021-03-02T12:31:35
| 2021-03-02T12:31:35
| 293,513,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,429
|
r
|
dportal.R
|
required.packages <- c("data.table", "httr", "ggplot2", "WDI", "XML")
lapply(required.packages, require, character.only=T)
setwd("G:/My Drive/Work/GitHub/covid_funding/")
iati.1 <- xmlParse("http://d-portal.org/q.xml?from=act&limit=-1&distincton=aid&%2Fhumanitarian-scope@code=EP-2020-000012-001")
iati.2 <- xmlParse("http://d-portal.org/q.xml?from=act&limit=-1&distincton=aid&text_search=COVID-19")
iati.3 <- xmlParse("http://d-portal.org/q.xml?from=act&limit=-1&distincton=aid&text_search=COVID%2019")
iati.4 <- xmlParse("http://d-portal.org/q.xml?from=act&limit=-1&distincton=aid&%2Ftag@code=COVID-19")
iati.5 <- xmlParse("http://d-portal.org/q.xml?from=act&limit=-1&distincton=aid&%2Fhumanitarian-scope@code=HCOVD20")
# iatis <- ls()[grepl("iati.", ls())]
# iati.covid <- rbindlist(get(iatis))
transactions <- getNodeSet(iati.1, "//transaction")
test <- transactions[[1]]
transaction.data <- function(transaction, data.name, is.attr = F, in.parent = F){
if(in.parent) x <- xmlParent(transaction) else x <- transaction
if(is.attr) v <- (xmlAttrs(x[[data.name]])) else v <- (xmlValue(x[[data.name]]))
return(v)
}
iati_id <- transaction.data(transactions[[i]], 'iati-identifier', in.parent = T)
reporting_org_text <- transaction.data(transactions[[i]], 'reporting-org', in.parent = T)
reporting_org_ref <- transaction.data(test, 'reporting-org', is.attr = T, in.parent = T)['ref']
reporting_org_type <- transaction.data(test, 'reporting-org', is.attr = T, in.parent = T)['type']
value_currency <- if(!is.na(transaction.data(test, 'value', is.attr = T, in.parent = F)["currency"])){
transaction.data(test, 'value', is.attr = T, in.parent = F)["currency"] } else {
if(!is.na(transaction.data(test, 'default-currency', is.attr = F, in.parent = T))){
transaction.data(test, 'default-currency', is.attr = F, in.parent = T)
} else {
'USD'
}
}
transaction_date <- transaction.data(test, 'value', is.attr = T, in.parent = F)['value-date']
transaction_type_code <- transaction.data(test, 'transaction-type', is.attr = T)['code']
finance_type <- if(!is.na(transaction.data(test, 'finance-type', is.attr = F, in.parent = F))){
transaction.data(test, 'finance-type', is.attr = F, in.parent = F) } else {
if(!is.na(transaction.data(test, 'default-finance-type', is.attr = F, in.parent = T))){
transaction.data(test, 'default-finance-type', is.attr = F, in.parent = T)
} else {
""
}
}
|
7062001f0d85399b661e01bf936cf0f64bea87e2
|
e01e76528b33478f59c0be5773182da0ff6b9573
|
/man/elevation.Rd
|
aad0308dd427eeaf84337c3d69445e6a2f2f2c18
|
[] |
no_license
|
Nowosad/spDataLarge
|
6884d6d5a62fa189c040633a8dd72743d0d756f3
|
96e83955e0d50da5591d561680ae3279a4e10e1e
|
refs/heads/master
| 2023-01-05T03:16:51.557676
| 2022-12-23T10:04:03
| 2022-12-23T10:04:03
| 79,939,092
| 24
| 11
| null | 2022-12-23T09:57:40
| 2017-01-24T18:10:22
|
R
|
UTF-8
|
R
| false
| true
| 408
|
rd
|
elevation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{elevation}
\alias{elevation}
\title{Elevation raster data}
\format{
A RasterLayer object
}
\source{
\url{http://srtm.csi.cgiar.org/SRT-ZIP/SRTM_V41/SRTM_Data_GeoTiff/srtm_14_05.zip}
}
\usage{
elevation
}
\description{
Elevation raster data from SRTM of the Zion National Park area
}
\keyword{datasets}
|
d53310fc5389ccd661fad2056caad08955099607
|
e5ebddef173d10c4722c68f0ac090e5ecc626b8b
|
/IL2/bin/repeatability/pstat5-repeatability.R
|
8cc5f68c162113320be2f04efbd7120618a72b2b
|
[] |
no_license
|
pontikos/PhD_Projects
|
1179d8f84c1d7a5e3c07943e61699eb3d91316ad
|
fe5cf169d4624cb18bdd09281efcf16ca2a0e397
|
refs/heads/master
| 2021-05-30T09:43:11.106394
| 2016-01-27T15:14:37
| 2016-01-27T15:14:37
| 31,047,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,847
|
r
|
pstat5-repeatability.R
|
library(iterators)
library(plyr)
library(flowCore)
library(lubridate)
REPEATS <- structure(list(individual = c("CB00165D", "CB00366X", "CB00396E",
"CB00406Q", "CB01484M", "CB01494Y", "CB01495Z", "CB01498C", "CB01503H",
"CB01504J"), pch = c("a", "b", "c", "d", "e", "f", "g", "h",
"i", "j"), day1 = c("2012-11-29", "2012-11-07", "2012-09-25",
"2012-10-16", "2012-09-25", "2012-10-09", "2012-10-09", "2012-10-16",
"2012-11-07", "2012-11-07"), day2 = c("2013-03-07", "2013-03-27",
"2013-03-11", "2013-01-22", "2013-03-11", "2013-01-29", "2013-01-29",
"2013-01-22", "2013-03-07", "2013-03-27"), col = c("#0066FFFF",
"#FF9900FF", "#00FFFFFF", "#FF0099FF", "#33FF00FF", "#CCFF00FF",
"#CC00FFFF", "#3300FFFF", "#00FF66FF", "#FF0000FF"), day.diff = structure(c(98,
140, 167, 98, 167, 112, 112, 98, 120, 140), units = "days", class = "difftime"),
t1d = c('control', 'case', 'control', 'control', 'case', 'case', 'control', 'case', 'case', 'control')),
.Names = c("individual", "pch", "day1", "day2", "col", "day.diff", "t1d"), row.names = c("CB00165D",
"CB00366X", "CB00396E", "CB00406Q", "CB01484M", "CB01494Y", "CB01495Z",
"CB01498C", "CB01503H", "CB01504J"), class = "data.frame")
REPEATS <- rbind(cbind(REPEATS,date=REPEATS$day1),cbind(REPEATS,date=REPEATS$day2))
DOSES <- c( '0U', '0.1U', '10U', '1000U')
CLR.CELL.TYPES <- c("Lymphocytes", "Single Cells", "CD4", "Memory", "Memory Eff", "Memory Treg", "Naive", "Naive Eff", "Naive Treg")
CELL.TYPES <- c('Memory Eff', 'Memory Treg', 'Naive Eff', 'Naive Treg')
blues4 <- blues9[5:9]
BASE.DIR <- '~nikolas/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/Lymphocytes/RData'
#header
cat('individual', 'norm', 'date.day1', 'date.day2', 'fi.auc.day1', 'pct.auc.day1', 'fi.auc.day2', 'pct.auc.day2', sep=',')
cat('\n')
inverse = function (f, lower = -100, upper = 100) {
function (y) uniroot((function (x) f(x) - y), lower = lower, upper = upper)[1]
}
getDate <- function(x) format(as.Date(dmy(x@description$`$DATE`, quiet=T)), '%Y-%m-%d')
f <- function(x, y, qmin=0, qmax=1) (quantile(x,qmax)-quantile(x,qmin)) * (y-quantile(y,qmin))/(quantile(y,qmax)-quantile(y,qmin)) + quantile(x,qmin)
# flowCore curvature gate on scatter channels (apparently exploratory; `g` is
# not used again in the visible code).
g <- curv2Filter("FSC-A", "SSC-A", filterId="test filter")
#head(d[d$individual=='CB01494Y',], n=101)->d2
# One plotting symbol (letter) per repeated individual.
print(pch <- data.frame(cbind(name=rep.individuals, pch=letters[1:length(rep.individuals)]),stringsAsFactors=F))
d -> d2
# Day-1 vs day-2 scatter of pSTAT5 MFI on a common range with the identity line.
pdf('~nikolas/IL2/Plots/Rplot.pdf')
r <- range(d2[,2],d2[,3])
plot(d2[,2], d2[,3],pch=pch[pch$name==d2$individual,'pch'], xlab='pSTAT5 MFI Day 1', ylab='pSTAT5 MFI Day 2', xlim=r, ylim=r)
abline(b=1,a=0)
#co <- coef(line(d2[,2],d2[,3]))
#abline(co)
d3 <- as.matrix(cbind(1,d2[,2:3]))
#points(d3[,c(1,2)]%*%co, d3[,3], pch=20)
dev.off()
#
# Pick the pair of repeat frames to compare; each assignment below overwrites
# the previous one, so only the LAST pair (the 1000U downsampled files) is in
# effect for the per-channel fit that follows.
f1 <- rep.fcs$CB01494Y$day1$U1000
f2 <- rep.fcs$CB01494Y$day2$U1000
#
f1 <- read.FCS('~/dunwich/spade.output/units/T1D_TC8_pSTAT5_Treg_I021767J_CB01494Y_0U/T1D_TC8_pSTAT5_Treg_I021767J_CB01494Y_0U.fcs.downsample.fcs')
f2 <- read.FCS('~/dunwich/spade.output/units/T1D_TC8_rep_pSTAT5_Treg_CB01494Y_I023360Q_0U/T1D_TC8_rep_pSTAT5_Treg_CB01494Y_I023360Q_0U.fcs.downsample.fcs')
#
f1 <- read.FCS('~/dunwich/spade.output/units/T1D_TC8_pSTAT5_Treg_I021767J_CB01494Y_1000U/T1D_TC8_pSTAT5_Treg_I021767J_CB01494Y_1000U.fcs.downsample.fcs')
f2 <- read.FCS('~/dunwich/spade.output/units/T1D_TC8_rep_pSTAT5_Treg_CB01494Y_I023360Q_1000U/T1D_TC8_rep_pSTAT5_Treg_CB01494Y_I023360Q_1000U.fcs.downsample.fcs')
# For every channel shared by the two repeat frames f1/f2, fit Tukey's
# resistant line to the QQ-mapped (logicle-transformed) intensities,
# regressing the day-1 quantiles on the day-2 quantiles. The intercept `a`
# and slope `b` give a per-channel day2 -> day1 normalisation.
# Improved: results are built as a list of one-row data frames and bound
# once, instead of growing `d` with rbind() inside the loop (quadratic
# copying); `plot.it=F` spelled out as FALSE.
d <- do.call(rbind, lapply(colnames(f1@exprs), function(chan) {
    # quantile-quantile mapping between the two days (no plot drawn)
    qq <- qqplot(lgcl(f1@exprs[, chan]), lgcl(f2@exprs[, chan]), plot.it = FALSE)
    fit <- line(qq$y, qq$x)
    data.frame(param = chan,
               a = fit$coefficients[1],   # intercept
               b = fit$coefficients[2])   # slope
}))
source("pstat-rep_functions.R")
# Logicle transform applied to pSTAT5 intensities throughout the sections below.
trans <- logicleTransform()
# Per-individual diagnostic plots, one output directory per individual
# (default cell type as defined by make.plots).
for (individual in rep.individuals) {
cat(individual,"\t")
dir.create(individual,showWarnings=FALSE)
make.plots(individual)
}
# Same again but restricted to the lymphocyte gate.
for (individual in rep.individuals) {
cat(individual,"\t")
dir.create(individual,showWarnings=FALSE)
make.plots(individual, cell.type='lymphocytes')
}
# Quantile-normalised day-1 vs day-2 comparison for one individual.
# For each day it builds ECDFs of the baseline (0U) and high-dose (1000U)
# pSTAT5 intensities (channel 7), quantile-normalises the high dose onto the
# baseline between qmin and qmax, and summarises the separation of the two
# ECDFs by two areas: fi.auc (horizontal gap, via the inverse ECDFs) and
# pct.auc (vertical gap). Day 1 is drawn in blues, day 2 in greens.
individual <- "CB01494Y"
##outdir <- sprintf('/home/nikolas/Tony/pstat5-repeatability/pstat5-norm/%s/', individual)
outdir <- individual
dir.create(outdir)
##pdf(sprintf('/home/nikolas/Tony/pstat5-repeatability/pstat5-norm/%s/norm-none.pdf', individual))
myp(day1.1, day1.2, day=1, new=TRUE)
myp(day2.1, day2.2, day=2)
myp(day1.1, day1.2, day=1, new=TRUE, adjust=TRUE)
myp(day2.1, day2.2, day=2, adjust=TRUE)
myp(day1.1, day1.2, day=1, q=0.01, new=TRUE)
myp(day2.1, day2.2, day=2, q=0.01)
baseline.conc <- trans(day2.1@exprs[,7])
baseline.conc.ecdf <- ecdf(baseline.conc)
high.conc <- trans(day2.2@exprs[,7])
high.conc.ecdf <- ecdf(high.conc)
myp(baseline.conc.ecdf, high.conc.ecdf, day=2)
#dev.off()
# Quantile-normalisation bounds used for both days below.
qmin <- 0.01
qmax <- 1-qmin
pdf(sprintf('/home/nikolas/Tony/pstat5-repeatability/pstat5-norm/%s/norm-%s-%s.pdf', individual, as.character(qmin), as.character(qmax)))
# ---- day 1 (blue) ----
baseline.conc <- trans(day1.1@exprs[,7])
baseline.conc.ecdf <- ecdf(baseline.conc)
high.conc <- trans(day1.2@exprs[,7])
# map the high-dose distribution onto the baseline's quantile range
high.conc <- f(baseline.conc, high.conc, qmin, qmax)
high.conc.ecdf <- ecdf(high.conc)
# numeric inverses of the two ECDFs on [-1, 5] (logicle scale)
inv.baseline.conc <- function(x) sapply(x, function(x) inverse(baseline.conc.ecdf, -1, 5)(x)$root)
inv.high.conc <- function(x) sapply(x, function(x) inverse(high.conc.ecdf, -1, 5)(x)$root)
inv.ecdf.diff <- function(x) return(inv.high.conc(x)-inv.baseline.conc(x))
x <- seq(0,1,.01)
plot(baseline.conc.ecdf, xlab='pstat5', main=paste(individual, 'baseline/1000U day1 vs day2 qnorm:', qmin, '-', qmax), col='white')
lines(baseline.conc.ecdf, col='lightblue')
lines(high.conc.ecdf, col='darkblue')
lines(inv.ecdf.diff(x), x, col='blue', lty=2)
# fluorescence-intensity AUC: |sum of horizontal ECDF gaps| over x in [0,1]
inv.ecdf.diff.area <- abs(sum(inv.ecdf.diff(x)))
fi.auc.day1 <- round(inv.ecdf.diff.area,digits=2)
text(0, 1, fi.auc.day1, col='blue')
x <- seq(min(baseline.conc), max(baseline.conc), .1)
ecdf.diff <- function(x) return(-high.conc.ecdf(x)+baseline.conc.ecdf(x))
lines(x, (ecdf.diff(x)), lty=2, col='blue')
# percentage AUC: |sum of vertical ECDF gaps| over the baseline's support
ecdf.diff.area <- abs(sum(ecdf.diff(x)))
pct.auc.day1 <- round(ecdf.diff.area,digits=2)
text(4, 0, pct.auc.day1, col='blue')
# ---- day 2 (green), same computation overlaid on the same panel ----
baseline.conc <- trans(day2.1@exprs[,7])
baseline.conc.ecdf <- ecdf(baseline.conc)
high.conc <- trans(day2.2@exprs[,7])
high.conc <- f(baseline.conc, high.conc, qmin, qmax)
high.conc.ecdf <- ecdf(high.conc)
inv.baseline.conc <- function(x) sapply(x, function(x) inverse(baseline.conc.ecdf, -1, 5)(x)$root)
inv.high.conc <- function(x) sapply(x, function(x) inverse(high.conc.ecdf, -1, 5)(x)$root)
inv.ecdf.diff <- function(x) return(inv.high.conc(x)-inv.baseline.conc(x))
x <- seq(0,1,.01)
lines(baseline.conc.ecdf, col='lightgreen')
lines(high.conc.ecdf, col='darkgreen')
lines(inv.ecdf.diff(x), x, col='green', lty=2)
inv.ecdf.diff.area <- abs(sum(inv.ecdf.diff(x)))
fi.auc.day2 <- round(inv.ecdf.diff.area,digits=2)
text(median(inv.ecdf.diff(x)), median(x), fi.auc.day2, col='green')
x <- seq(min(baseline.conc), max(baseline.conc), .1)
ecdf.diff <- function(x) return(-high.conc.ecdf(x)+baseline.conc.ecdf(x))
lines(x, (ecdf.diff(x)), lty=2, col='green')
ecdf.diff.area <- abs(sum(ecdf.diff(x)))
pct.auc.day2 <- round(ecdf.diff.area,digits=2)
text(median(x), median(ecdf.diff(x)), pct.auc.day2, col='green')
dev.off()
# One CSV row matching the header cat() near the top of the script.
cat(individual, paste(as.character(qmin),as.character(qmax),sep='-'), getDate(day1.1), getDate(day2.1), fi.auc.day1, pct.auc.day1, fi.auc.day2, pct.auc.day2, sep=',')
cat('\n')
# pSTAT5 MFI
# Four-panel figure: day-1 vs day-2 pSTAT5 MFI per cell subset at the dose
# shown in each title. Black = raw MFI, red = baseline(0U)-subtracted MFI.
# The alternating c(TRUE,FALSE)/c(FALSE,TRUE) indexing pairs consecutive rows
# of the merged repeats table as (day1, day2) for the same individual.
# NOTE(review): the legend value is (round(r^2, 3))^2, i.e. roughly r^4 --
# looks like a copy-paste slip for round(r^2, 3); the same expression recurs
# in every panel of this and the following figures. Confirm before fixing.
load(file.path('~/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/CellPhenotypes/magnetic-manual-gates2/','pstat5.mfi.RData'))
raw.pstat5mfi <- pstat5.mfi
baseline.raw.pstat5mfi <- cbind(pstat5.mfi[,c('individual','date','cell.type','0U')],pstat5.mfi[,c('01U','10U','1000U')]-pstat5.mfi[,'0U'])
pdf('~nikolas/Thesis/figures/pstat5-mfi-cellsubsets-repeatability.pdf')
par(mfrow=c(2,2))
# Memory Eff
dim(raw.repeats <- merge(raw.pstat5mfi[which(raw.pstat5mfi$cell.type=="Memory Eff"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'10U'],raw.repeats[c(FALSE,TRUE),'10U'])
dim(baseline.raw.repeats <- merge(baseline.raw.pstat5mfi[which(baseline.raw.pstat5mfi$cell.type=="Memory Eff"),], REPEATS))
baseline.raw.pstat5 <- cbind(baseline.raw.repeats[c(TRUE,FALSE),'10U'],baseline.raw.repeats[c(FALSE,TRUE),'10U'])
xlim <- range(raw.pstat5,baseline.raw.pstat5)
plot(raw.pstat5, xlim=xlim, ylim=xlim, pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Memory Eff 10U')
points(baseline.raw.pstat5, pch=baseline.raw.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.raw.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
# Memory Treg
dim(raw.repeats <- merge(raw.pstat5mfi[which(raw.pstat5mfi$cell.type=="Memory Treg"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'01U'],raw.repeats[c(FALSE,TRUE),'01U'])
dim(baseline.raw.repeats <- merge(baseline.raw.pstat5mfi[which(baseline.raw.pstat5mfi$cell.type=="Memory Treg"),], REPEATS))
baseline.raw.pstat5 <- cbind(baseline.raw.repeats[c(TRUE,FALSE),'01U'],baseline.raw.repeats[c(FALSE,TRUE),'01U'])
xlim <- range(raw.pstat5,baseline.raw.pstat5)
plot(raw.pstat5, xlim=xlim, ylim=xlim, pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Memory Treg 0.1U')
points(baseline.raw.pstat5, pch=baseline.raw.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.raw.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
# Naive Eff
dim(raw.repeats <- merge(raw.pstat5mfi[which(raw.pstat5mfi$cell.type=="Naive Eff"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'1000U'],raw.repeats[c(FALSE,TRUE),'1000U'])
dim(baseline.raw.repeats <- merge(baseline.raw.pstat5mfi[which(baseline.raw.pstat5mfi$cell.type=="Naive Eff"),], REPEATS))
baseline.raw.pstat5 <- cbind(baseline.raw.repeats[c(TRUE,FALSE),'1000U'],baseline.raw.repeats[c(FALSE,TRUE),'1000U'])
xlim <- range(raw.pstat5,baseline.raw.pstat5)
plot(raw.pstat5, xlim=xlim, ylim=xlim, pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Naive Eff 1000U')
points(baseline.raw.pstat5, pch=baseline.raw.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.raw.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
# Naive Treg
dim(raw.repeats <- merge(raw.pstat5mfi[which(raw.pstat5mfi$cell.type=="Naive Treg"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'01U'],raw.repeats[c(FALSE,TRUE),'01U'])
dim(baseline.raw.repeats <- merge(baseline.raw.pstat5mfi[which(baseline.raw.pstat5mfi$cell.type=="Naive Treg"),], REPEATS))
baseline.raw.pstat5 <- cbind(baseline.raw.repeats[c(TRUE,FALSE),'01U'],baseline.raw.repeats[c(FALSE,TRUE),'01U'])
xlim <- range(raw.pstat5,baseline.raw.pstat5)
plot(raw.pstat5, xlim=xlim, ylim=xlim, pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Naive Treg 0.1U')
points(baseline.raw.pstat5, pch=baseline.raw.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.raw.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
dev.off()
# nn pSTAT5 MFI
# Same four-panel layout as above, for the normalised (NN) MFI phenotypes.
# Columns X* are renamed PSTAT5.1-4 (one per dose); base* columns are the
# baseline-subtracted versions renamed diff.PSTAT5.1-4.
load(file.path('~/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/CellPhenotypes/magnetic-manual-gates2/','nn.pstat5.mfi.RData'))
nn.pstat5mfi <- pstat5.mfi[,c('individual','date','cell.type',grep('^X',colnames(pstat5.mfi),value=TRUE))]
colnames(nn.pstat5mfi) <- c('individual','date','cell.type',paste('PSTAT5',1:4,sep='.'))
baseline.nn.pstat5mfi <- pstat5.mfi[,c('individual','date','cell.type',grep('^base.*U',colnames(pstat5.mfi),value=TRUE))]
colnames(baseline.nn.pstat5mfi) <- c('individual','date','cell.type',paste('diff','PSTAT5',1:4,sep='.'))
#
pdf('~nikolas/Thesis/figures/nn-pstat5-mfi-cellsubsets-repeatability.pdf')
par(mfrow=c(2,2))
# Memory Eff
dim(nn.repeats <- merge(nn.pstat5mfi[which(nn.pstat5mfi$cell.type=="Memory Eff"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'PSTAT5.3'],nn.repeats[c(FALSE,TRUE),'PSTAT5.3'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5mfi[which(baseline.nn.pstat5mfi$cell.type=="Memory Eff"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'diff.PSTAT5.3'],baseline.nn.repeats[c(FALSE,TRUE),'diff.PSTAT5.3'])
xlim <- range(nn.pstat5,baseline.nn.pstat5)
plot(nn.pstat5, xlim=xlim, ylim=c(0,1), pch=nn.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Memory Eff 10U')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
# Memory Treg
dim(nn.repeats <- merge(nn.pstat5mfi[which(nn.pstat5mfi$cell.type=="Memory Treg"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'PSTAT5.2'],nn.repeats[c(FALSE,TRUE),'PSTAT5.2'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5mfi[which(baseline.nn.pstat5mfi$cell.type=="Memory Treg"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'diff.PSTAT5.2'],baseline.nn.repeats[c(FALSE,TRUE),'diff.PSTAT5.2'])
xlim <- range(nn.pstat5,baseline.nn.pstat5)
plot(nn.pstat5, xlim=xlim, ylim=c(0,1), pch=nn.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Memory Treg 0.1U')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
# Naive Eff
dim(nn.repeats <- merge(nn.pstat5mfi[which(nn.pstat5mfi$cell.type=="Naive Eff"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'PSTAT5.4'],nn.repeats[c(FALSE,TRUE),'PSTAT5.4'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5mfi[which(baseline.nn.pstat5mfi$cell.type=="Naive Eff"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'diff.PSTAT5.4'],baseline.nn.repeats[c(FALSE,TRUE),'diff.PSTAT5.4'])
xlim <- range(nn.pstat5,baseline.nn.pstat5)
plot(nn.pstat5, xlim=xlim, ylim=c(0,1), pch=nn.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Naive Eff 1000U')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
# Naive Treg
dim(nn.repeats <- merge(nn.pstat5mfi[which(nn.pstat5mfi$cell.type=="Naive Treg"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'PSTAT5.2'],nn.repeats[c(FALSE,TRUE),'PSTAT5.2'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5mfi[which(baseline.nn.pstat5mfi$cell.type=="Naive Treg"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'diff.PSTAT5.2'],baseline.nn.repeats[c(FALSE,TRUE),'diff.PSTAT5.2'])
xlim <- range(nn.pstat5,baseline.nn.pstat5)
plot(nn.pstat5, xlim=xlim, ylim=c(0,1), pch=nn.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: pSTAT5 MFI', ylab='day 2: pSTAT5 MFI', col='black', main='Naive Treg 0.1U')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='red')
rp <- vector('expression',2)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red'), bty='n')
abline(b=1, a=0)
dev.off()
# % PSTAT5+
# Four-panel figure of percent pSTAT5-positive cells, day 1 vs day 2.
# Black = raw gating, red = normalised (NN), blue = NN baseline-corrected.
# NOTE(review): ylim is c(0,1) while xlim is c(0,100); if the raw values are
# percentages (0-100) the day-2 axis clips them -- confirm the intended scale.
print(load(file.path('~/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/CellPhenotypes/magnetic-manual-gates2/','pstat5.pos.RData')))
raw.pstat5pos <- pstat5.pos
print(load(file.path('~/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/CellPhenotypes/magnetic-manual-gates2/','nn.pstat5.pos.RData')))
nn.pstat5pos <- pstat5.pos
print(load(file.path('~/dunwich/Projects/IL2/PSTAT5-CD25-CD45RA-CD4-FOXP3/CellPhenotypes/magnetic-manual-gates2/','nn.base.pstat5.pos.RData')))
head(baseline.nn.pstat5pos <- pstat5.pos)
#
pdf('~nikolas/Thesis/figures/pstat5-pos-cellsubsets-repeatability.pdf')
par(mfrow=c(2,2))
# Memory Eff
dim(raw.repeats <- merge(raw.pstat5pos[which(raw.pstat5pos$cell.type=="Memory Eff"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'10U'],raw.repeats[c(FALSE,TRUE),'10U'])
dim(nn.repeats <- merge(nn.pstat5pos[which(nn.pstat5pos$cell.type=="Memory Eff"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'10U'],nn.repeats[c(FALSE,TRUE),'10U'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5pos[which(baseline.nn.pstat5pos$cell.type=="Memory Eff"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'10U'],baseline.nn.repeats[c(FALSE,TRUE),'10U'])
#xlim <- range(raw.pstat5, nn.pstat5,baseline.nn.pstat5)
xlim <- c(0,100)
plot(raw.pstat5, xlim=xlim, ylim=c(0,1), pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: % pSTAT5+', ylab='day 2: % pSTAT5+', col='black', main='Memory Eff 10U')
points(nn.pstat5, pch=nn.repeats[c(TRUE,FALSE),'pch'], col='red')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='blue')
rp <- vector('expression',3)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[3] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red','blue'), bty='n')
abline(b=1, a=0)
# Memory Treg
dim(raw.repeats <- merge(raw.pstat5pos[which(raw.pstat5pos$cell.type=="Memory Treg"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'01U'],raw.repeats[c(FALSE,TRUE),'01U'])
dim(nn.repeats <- merge(nn.pstat5pos[which(nn.pstat5pos$cell.type=="Memory Treg"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'01U'],nn.repeats[c(FALSE,TRUE),'01U'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5pos[which(baseline.nn.pstat5pos$cell.type=="Memory Treg"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'01U'],baseline.nn.repeats[c(FALSE,TRUE),'01U'])
#xlim <- range(nn.pstat5,baseline.nn.pstat5)
plot(raw.pstat5, xlim=xlim, ylim=c(0,1), pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: % pSTAT5+', ylab='day 2: % pSTAT5+', col='black', main='Memory Treg 0.1U')
points(nn.pstat5, pch=nn.repeats[c(TRUE,FALSE),'pch'], col='red')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='blue')
rp <- vector('expression',3)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[3] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red','blue'), bty='n')
abline(b=1, a=0)
# Naive Eff
dim(raw.repeats <- merge(raw.pstat5pos[which(raw.pstat5pos$cell.type=="Naive Eff"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'1000U'],raw.repeats[c(FALSE,TRUE),'1000U'])
dim(nn.repeats <- merge(nn.pstat5pos[which(nn.pstat5pos$cell.type=="Naive Eff"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'1000U'],nn.repeats[c(FALSE,TRUE),'1000U'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5pos[which(baseline.nn.pstat5pos$cell.type=="Naive Eff"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'1000U'],baseline.nn.repeats[c(FALSE,TRUE),'1000U'])
#xlim <- range(nn.pstat5,baseline.nn.pstat5)
plot(raw.pstat5, xlim=xlim, ylim=c(0,1), pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: % pSTAT5+', ylab='day 2: % pSTAT5+', col='black', main='Naive Eff 1000U')
points(nn.pstat5, pch=nn.repeats[c(TRUE,FALSE),'pch'], col='red')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='blue')
rp <- vector('expression',3)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[3] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red','blue'), bty='n')
abline(b=1, a=0)
# Naive Treg
# Final panel: % pSTAT5+ at 0.1U in Naive Tregs, day 1 vs day 2 (black = raw,
# red = normalised, blue = baseline-corrected normalised), then close the PDF.
dim(raw.repeats <- merge(raw.pstat5pos[which(raw.pstat5pos$cell.type=="Naive Treg"),], REPEATS))
raw.pstat5 <- cbind(raw.repeats[c(TRUE,FALSE),'01U'],raw.repeats[c(FALSE,TRUE),'01U'])
dim(nn.repeats <- merge(nn.pstat5pos[which(nn.pstat5pos$cell.type=="Naive Treg"),], REPEATS))
nn.pstat5 <- cbind(nn.repeats[c(TRUE,FALSE),'01U'],nn.repeats[c(FALSE,TRUE),'01U'])
dim(baseline.nn.repeats <- merge(baseline.nn.pstat5pos[which(baseline.nn.pstat5pos$cell.type=="Naive Treg"),], REPEATS))
baseline.nn.pstat5 <- cbind(baseline.nn.repeats[c(TRUE,FALSE),'01U'],baseline.nn.repeats[c(FALSE,TRUE),'01U'])
#xlim <- range(nn.pstat5,baseline.nn.pstat5)
plot(raw.pstat5, xlim=xlim, ylim=c(0,1), pch=raw.repeats[c(TRUE,FALSE),'pch'], xlab='day 1: % pSTAT5+', ylab='day 2: % pSTAT5+', col='black', main='Naive Treg 0.1U')
points(nn.pstat5, pch=nn.repeats[c(TRUE,FALSE),'pch'], col='red')
points(baseline.nn.pstat5, pch=baseline.nn.repeats[c(TRUE,FALSE),'pch'], col='blue')
# Fixed: this panel allocated vector('expression',2) although three legend
# entries are assigned (rp[3] silently grew the vector); use length 3 to
# match the other three panels of this figure.
rp <- vector('expression',3)
rp[1] <- substitute(expression(r^2 == r2),list(r2=(round(cor(raw.pstat5)[1,2]**2,3))**2,3))[2]
rp[2] <- substitute(expression(r^2 == r2),list(r2=(round(cor(nn.pstat5)[1,2]**2,3))**2,3))[2]
rp[3] <- substitute(expression(r^2 == r2),list(r2=(round(cor(baseline.nn.pstat5)[1,2]**2,3))**2,3))[2]
legend('topleft', legend=rp, text.col=c('black','red','blue'), bty='n')
abline(b=1, a=0)
dev.off()
# Draw one repeatability summary panel: one line per cell type across doses.
# `d` is a ddply result with a 'cell.type' column plus one column per dose
# (names containing 'U'); `main` is the panel title. Relies on the globals
# figure.labels (an iterator of panel labels), DOSES and CELL.TYPES.
fun.plot <- function(d,main) {
#d[is.na(d)] <- 0
# s is the x-position where the dose lines start: 1 when '0U' is dropped.
s <- 0
# NOTE(review): the guard is >1, so a single NA in '0U' keeps the column and
# range() below would return NA (breaking ylim) -- confirm whether >0 was meant.
if (sum(is.na(d[,'0U']))>1) {
d <- d[,-which('0U'==colnames(d))]
s <- 1
}
#plot(NULL, xlim=c(0,3), ylim=range(d[,grep('U',colnames(d))]), xaxt='n', xlab='dose', ylab=expression(r^2), main=main)
plot(NULL, xlim=c(0,3), ylim=range(d[,grep('U',colnames(d))]), xaxt='n', xlab='dose', ylab='RMSD', main=main)
# sequential panel label a), b), ... taken from the shared iterator
title(nextElem(figure.labels), adj=0)
axis(1, at=0:3, labels=DOSES)
# one coloured line per cell type (one row of d each)
sapply( 1:nrow(d), function(i) lines(s:3, d[i,grep('U',colnames(d))], col=i, lwd=2) )
legend('topleft', CELL.TYPES, text.col=1:4, bty='n')
}
# correlation
# Candidate repeatability statistics. Each assignment below OVERWRITES `fun`,
# so only the last definition (the one following this span) is in effect when
# the ddply summaries further down run. All of them pair odd positions of x
# (visit 1) with even positions (visit 2).
fun <- function(x) cor(x[c(TRUE,FALSE)],x[c(FALSE,TRUE)])**2
#100 - nrmsd
fun <- function(x) 100-100*sqrt(mean((x[c(TRUE,FALSE)]-x[c(FALSE,TRUE)])**2))/(max(x)-min(x))
#RMSD
fun <- function(x) log10(sqrt(mean((x[c(TRUE,FALSE)]-x[c(FALSE,TRUE)])**2)))
#Rc
fun <- function(x) {
x1 <- x[c(TRUE,FALSE)]
x2 <- x[c(FALSE,TRUE)]
return( ((sum((x1-mean(x))*(x2-mean(x)))/(length(x1)-1))/(sd(x1)*sd(x2)))**2 )
}
#
# Squared concordance-style statistic between the two repeat visits.
# Odd positions of x are visit-1 measurements, even positions visit-2
# (consecutive rows per individual). This definition supersedes the `fun`
# candidates above and is the one used by the summaries that follow.
fun <- function(x) {
  is_odd <- seq_along(x) %% 2 == 1
  day1 <- x[is_odd]
  day2 <- x[!is_odd]
  # (optional per-visit normalisation was considered but left disabled
  # in the original: day1/sum(day1), day2/sum(day2))
  centered_cross <- sum((day1 - mean(x)) * (day2 - mean(x)))
  (centered_cross / ((length(day1) - 1) * sd(day1) * sd(day2)))^2
}
### repeatability pSTAT5 MFI
# Four panels, one per phenotype definition, each showing the repeatability
# statistic `fun` (defined above) per cell type across doses via fun.plot().
# NOTE(review): nn.peak.pstat5mfi and its 'norm2.*' columns are not defined in
# the visible code -- presumably loaded elsewhere; verify before running.
pdf('~nikolas/Thesis/figures/repeatability-PSTAT5-MFI.pdf',width=10, height=8)
figure.labels <- iter(paste(letters,')',sep=''))
par(mfrow=c(2,2))
# nn.peak.pstat5mfi
d <- ddply( nn.peak.pstat5mfi,c('cell.type'), function(x) {
print(x$cell.type)
# restrict to the repeated individuals and their two visit dates
x <- x[which(x$individual %in% REPEATS$individual),]
x <- x[which(x$date %in% c(REPEATS$day1, REPEATS$day2)),]
x <- x[order(x$individual),]
print(x)
y <- sapply(grep('^norm2\\.',colnames(x)), function(i) fun(x[,i]))
names(y) <- DOSES
y
})
fun.plot(d,'NN.peak pSTAT5 MFI')
# raw.pstat5 mfi
d <- ddply( raw.pstat5mfi,c('cell.type'), function(x) {
print(x$cell.type)
x <- x[order(x$individual),]
print(x)
y <- sapply(grep('^X',colnames(x)), function(i) fun(x[,i]))
names(y) <- DOSES
y
})
fun.plot(d,'pSTAT5 MFI')
#p1 <- ggplot(melt(d),aes(x=cell.type,y=value,fill=variable))+geom_bar(position='dodge')+ggtitle('pSTAT5 MFI')+scale_fill_manual(values=blues4)+ylim(0,1)+ylab(expression(r^2))
# baseline raw.pstat5 mfi
d <- ddply( baseline.raw.pstat5mfi,c('cell.type'), function(x) {
x <- x[order(x$individual),]
y <- sapply(grep('^X',colnames(x)), function(i) fun(x[,i]))
names(y) <- DOSES
y
})
fun.plot(d,'base pSTAT5 MFI')
#p2 <- ggplot(melt(d),aes(x=cell.type,y=value,fill=variable))+geom_bar(position='dodge')+ggtitle('pSTAT5 MFI base')+scale_fill_manual(values=blues4)+ylim(0,1)+ylab(expression(r^2))
# pstat5 mfi
d <- ddply( nn.pstat5mfi,c('cell.type'), function(x) {
x <- x[order(x$individual),]
y <- sapply(grep('PSTAT5',colnames(x)), function(i) fun(x[,i]))
names(y) <- DOSES
y
})
fun.plot(d,'NN pSTAT5 MFI')
#p3 <- ggplot(melt(d),aes(x=cell.type,y=value,fill=variable))+geom_bar(position='dodge')+ggtitle('pSTAT5 MFI NN')+scale_fill_manual(values=blues4)+ylim(0,1)+ylab(expression(r^2))
# baseline pstat5 mfi
d <- ddply( baseline.nn.pstat5mfi,c('cell.type'), function(x) {
x <- x[order(x$individual),]
y <- sapply(grep('PSTAT5',colnames(x)), function(i) fun(x[,i]))
# no 0U value for the baseline-subtracted phenotype: pad so names align
y <- c(NA,y)
names(y) <- DOSES
y
})
fun.plot(d,'NN base pSTAT5 MFI')
#p4 <- ggplot(melt(d),aes(x=cell.type,y=value,fill=variable))+geom_bar(position='dodge')+ggtitle('pSTAT5 MFI NN base')+scale_fill_manual(values=blues4)+ylim(0,1)+ylab(expression(r^2))
#multiplot(p1, p2, p3, p4, cols=2)
dev.off()
### repeatability % pSTAT5+
# Same layout as the MFI figure above, for the % pSTAT5-positive phenotypes.
pdf('~nikolas/Thesis/figures/repeatability-PSTAT5-pos.pdf',width=10,height=8)
figure.labels <- iter(paste(letters,')',sep=''))
par(mfrow=c(2,2))
# raw.pstat5 pos
d <- ddply( raw.pstat5pos,c('cell.type'), function(x) {
x <- x[order(x$individual),]
y <- sapply(grep('^X',colnames(x)), function(i) fun(x[,i]))
names(y) <- DOSES
y
})
fun.plot(d,'% pSTAT5+')
#p1 <- ggplot(melt(d),aes(x=cell.type,y=value,fill=variable))+geom_bar(position='dodge')+ggtitle('% pSTAT5+')+scale_fill_manual(values=blues4)+ylim(0,1)+ylab(expression(r^2))
#
# pstat5 pos
d <- ddply( nn.pstat5pos,c('cell.type'), function(x) {
x <- x[order(x$individual),]
y <- sapply(grep('PSTAT5',colnames(x)), function(i) fun(x[,i]))
names(y) <- DOSES
y
})
fun.plot(d,'NN % pSTAT5+')
#p2 <- ggplot(melt(d),aes(x=cell.type,y=value,fill=variable))+geom_bar(position='dodge')+ggtitle('% pSTAT5+ NN')+scale_fill_manual(values=blues4)+ylim(0,1)+ylab(expression(r^2))
#d$pheno <- 'pstat5+'
# baseline pstat5 pos
d <- ddply( baseline.nn.pstat5pos,c('cell.type'), function(x) {
x <- x[order(x$individual),]
# leading NA: no 0U value for the baseline-subtracted phenotype
y <- c(NA, sapply(grep('PSTAT5',colnames(x)), function(i) fun(x[,i])))
names(y) <- DOSES
y
})
fun.plot(d,'NN base % pSTAT5+')
#p3 <- ggplot(melt(d),aes(x=cell.type,y=value,fill=variable))+geom_bar(position='dodge')+ggtitle('% pSTAT5+ NN base')+scale_fill_manual(values=blues4)+ylim(0,1)+ylab(expression(r^2))
#multiplot(p1, p2, p3,cols=3)
dev.off()
# RMSD root mean square deviation
# Per-cell-type RMSD between the two visits for each phenotype column: the
# inner aggregate squares the day1-day2 difference per individual, the outer
# one takes sqrt(mean(...)) over individuals.
# NOTE(review): pstat5mfi, baseline.pstat5mfi, pstat5pos and baseline.pstat5pos
# are not defined in the visible code (only raw./nn.-prefixed versions are) --
# confirm these names exist before running this section.
# pstat mfi
cbind(
aggregate(PSTAT5.1 ~ cell.type, aggregate(PSTAT5.1 ~ individual + cell.type, pstat5mfi, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x))),
'PSTAT5.2'=aggregate(PSTAT5.2 ~ cell.type, aggregate(PSTAT5.2 ~ individual + cell.type, pstat5mfi, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'PSTAT5.2'],
'PSTAT5.3'=aggregate(PSTAT5.3 ~ cell.type, aggregate(PSTAT5.3 ~ individual + cell.type, pstat5mfi, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'PSTAT5.3'],
'PSTAT5.4'=aggregate(PSTAT5.4 ~ cell.type, aggregate(PSTAT5.4 ~ individual + cell.type, pstat5mfi, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'PSTAT5.4']
)
# baseline pstat mfi
cbind(
aggregate(diff.PSTAT5.2 ~ cell.type, aggregate(diff.PSTAT5.2 ~ individual + cell.type, baseline.pstat5mfi, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x))),
'diff.PSTAT5.3'=aggregate(diff.PSTAT5.3 ~ cell.type, aggregate(diff.PSTAT5.3 ~ individual + cell.type, baseline.pstat5mfi, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'diff.PSTAT5.3'],
'diff.PSTAT5.4'=aggregate(diff.PSTAT5.4 ~ cell.type, aggregate(diff.PSTAT5.4 ~ individual + cell.type, baseline.pstat5mfi, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'diff.PSTAT5.4']
)
# pstat pos
cbind(
aggregate(PSTAT5.1 ~ cell.type, aggregate(PSTAT5.1 ~ individual + cell.type, pstat5pos, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x))),
'PSTAT5.2'=aggregate(PSTAT5.2 ~ cell.type, aggregate(PSTAT5.2 ~ individual + cell.type, pstat5pos, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'PSTAT5.2'],
'PSTAT5.3'=aggregate(PSTAT5.3 ~ cell.type, aggregate(PSTAT5.3 ~ individual + cell.type, pstat5pos, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'PSTAT5.3'],
'PSTAT5.4'=aggregate(PSTAT5.4 ~ cell.type, aggregate(PSTAT5.4 ~ individual + cell.type, pstat5pos, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'PSTAT5.4']
)
# pstat pos baseline corrected
cbind(
aggregate(diff.PSTAT5.2 ~ cell.type, aggregate(diff.PSTAT5.2 ~ individual + cell.type, baseline.pstat5pos, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x))),
'diff.PSTAT5.3'=aggregate(diff.PSTAT5.3 ~ cell.type, aggregate(diff.PSTAT5.3 ~ individual + cell.type, baseline.pstat5pos, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'diff.PSTAT5.3'],
'diff.PSTAT5.4'=aggregate(diff.PSTAT5.4 ~ cell.type, aggregate(diff.PSTAT5.4 ~ individual + cell.type, baseline.pstat5pos, function(x) (x[1]-x[2])**2), function(x) sqrt(mean(x)))[,'diff.PSTAT5.4']
)
|
053d840f9c29c9404c6d1344f57ee6a41e624f80
|
1fc421ae8d2d0cc87944ec21ea53b37b1ef02544
|
/man/Reserve.t_1.Rd
|
0f79e125fb226ce17e98f59f5e6de2cf7704b1f7
|
[] |
no_license
|
EduardoRamosP/MackNet
|
5f3df28a30385e83c4d3de0eb10606a416499c92
|
1281f90ccad86df2f496b6e1a33aeab18cf81807
|
refs/heads/master
| 2022-12-18T22:17:47.097987
| 2020-09-21T20:30:55
| 2020-09-21T20:30:55
| 296,931,038
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 389
|
rd
|
Reserve.t_1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SupportFunctions_I_Results_ChainLadder.R
\name{Reserve.t_1}
\alias{Reserve.t_1}
\title{Reserve.t_1}
\usage{
Reserve.t_1(Triangle.Incremental)
}
\arguments{
\item{Triangle.Incremental}{Upper and lower incremental triangle.}
}
\value{
Reserve evaluated in t+1.
}
\description{
It calculates the reserve in t+1.
}
|
51d2338ef7bd932060e1507a8ede8ebac49d8070
|
221072e790a97e05eea0debadbe87955b81ae2e1
|
/man/raw_signal-Squiggle-method.Rd
|
8a00ab1c6946188272efbc839fdfd239627c070b
|
[
"Apache-2.0"
] |
permissive
|
Shians/PorexploreR
|
2ca3d93956ba4592564c8019f1a76252a0696283
|
5086f3e704c6c6036a80d103752d9bce984a0e15
|
refs/heads/master
| 2020-06-06T05:51:25.714267
| 2019-06-20T07:44:16
| 2019-06-20T07:44:16
| 192,655,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 391
|
rd
|
raw_signal-Squiggle-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Squiggle-methods.R
\docType{methods}
\name{raw_signal,Squiggle-method}
\alias{raw_signal,Squiggle-method}
\title{Extract raw signal}
\usage{
\S4method{raw_signal}{Squiggle}(object)
}
\arguments{
\item{object}{the Squiggle object}
}
\value{
numeric vector of raw signal values
}
\description{
Extract raw signal
}
|
21cbe0ab85c242aa70dafa096bb13931de79029c
|
19c5698bddc8d527b262d6fb01e1691ea13c53c3
|
/man/mergeData.Rd
|
938746958a86ad40672cd56d82d990bb8c6c275d
|
[] |
no_license
|
Michael-L-Miller/CoveRageAnalysis
|
ebcbfebb858708860863bc3bf15ebdebdd827c2e
|
264312ea14ebcc9e0472659167d7718efa1ff5d9
|
refs/heads/master
| 2020-03-11T05:08:51.594754
| 2018-06-20T20:40:42
| 2018-06-20T20:40:42
| 129,794,506
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 242
|
rd
|
mergeData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mergeData.R
\name{mergeData}
\alias{mergeData}
\title{Merge data}
\usage{
mergeData()
}
\description{
This function merges amplicon- and gene-wise coverage data.
}
|
10c804ddcf16270a45a1707fe7ada9ee3d15068a
|
b80536b66314869762808f385257246a26d121c5
|
/DataS1/R/z-decision-functions.R
|
00fcdebc191c46776d39c80b93008c624b38aecb
|
[] |
no_license
|
kbanner14/SuppS2_IrvineEtAl-ecosphere
|
21ad776eafd182f83049808e27949a64b7b6268a
|
fa66b4ada7f2a285c95392ceb3f407cf69abaf13
|
refs/heads/main
| 2023-04-16T19:53:07.992828
| 2022-04-07T19:10:41
| 2022-04-07T19:10:41
| 465,492,153
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,917
|
r
|
z-decision-functions.R
|
#####################################
## Simulation Processing Functions ##
#####################################
# process results with a specific z_cutoff
# for Bayesian models
# Summarise posterior occupancy (Z) estimates from a Bayesian (nimble) fit
# into a per-species, per-iteration decision table. A site is called occupied
# when its posterior mean exceeds `z_cutoff`; the 2x2 decision-vs-truth
# categories are counted and converted to conditional probabilities given the
# true state. Requires dplyr (group_by/summarise/full_join) to be attached.
z_table_nimble <- function(sim_results, z_cutoff = 0.5){
  # Keep only rows whose parameter name is a Z state ("Z[site, spp]").
  z_rows <- which(substring(sim_results$param, 1, 1) == "Z")
  zdat <- sim_results[z_rows, ]
  # Species index from the second component of "Z[i, j]".
  # NOTE(review): substring(, 2, 2) reads a single character, so this assumes
  # fewer than 10 species -- confirm against the simulation setup.
  zdat$species <- sapply(
    strsplit(zdat$param, ","),
    function(piece) as.numeric(substring(piece[2], 2, 2))
  )
  n_iter <- max(zdat$sim_iter)
  n_spp <- max(zdat$species)
  n_site <- nrow(zdat) / (n_iter * n_spp)
  # Threshold the posterior mean into a 0/1 occupancy call.
  zdat$z_decision <- ifelse(zdat$Mean > z_cutoff, 1, 0)
  # Confusion-table category labelled decision.truth (e.g. "1.0").
  zdat$z_decide.dg <- interaction(zdat$z_decision, zdat$truth)
  # Count each category per species and simulation iteration.
  tab <- zdat %>%
    group_by(species, sim_iter, z_decide.dg) %>%
    summarise(count = length(z_decide.dg),
              z_decision = unique(z_decision),
              z_dg = unique(truth),
              model = model[1])
  tab <- tab[order(tab$sim_iter), ]
  # Totals of truly occupied (z_1) and empty (z_0) sites per species/iteration.
  totals <- zdat %>%
    group_by(species, sim_iter) %>%
    summarise(z_1 = sum(truth), z_0 = n_site - sum(truth))
  out <- full_join(tab, totals)
  # Conditional probability of each decision given the true state.
  out$cond_prob_z <- ifelse(out$z_dg == 1,
                            out$count / out$z_1,
                            out$count / out$z_0)
  return(out)
}
# process results from MLESite, with option to add
# more alpha-level decisions based on the LRT p-value
#
# Builds the decision-by-truth summary for the site-level MLE approach.
# The LRT-based occupancy decision (Y_noFP_sl) is crossed with the true
# Z state for every alpha level. With NA_combine = TRUE, sites whose LRT
# decision is NA are treated as "not occupied" (0). add_alpha_vec
# supplies extra alpha levels; decisions for those are re-derived from
# the stored LRT p-values (lrt_pvalue_sl).
# Returns: data frame of counts and conditional probabilities per
#   species x sim_iter x alpha x decision-truth category.
z_table_MLEsite <- function(sim_results, NA_combine = FALSE,
                            add_alpha_vec = NULL){
  # create table for each iteration
  # head(sim_results)
  if(is.null(add_alpha_vec)){
    if(NA_combine == TRUE){
      # recode NA decisions as 0 before crossing with the truth
      sim_results$mle_decide <- replace_na(sim_results$Y_noFP_sl,
                                           replace = 0)
      sim_results$z_decide.dg <- interaction(sim_results$mle_decide,
                                             sim_results$Z_true)
      # sites per species per iteration per alpha (balanced design)
      n_site <- nrow(sim_results)/(max(sim_results$sim_iter)*
                                     max(sim_results$species)*length(unique(sim_results$alpha)))
      df_summ <- sim_results %>% group_by(z_decide.dg,
                                          species,
                                          sim_iter, alpha) %>%
        summarise(count = length(z_decide.dg),
                  z_dg = unique(Z_true),
                  model = "MLESite",
                  alpha = unique(alpha))
      df_summ <- df_summ[order(df_summ$sim_iter), ]
    } else {
      # NA decisions are kept as NA; their rows drop out of the 2x2
      # categories via interaction()
      sim_results$z_decide.dg <- interaction(sim_results$Y_noFP_sl,
                                             sim_results$Z_true)
      # check...
      # car::some(sim_results)
      n_site <- nrow(sim_results)/(max(sim_results$sim_iter)*
                                     max(sim_results$species)*length(unique(sim_results$alpha)))
      df_summ <- sim_results %>% group_by(z_decide.dg,
                                          species,
                                          sim_iter, alpha) %>%
        summarise(count = length(z_decide.dg),
                  z_decision = unique(Y_noFP_sl),
                  z_dg = unique(Z_true),
                  # prop = length(z_decide.dg)/n_site, C
                  model = "MLESite",
                  alpha = unique(alpha))
      df_summ <- df_summ[order(df_summ$sim_iter), ]
    }
  } else {
    # add new Y_noFP_sl for more alpha vecs
    # find out how many rows per alpha
    n_row <- nrow(sim_results %>% filter(alpha == 0.05))
    # order the output, and grab all of the results
    sim_ordered <- sim_results[order(sim_results$alpha), ]
    # NOTE(review): column positions 1:8 / 10:12 are hard-coded here and
    # assume Y_noFP_sl sits in column 9 -- TODO confirm against the
    # MLESite output layout.
    mle_results <- cbind(sim_ordered[1:n_row, c(1:8)], Y_noFP_sl = NA,
                         sim_ordered[1:n_row, c(10:12)])
    n_alpha_add <- length(add_alpha_vec)
    mle_add <- data.frame()
    # re-derive the LRT decision at each additional alpha level
    for(i in 1:n_alpha_add){
      mle_results$alpha <- add_alpha_vec[i]
      mle_results$Y_noFP_sl <- ifelse(mle_results$lrt_pvalue_sl < add_alpha_vec[i],
                                      1, 0)
      mle_add <- rbind(mle_add, mle_results)
    }
    sim_results <- rbind(sim_results, mle_add)
    if(NA_combine == TRUE){
      sim_results$mle_decide <- replace_na(sim_results$Y_noFP_sl,
                                           replace = 0)
      sim_results$z_decide.dg <- interaction(sim_results$mle_decide,
                                             sim_results$Z_true)
      n_site <- nrow(sim_results)/(max(sim_results$sim_iter)*
                                     max(sim_results$species)*length(unique(sim_results$alpha)))
      df_summ <- sim_results %>% group_by(z_decide.dg,
                                          species,
                                          sim_iter, alpha) %>%
        summarise(count = length(z_decide.dg),
                  # got rid of z_decision b/c makes duplicates
                  z_dg = unique(Z_true),
                  model = "MLESite",
                  alpha = unique(alpha))
      df_summ <- df_summ[order(df_summ$sim_iter), ]
    } else {
      sim_results$z_decide.dg <- interaction(sim_results$Y_noFP_sl,
                                             sim_results$Z_true)
      # check...
      # car::some(sim_results)
      n_site <- nrow(sim_results)/(max(sim_results$sim_iter)*
                                     max(sim_results$species)*length(unique(sim_results$alpha)))
      df_summ <- sim_results %>% group_by(z_decide.dg,
                                          species,
                                          sim_iter, alpha) %>%
        summarise(count = length(z_decide.dg),
                  z_decision = unique(Y_noFP_sl),
                  z_dg = unique(Z_true),
                  # prop = length(z_decide.dg)/n_site, C
                  model = "MLESite",
                  alpha = unique(alpha))
      df_summ <- df_summ[order(df_summ$sim_iter), ]
    }
  }
  # count # z = 1 and z = 0 for each species for each sim_iter
  df_summ_z <- sim_results %>% group_by(species, sim_iter, alpha) %>%
    summarise(z_1 = sum(Z_true), z_0 = n_site - sum(Z_true))
  # join with counts from 2x2 table
  df_out <- full_join(df_summ, df_summ_z)
  # conditional probability of each decision given the true state
  df_out$cond_prob_z <- ifelse(df_out$z_dg == 1, df_out$count/df_out$z_1,
                               df_out$count/df_out$z_0)
  return(df_out)
}
# apply results from a simulation run
# to the post processing functions for MLESite and
# Bayesian models, return one data frame with site-level
# decision summaries
#
# sim_run: list with elements CtDetection, Remove, Naive (Bayesian
#   output for z_table_nimble) and MLESite (for z_table_MLEsite).
# z_cutoff, NA_combine, add_alpha_vec: forwarded to the respective
#   summary functions.
# Returns: one stacked data frame with factor columns model, species,
#   alpha.
table_process <- function(sim_run, z_cutoff = 0.5, NA_combine = FALSE,
                          add_alpha_vec = NULL){
  # set up data frame to summarize decision results
  # create lists of tables for each model
  ct_det <- z_table_nimble(sim_results = sim_run$CtDetection,
                           z_cutoff = z_cutoff)
  # alpha only applies to MLESite; fill with NA on every Bayesian table
  # so all four tables share the same columns and rbind() succeeds.
  # BUG FIX: previously `ct_det$alpha <- NA` was repeated three times
  # and `remove`/`naive` never received the column.
  ct_det$alpha <- NA
  remove <- z_table_nimble(sim_results = sim_run$Remove,
                           z_cutoff = z_cutoff)
  remove$alpha <- NA
  naive <- z_table_nimble(sim_results = sim_run$Naive,
                          z_cutoff = z_cutoff)
  naive$alpha <- NA
  mle <- z_table_MLEsite(sim_results = sim_run$MLESite,
                         NA_combine = NA_combine, add_alpha_vec = add_alpha_vec)
  df_out <- rbind(ct_det, remove, naive, mle)
  df_out$model <- factor(df_out$model)
  df_out$species <- factor(df_out$species)
  df_out$alpha <- factor(df_out$alpha)
  return(df_out)
}
# function to create one big data frame with
# conditional probs for site-level decisions
# for MLESite and Bayesian approaches --- calls table_process
#
# Runs table_process() once per z cutoff, stacks the results, and splits
# them into Bayesian (nimble) vs. MLESite summaries.
# Returns: list with elements "nimble_compare" and "MLESite_compare".
compare_threshold <- function(sim_run, z_cutoffs = c(0.5, 0.6, 0.7, 0.8),
                              NA_combine = FALSE, add_alpha_vec = NULL) {
  # one summary table per cutoff, each tagged with its cutoff value
  per_cutoff <- lapply(z_cutoffs, function(zc) {
    tab <- table_process(sim_run = sim_run, z_cutoff = zc,
                         NA_combine = NA_combine,
                         add_alpha_vec = add_alpha_vec)
    tab$z_cutoff <- zc
    tab
  })
  df_compare <- do.call(rbind, per_cutoff)
  df_compare$z_cutoff <- factor(df_compare$z_cutoff)
  # the cutoff is meaningless for the MLE approach, so blank it there
  df_compare[which(df_compare$model == "MLESite"), "z_cutoff"] <- NA
  df_nimble <- df_compare %>% filter(model != "MLESite")
  df_mle <- df_compare %>% filter(model == "MLESite")
  list("nimble_compare" = df_nimble,
       "MLESite_compare" = df_mle)
}
|
bcad1a4bb40416a9de8b0f1a26b82be1aff6a0aa
|
d9440aa266da5fc208d744421cbe2c5c1ec9eaf5
|
/man/forest.dat.Rd
|
d5096796933567315b2342d89bae3090b83435bb
|
[] |
no_license
|
leonpheng/lhplot
|
a76a424aa77823d97b54a2764175a61dd4d4033d
|
c28f035e53c08a2f9d248e05b3d8a3d0f041e977
|
refs/heads/master
| 2023-08-16T11:04:04.315247
| 2023-08-04T19:53:42
| 2023-08-04T19:53:42
| 120,557,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,158
|
rd
|
forest.dat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lhplot.R
\name{forest.dat}
\alias{forest.dat}
\title{Prepare dataset for Forest plot in Shiny App}
\usage{
forest.dat(
data = t,
parameter = c("Cmax..ng.mL.", "AUCtau..ng.h.mL."),
catcov = c("Cohort", "WT_group"),
stats = c("quantile(x,0.5)=mid", "quantile(x,0.05)=lower", "quantile(x,0.95)=upper",
"length(x)=n"),
N = T
)
}
\arguments{
\item{data}{data frame (prepare categorical covariates before hand)}
\item{parameter}{list of parameters}
\item{catcov}{list categorical covariates.}
\item{stats}{define statistics for mid, lower and upper.}
\item{N}{number of subjects to be included}
}
\description{
Generate dataset for coveffectsplot
}
\examples{
dat<-forest.dat(data=t,parameter=c("Cmax..ng.mL.","AUCtau..ng.h.mL."),catcov=c("Cohort","WT_group"),stats=c("quantile(x,0.5)=mid","quantile(x,0.05)=lower","quantile(x,0.95)=upper","length(x)=n"))
# Save the dataset as CSV, then open the Shiny app with
# coveffectsplot::run_interactiveforestplot(). The package can be
# installed from GitHub: devtools::install_github('smouksassi/coveffectsplot')
}
\keyword{forest.dat}
|
a2fd5de15828b568d151d8a4f0af49ed54493f2b
|
ab7d15d06ed92cd51cc383dc9e98ae2a8fa41eaa
|
/man/node_aes.Rd
|
617666daec35268f8577f502f44176fc1596378c
|
[
"MIT"
] |
permissive
|
rich-iannone/DiagrammeR
|
14c46eb994eb8de90c50166a5d2d7e0668d3f7c5
|
218705d52d445c5d158a04abf8107b425ea40ce1
|
refs/heads/main
| 2023-08-18T10:32:30.784039
| 2023-05-19T16:33:47
| 2023-05-19T16:33:47
| 28,556,914
| 1,750
| 293
|
NOASSERTION
| 2023-07-10T20:46:28
| 2014-12-28T08:01:15
|
R
|
UTF-8
|
R
| false
| true
| 6,534
|
rd
|
node_aes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/node_aes.R
\name{node_aes}
\alias{node_aes}
\title{Insert node aesthetic attributes during node creation}
\usage{
node_aes(
shape = NULL,
style = NULL,
penwidth = NULL,
color = NULL,
fillcolor = NULL,
image = NULL,
fontname = NULL,
fontsize = NULL,
fontcolor = NULL,
peripheries = NULL,
height = NULL,
width = NULL,
x = NULL,
y = NULL,
group = NULL,
tooltip = NULL,
xlabel = NULL,
URL = NULL,
sides = NULL,
orientation = NULL,
skew = NULL,
distortion = NULL,
gradientangle = NULL,
fixedsize = NULL,
labelloc = NULL,
margin = NULL
)
}
\arguments{
\item{shape}{The shape to use for the node. Some possible \code{shape} types
include: \code{circle}, \code{rectangle}, \code{triangle}, \code{plaintext}, \code{square}, and
\code{polygon}.}
\item{style}{The node line style. The \code{style} types that can be used are:
\code{filled}, \code{invisible}, \code{diagonals}, \code{rounded}, \code{dashed}, \code{dotted}, \code{solid},
and \code{bold}.}
\item{penwidth}{The thickness of the stroke line (in pt units) for the node
shape. The default value is \code{1.0}.}
\item{color}{The color of the node's outline. Can be any of the named colors
that R knows about (obtained using the \code{colors()} function), or, a
hexadecimal color code.}
\item{fillcolor}{The color with which to fill the shape of the node. Can be
any of the named colors that R knows about (obtained using the \code{colors()}
function), or, a hexadecimal color code.}
\item{image}{A reference to an image location.}
\item{fontname}{The name of the system font that will be used for any node
text.}
\item{fontsize}{The point size of the font used for any node text.}
\item{fontcolor}{The color used for any node text. Can be any of the named
colors that R knows about (obtained using the \code{colors()} function), or, a
hexadecimal color code.}
\item{peripheries}{The repeated number of node shapes (of increasing size) to
draw at the node periphery.}
\item{height}{The height of the node shape, in inches. The default value is
\code{0.5} whereas the minimum value is \code{0.02}. This is understood as the
initial, minimum height of the node. If \code{fixedsize} is set to \code{TRUE}, this
will be the final height of the node. Otherwise, if the node label requires
more height to fit, the node's height will be increased to contain the
label.}
\item{width}{The width of the node shape, in inches. The default value is
\code{0.5} whereas the minimum value is \code{0.02}. This is understood as the
initial, minimum width of the node. If \code{fixedsize} is set to \code{TRUE}, this
will be the final width of the node. Otherwise, if the node label requires
more width to fit, the node's width will be increased to contain the label.}
\item{x}{The fixed position of the node in the x direction. Any integer-based
or floating point value will be accepted.}
\item{y}{The fixed position of the node in the y direction. Any integer-based
or floating point value will be accepted.}
\item{group}{The node group.}
\item{tooltip}{Text for a node tooltip.}
\item{xlabel}{External label for a node. The label will be placed outside of
the node but near it. These labels are added after all nodes and edges have
been placed. The labels will be placed so that they do not overlap any node
or label. This means it may not be possible to place all of them.}
\item{URL}{A URL to associate with a node. Upon rendering the plot, clicking
nodes with any associated URLs will open the URL in the default browser.}
\item{sides}{When using the shape \code{polygon}, this value will provide the
number of sides for that polygon.}
\item{orientation}{This is the angle, in degrees, that is used to rotate
nodes that have a \code{shape} of \code{polygon}. Note that for any of the polygon
shapes (set by the \code{sides} node attribute), a value for \code{orientation} that
is \code{0} results in a flat base.}
\item{skew}{A \code{0-1} value that will result in the node shape being skewed to
the right (from bottom to top). A value in the range \code{0} to \code{-1} will skew
the shape to the left.}
\item{distortion}{A distortion factor that is used only when a \code{shape} of
\code{polygon} is used. A \code{0-1} value will increasingly result in the top part
of the node polygon shape to be larger than the bottom. Moving from \code{0}
toward \code{-1} will result in the opposite distortion effect.}
\item{gradientangle}{The path angle for the node color fill gradient.}
\item{fixedsize}{If set to \code{FALSE}, the size of a node is determined by
smallest width and height needed to contain its label, if any, with a
margin specified by the \code{margin} node attribute. The width and height must
also be at least as large as the sizes specified by the \code{width} and
\code{height} node attributes, which specify the minimum values. If set to
\code{TRUE}, the node size is entirely specified by the values of the \code{width}
and \code{height} node attributes (i.e., the node is not expanded in size to
contain the text label).}
\item{labelloc}{Sets the vertical placement of labels for nodes and clusters.
This attribute is used only when the height of the node is larger than the
height of its label. The \code{labelloc} node attribute can be set to either \code{t}
(top), \code{c} (center), or \code{b} (bottom). By default, the label is vertically
centered.}
\item{margin}{Sets the amount of space around the node's label. By default,
the value is \verb{0.11,0.055}.}
}
\description{
This helper function should be invoked to provide values for the namesake
\code{node_aes} argument, which is present in any function where nodes are
created.
}
\examples{
# Create a new graph and add
# a path with several node
# aesthetic attributes
graph <-
create_graph() \%>\%
add_path(
n = 3,
type = "path",
node_aes = node_aes(
shape = "circle",
x = c(1, 3, 2),
y = c(4, -1, 3)
)
)
# View the graph's internal
# node data frame; the node
# aesthetic attributes have
# been inserted
graph \%>\% get_node_df()
# Create a new graph which is
# fully connected
graph <-
create_graph() \%>\%
add_full_graph(
n = 4,
node_data = node_data(value = 1:4),
node_aes = node_aes(
x = c(2, 1, 3, 2),
y = c(3, 2, 2, 1)
),
edge_aes = edge_aes(color = "blue")
)
}
\seealso{
Other Aesthetics:
\code{\link{edge_aes}()},
\code{\link{node_edge_aes_data}}
}
\concept{Aesthetics}
|
76777c1f8aa5c407f337b021c2381c18f4003a4c
|
a41ff9969e033f0f11518813a5d095f79846046e
|
/R/test_and_visuals.R
|
6574c316f0c4030d64875e26dfa13c5066360961
|
[] |
no_license
|
cran/visStatistics
|
1b126f8732f58f8c25f4149432bff7a25ea954d4
|
db39c0b649f9fb607b18e7cfff2b9c9e0927d497
|
refs/heads/master
| 2023-03-10T07:11:11.718383
| 2021-02-12T10:10:02
| 2021-02-12T10:10:02
| 340,028,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 44,277
|
r
|
test_and_visuals.R
|
# MIT License-----
#Copyright (c) 2021 Sabine Schilling
# Plotting functions----
# Testing for normality and visualization ----
# Plot a histogram with fitted/estimated density curves and a normal
# Q-Q plot for `x`, annotate with Shapiro-Wilk and Anderson-Darling
# p-values, and return both test results.
#
# x: numeric vector (NAs are dropped before testing/plotting).
# y_axis_hist: currently unused -- retained for backward compatibility.
# Returns: list with elements "Anderson-Darling" and "Shapiro".
# Requires nortest::ad.test; graphical parameters are restored on exit.
test_norm_vis = function(x, y_axis_hist = c(0, 0.04)) {
  #store default graphical parameters------
  oldparnormvis <- par(no.readonly = TRUE)
  on.exit(par(oldparnormvis))
  par(mfrow = c(1, 2), oma = c(0, 0, 3, 0))
  #Remove NA from x
  x <- x[!is.na(x)]
  n = length(x)
  # density of a normal with the sample's mean/sd (the "fitted" curve)
  norm_dens = function(z) {
    dnorm(z, mean(x), sd(x))
  }
  ymax = max(norm_dens(x))
  #Plot histogramm of raw data
  otto = hist(
    x,
    freq = FALSE,
    col = "grey",
    breaks = "Sturges",
    xlim = c(
      mean(x, na.rm = T) - 5 * sd(x, na.rm = T),
      mean(x, na.rm = T) +
        5 * sd(x, na.rm = T)
    ),
    ylim = c(0, 1.2 * ymax)
  )
  maxhist = max(otto$density)
  #normal distribution with mean and sd of given distribution
  curve(norm_dens,
        col = "red",
        add = TRUE,
        lwd = 2)
  #par(new = TRUE) #the next high-level plotting command does not clean the frame before drawing
  #as if it were on a new device.
  lines(density(x),col = "blue")  # kernel density estimate ("estimated")
  legend(
    "topright",
    c("fitted", "estimated"),
    lty = 1,
    lwd = 2,
    col = c("red", "blue"),
    bty = "n"
  )
  box() #frame around current plot
  qqnorm(x)
  qqline(x, col = "red", lwd = 2)
  # NOTE(review): the variable is named KS, but this is the
  # Anderson-Darling test (nortest::ad.test), as labelled in the output.
  KS = ad.test(x)
  p_KS = signif(KS$p.value, 2)
  SH = shapiro.test(x)
  p_SH = signif(SH$p.value, 2)
  mtext(
    paste(
      "Shapiro-Wilk: p = ",
      p_SH,
      "\n Anderson-Darling: p = ",
      p_KS,
      "\n Nullhypothesis: Data is normally distributed"
    ),
    outer = TRUE
  )
  my_list = list("Anderson-Darling" = KS, "Shapiro" = SH)
  return(my_list)
}
###### Two-Sample t-Test ###############################
# Visual two-sample t-test: boxplot + strip chart of both groups with
# group means and pairwise-corrected confidence intervals, plus the
# t-test result as a margin annotation.
#
# samples: numeric response vector.
# fact: factor with two levels splitting `samples`.
# alternative, mu, paired, var.equal, conf.level: forwarded to t.test().
# samplename, factorname: label text for axes and the annotation.
# Returns: list with variable names, the t.test result, and Shapiro-Wilk
#   normality checks of both groups (via test_norm()).
two_sample_tTest = function(samples,
                            fact,
                            alternative = c("two.sided", "less", "greater"),
                            mu = 0,
                            paired = FALSE,
                            var.equal = FALSE,
                            conf.level = 0.95,
                            samplename = "",
                            factorname = "")
{
  # restore graphical parameters on exit
  oldpar <- par(no.readonly = TRUE)
  on.exit(par(oldpar))
  alternative <- match.arg(alternative)
  if (!missing(mu) && (length(mu) != 1 || is.na(mu)))
    return(warning("'mu' must be a single number"))
  if (!missing(conf.level) &&
      (length(conf.level) != 1 || !is.finite(conf.level) ||
       conf.level < 0 || conf.level > 1))
    return(warning("'conf.level' must be a single number between 0 and 1"))
  alpha = 1 - conf.level
  levels = unique(sort(fact))
  twosamples = create_two_samples_vector(samples, fact)
  x = twosamples$sample1and2
  x1 = twosamples$sample1
  x2 = twosamples$sample2
  #Check normality of both samples-----
  p1 = test_norm(twosamples$sample1)
  p2 = test_norm(twosamples$sample2)
  #margins of y -axis
  lower = 0.05
  upper = 0.1
  margins = calc_min_max_of_y_axis(x, lower, upper)
  mi = margins[[1]]
  ma = margins[[2]]
  x = cbind(x, factor(c(rep(1, length(
    x1
  )), rep(2, length(
    x2
  )))))
  par(oma = c(0, 0, 3, 0))
  b = boxplot(
    samples ~ fact,
    lwd = 0.5,
    xlab = factorname,
    ylab = samplename,
    ylim = c(mi, ma),
    varwidth = T,
    col = colorscheme(1)
  )
  stripchart(
    samples ~ fact,
    vertical = TRUE,
    xlim = c(0, 3),
    ylim = c(mi, ma),
    #col = c("grey70", "grey80"),
    col = colorscheme(2),
    axes = FALSE,
    method = "jitter",
    add = TRUE
  )
  axis(side = 2)
  axis(side = 1,
       at = c(1, 2),
       labels = levels)
  box()
  # group means, marked in red
  points(1,
         mean(x1),
         col = 2,
         pch = 1,
         lwd = 3)
  points(2,
         mean(x2),
         col = 2,
         pch = 1,
         lwd = 3)
  # corrected per-comparison alpha: two tests at alpha_c give an overall
  # family confidence of conf.level (e.g. 95 % -> 97.5 % per interval)
  alpha_c = 1 - sqrt(1 - alpha)
  correction1 = qt(1 - 0.5 * alpha_c, length(x1) - 1) * sd(x1) / sqrt(length(x1))
  correction2 = qt(1 - 0.5 * alpha_c, length(x2) - 1) * sd(x2) / sqrt(length(x2))
  arrows(
    1,
    mean(x1, na.rm = T) + correction1,
    1,
    mean(x1, na.rm = T) - correction1,
    angle = 90,
    code = 3,
    # half confidence interval
    col = 2,
    lty = 1,
    lwd = 2,
    length = 0.1
  )
  arrows(
    2,
    mean(x2) + correction2,
    2,
    mean(x2) - correction2,
    angle = 90,
    code = 3,
    col = 2,
    lty = 1,
    lwd = 2,
    length = 0.1
  )
  abline(
    h = mean(x1, na.rm = T) + correction1,
    col = "grey30",
    lty = 2,
    lwd = 1
  )
  abline(
    h = mean(x1, na.rm = T) - correction1,
    col = "grey30",
    lty = 2,
    lwd = 1
  )
  text(1:length(b$n), c(ma, ma), paste("N=", b$n))
  # BUG FIX: mu, paired and var.equal were previously ignored (hard-coded
  # to their defaults in this call); they are now forwarded to t.test().
  # Default arguments preserve the old behavior exactly.
  t = t.test(
    x1,
    x2,
    alternative = alternative,
    mu = mu,
    conf.level = conf.level,
    paired = paired,
    var.equal = var.equal,
    na.action = na.omit
  )
  p_value = t$p.value
  p_value = signif(p_value, 3)
  # verbal form of the null hypothesis for the annotation
  compare = side_of_nh(alternative)
  mtext(
    paste(
      t$method,
      "p value = ",
      p_value,
      "null hypothesis:",
      "\n mean",
      samplename,
      "of",
      factorname,
      unique(fact)[1],
      compare,
      "mean",
      samplename,
      "of",
      factorname,
      unique(fact)[2]
    )
  )
  my_list <-
    list(
      "dependent variable (response)" = samplename,
      "indepedent variables (parameters)" = unique(fact),
      "t-test-statistics" = t,
      "Shapiro-Wilk-test_sample1" = p1,
      "Shapiro-Wilk-test_sample2" = p2
    )
  return(my_list)
}
# Two-Sample Wilcoxon-Test ###############################
#One function with flags for greater, less, two sided and notch
#
# Visual two-sample Wilcoxon rank-sum test: strip chart + (optionally
# notched) boxplot of both groups with the test result in the margin.
#
# samples: numeric response vector.
# fact: factor with two levels splitting `samples`.
# alternative, conf.level: forwarded to wilcox.test().
# notchf: draw notched boxplots when TRUE.
# samplename, factorname: label text; cex scales the annotation.
# Returns: list with variable names, the wilcox.test result, and the
#   boxplot statistics.
two_sample_WilcoxonTest = function(samples,
                                   fact,
                                   alternative = c("two.sided", "less", "greater"),
                                   conf.level = 0.95,
                                   notchf = F,
                                   samplename = "",
                                   factorname = "",
                                   cex = 1) {
  oldparwilcox <- par(no.readonly = TRUE) #make a copy of current values
  on.exit(par(oldparwilcox))
  alternative <- match.arg(alternative)
  #Error handling ----
  if (!((length(conf.level) == 1L) && is.finite(conf.level) &&
        (conf.level > 0) && (conf.level < 1)))
    return(warning("'conf.level' must be a single number between 0 and 1"))
  if (!is.numeric(samples))
    return(warning("'samples' must be numeric"))
  if (!is.null(fact)) {
    if (!is.factor(fact))
      return(warning("'fact' must be factorial"))
  }
  #Store default graphical parameter
  alpha = 1 - conf.level
  #Define color palette
  colortuple2 = colorscheme(2)
  # Create to numeric vectors
  twosamples = create_two_samples_vector(samples, fact)
  x = twosamples$sample1and2
  x1 = twosamples$sample1
  x2 = twosamples$sample2
  upper = 0.2
  lower = 0.05
  res = calc_min_max_of_y_axis(x, upper, lower)
  mi = res[[1]]
  ma = res[[2]]
  x = cbind(x,
            factor(c(rep(1, length(
              x1
            )),
            rep(2, length(
              x2
            )))))
  b <- boxplot(samples ~ fact, plot = 0) #holds the counts
  par(oma = c(0, 0, 3, 0)) #bottom, left, top, right outer margins
  stripchart(
    samples ~ fact,
    vertical = TRUE,
    xlim = c(0, 3),
    #ylim = c(mi, ma),
    method = "jitter",
    col = colorscheme(2),
    ylim = c(0, ma),
    ylab = samplename,
    xlab = factorname
  )
  boxplot(
    samples ~ fact,
    notch = notchf,
    varwidth = T,
    col = colorscheme(1),
    ylim = c(0, ma),
    add = T
  )
  #text(1:length(b$n), b$stats[5,]+1, paste("n=", b$n))
  text(1:length(b$n), c(ma, ma), paste("N =", b$n))
  t = wilcox.test(samples ~ fact, alternative = alternative, na.action = na.omit)
  p_value = t$p.value
  #p_value = signif(p_value,5)
  p_value = formatC(signif(p_value, digits = 2))
  # verbal form of the null hypothesis for the annotation
  compare = side_of_nh(alternative)
  if (factorname == "match")
  {
    prefix = "of matched"
  } else{
    prefix = character()
  }
  # BUG FIX: annotation previously read "p=value = " (typo).
  mtext(
    paste(
      t$method,
      "p-value = ",
      p_value,
      " null hypothesis:
      \n median",
      samplename,
      "of",
      prefix,
      unique(fact)[1],
      compare,
      "median",
      samplename,
      prefix,
      unique(fact)[2]
    ) ,
    cex = cex,
    outer = TRUE
  )
  my_list <-
    list(
      "dependent variable (response)" = samplename,
      "indepedent variables (parameters)" = unique(fact),
      "statsWilcoxon" = t,
      "statsBoxplot" = b
    )
  return(my_list)
}
# Two-Sample F-Test ###############################
#subtract means; two lines according to variances.
#
# Visual two-sample F-test for equality of variances: both samples are
# centered, plotted as strip charts, and vertical bars of length equal
# to each group's variance are drawn; the var.test result is annotated.
#
# samples: numeric response vector.
# fact: factor with two levels splitting `samples`.
# conf.int: confidence level (default 0.95).
# alternative: forwarded to var.test().
two_sample_FTest = function(samples,
                            fact,
                            conf.int = 0.95,
                            alternative = "two.sided") {
  # if (missing(conf.int)) conf.int = 0.95
  # if (missing(alternative)) alternative = "two.sided"
  #Store default graphical parameter
  oldparftest <- par(no.readonly = TRUE)
  on.exit(par(oldparftest))
  # BUG FIX: previously `alpha = 1 - confint`, which referenced the
  # stats::confint function instead of the conf.int argument and
  # errored at run time.
  alpha = 1 - conf.int
  levels = unique(sort(fact))
  x1 = samples[fact == levels[1]]
  x2 = samples[fact == levels[2]]
  # center both groups so only the spread differs visually
  x1 = x1 - mean(x1, na.rm = T)
  x2 = x2 - mean(x2)
  x = c(x1, x2)
  spread = max(x) - min(x)
  spread = max(spread, var(x1), var(x2))
  mi = min(x) - 0.3 * spread
  ma = max(x) + 0.3 * spread
  x = cbind(x, factor(c(rep(1, length(
    x1
  )), rep(2, length(
    x2
  )))))
  par(oma = c(0, 0, 3, 0))
  stripchart(
    x[, 1] ~ x[, 2],
    vertical = TRUE,
    xlim = c(0.5, 3),
    ylim = c(mi, ma),
    col = c("grey70", "grey80"),
    ylab = "centered samples",
    xlab = "",
    axes = FALSE
  )
  axis(side = 2)
  axis(side = 1,
       at = c(1, 2),
       labels = levels)
  box()
  # bars of total length var(x1) / var(x2), centered at zero
  lines(
    x = c(1.1, 1.1),
    y = c(-0.5 * var(x1), 0.5 * var(x1)),
    col = "blue",
    lwd = 5
  )
  lines(
    x = c(1.9, 1.9),
    y = c(-0.5 * var(x2), 0.5 * var(x2)),
    col = "blue",
    lwd = 5
  )
  legend(
    "topright",
    inset = 0.05,
    c("variances"),
    col = c("blue"),
    lwd = 2
  )
  t = var.test(x1, x2, alternative = alternative)
  p_value = t$p.value
  p_value = signif(p_value, 3)
  mtext(
    paste(
      "Two Sample F-Test (",
      alternative,
      "): P = ",
      p_value,
      "\n Confidence Level = ",
      1 - alpha
    ),
    outer = TRUE
  )
}
#chi squared Test ----
# vis_chi_squared_test: implemented in vis_samples_fact -----
#
# Visual chi-squared / Fisher test for two categorical variables:
# builds the contingency table, checks the Cochran count assumptions
# (falling back to returning the raw table when they fail), draws a
# grouped barplot of within-row percentages, and annotates with the
# test p-value.
#
# samples, fact: factors to cross-tabulate.
# samplename, factorname: label text; cex scales labels/legend.
# Returns: the fisher_chi() test result, or the raw counts table if the
#   count-data assumptions are not met.
vis_chi_squared_test = function(samples,
                                fact,
                                samplename,
                                factorname,
                                cex = 1) {
  oldparchi <- par(no.readonly = TRUE)
  on.exit(par(oldparchi))
  colortuple = colorscheme(1)
  ColorPalette = colorscheme(3)
  if (missing(samplename))
    samplename = character()
  if (missing(factorname))
    factorname = character()
  counts = makeTable(samples, fact, samplename, factorname)
  # bail out early (returning the raw table) if counts are too small
  # for either test
  check_assumptions_chi = check_assumptions_count_data(samples, fact)
  if (check_assumptions_chi == FALSE) {
    fisher_chi = counts
    return(fisher_chi)
  } else{
    row_sum = rowSums(counts)
    col_sum = colSums(counts)
    count_labels = dimnames(counts)[2]
    count_labels = as.character(unlist(count_labels))
    category_names = dimnames(counts)[1]
    category_names = as.character(unlist(category_names))
    norm_counts = (counts / row_sum) * 100 #100 %percentage in each group
    max_val_y = max(norm_counts, na.rm = T)
    # bar colors: the fixed color tuple first, extra categories from the
    # palette (or rainbow when the palette is too short)
    #col_vec_browser=c(colortuple,rainbow(nrow(counts)-2, s = 0.5))
    if (nrow(counts) < (length(ColorPalette) + 2))
    {
      col_vec_browser = c(colortuple, head(ColorPalette, n = nrow(counts) - 2))
    } else{
      col_vec_browser = c(colortuple, rainbow(nrow(counts) - 2, s = 0.4,alpha=1))
    }
    # x_val = seq(-0.5, ncol(counts) + 0.5, 1)
    # y_val = c(0, norm_counts[1, ], 0)
    #creates new plot for barplot
    par(mfrow = c(1, 1),oma = c(0, 0, 3, 0))
    # shrink bar labels for many levels or known long label sets
    maxlabels = length(levels(samples))
    if (maxlabels > 7 |
        grepl("basis", samplename) | grepl("source", samplename)
        | grepl("basis", factorname) | grepl("source", factorname)
        | grepl("genotyped", samplename) |
        grepl("genotyped", factorname))
    {
      labelsize = 0.3 * cex
    } else if (maxlabels > 5) {
      labelsize = 0.7 * cex
    } else{
      labelsize = cex
    }
    fisher_chi = fisher_chi(counts) #checks if Cochran requirements for chi2 are met, if not: only fisher exact test allowed
    titletext = paste(fisher_chi$method,
                      ": p-value =",
                      signif(fisher_chi$p.value, 3),
                      sep = "")
    # leave more headroom (and a smaller legend) for many categories
    if (nrow(counts) > 3)
    {
      ma = max(1.3 * max_val_y)
      legendsize = 0.7 * cex
    } else{
      # BUG FIX: was the redundant double assignment `ma = ma = max(...)`
      ma = max(1.1 * max_val_y)
      legendsize = cex
    }
    barplot(
      norm_counts,
      names.arg = count_labels,
      xlim = c(-0.5, ncol(counts) + 1),
      ylim = c(0, ma),
      width = 1 / (nrow(counts) + 1),
      space = c(0, 1),
      col = col_vec_browser,
      ylab = "%",
      xlab = samplename,
      beside = TRUE,
      cex.axis = 1,
      cex.names = labelsize #size of labels of barplot
    )
    box()
    mtext(titletext)
    category_names = as.character(category_names)
    legend(
      "topright",
      inset = 0.05,
      category_names,
      col = col_vec_browser,
      bty = 'n',
      lwd = 2,
      cex = legendsize
    )
    return(fisher_chi)
  }
}
###### Visualize ANOVA ###############################
## performs ANOVA, oneway test and post-hoc t.test
#
# Strip chart of all groups with mean +- sd bars, 95 % confidence
# intervals of the means, and compact-letter display of Tukey HSD
# post-hoc comparisons; ANOVA and Welch oneway.test p-values are
# annotated.
#
# samples: numeric response (NAs removed together with their factor
#   entries).
# fact: grouping factor.
# conf.level: confidence level for the letter display (default 0.95).
# samplename, factorname: axis labels; cex is currently unused here.
# Returns: list with the ANOVA summary, the oneway.test result, the
#   TukeyHSD table and conf.level.
# Requires multcompView::multcompLetters.
vis_anova = function(samples,
                     fact,
                     conf.level = 0.95,
                     samplename = "",
                     factorname = "",
                     cex = 1) {
  oldparanova <- par(no.readonly = TRUE)
  on.exit(par(oldparanova))
  alpha = 1 - conf.level
  # drop NAs from the response and the matching factor entries
  samples3 = na.omit(samples)
  fact <- subset(fact,!is.na(samples))
  samples = samples3
  n_classes = length(unique(fact))
  # NA-tolerant helpers for per-group summaries
  sdna = function(x)
  {
    sd(x, na.rm = T)
  }
  meanna = function(x)
  {
    mean(x, na.rm = T)
  }
  s = tapply(samples, fact, sdna)
  m = tapply(samples, fact, meanna)
  samples_per_class = integer(n_classes)
  for (i in 1:n_classes) {
    samples_per_class[i] = sum(fact == unique(fact)[i])
  }
  an = aov(samples ~ fact)
  summaryAnova = summary(an)
  # Welch-type test that does not assume equal variances
  oneway = oneway.test(samples ~ fact)
  maximum = max(samples, na.rm = T)
  minimum = min(samples, na.rm = T)
  spread = maximum - minimum
  mi = minimum - 0.1 * spread
  ma = maximum + 0.4 * spread
  par(mfrow = c(1, 1), oma = c(0, 0, 3, 0))
  stripchart(
    samples ~ fact,
    vertical = TRUE,
    xlim = c(0, n_classes + 1),
    ylim = c(mi, ma),
    col = rep("grey30", n_classes),
    ylab = samplename,
    xlab = factorname,
    las = 2
  )
  # sd bars (mean +- sd), slightly left of each group:
  for (i in 1:n_classes) {
    lines(
      x = c(i - 0.2, i - 0.2),
      y = c(m[[i]] - s[[i]], m[[i]] + s[[i]]),
      col = colors()[131],
      lwd = 5
    )
  }
  # mean marker and its 95 % confidence interval per group
  for (i in 1:n_classes) {
    lines(
      x = c(i - 0.1, i + 0.1),
      y = c(m[[i]], m[[i]]),
      col = colors()[552],
      lwd = 3
    )
    arrows(
      i,
      m[[i]] + qt(1 - 0.025, samples_per_class[i] - 1) * s[[i]] / sqrt(samples_per_class[i]),
      i,
      m[[i]] - qt(1 - 0.025, samples_per_class[i] - 1) * s[[i]] / sqrt(samples_per_class[i]),
      angle = 90,
      code = 3,
      col = colors()[552],
      lty = 1,
      lwd = 2,
      length = 0.1
    )
  }
  # Tukey HSD post-hoc; compact letter display at threshold alpha
  tuk = TukeyHSD(an)
  s = multcompLetters(tuk[[1]][, 4], threshold = alpha)
  # reorder the letters to match the sorted factor levels on the x axis
  ord = c()
  v = attributes(s$Letters)$names
  f_levels = sort(unique(fact))
  for (i in 1:n_classes) {
    ord[i] = which(v == f_levels[i])
  }
  # NOTE(review): seq(1:n_classes + 1) evaluates to 1:n_classes here
  # (seq() on a vector of length > 1 returns seq_along) -- consider
  # seq_len(n_classes) for clarity.
  text(seq(1:n_classes + 1),
       mi,
       s$Letters[ord],
       col = colors()[81],
       lwd = 2)
  mtext(paste(
    "ANOVA: P = ",
    signif(summaryAnova[[1]][["Pr(>F)"]][[1]], 3),
    "\n",
    "OneWay: P = ",
    signif(oneway$p.value, 3)
  ),
  outer = TRUE)
  legend(
    "top",
    inset = 0.05,
    horiz = F,
    c("mean +- sd ", "mean with 95% conf. intervall"),
    col = c(colors()[131], colors()[552]),
    bty = 'n',
    lwd = 3
  )
  my_list <-
    list(
      "ANOVA" = summaryAnova,
      "oneway_test" = oneway,
      "adjusted_p_values_t_test" = tuk,
      "conf.level" = conf.level
    )
  return(my_list)
}
## Visualize ANOVA assumptions----
### Header vis_anova_assumptions -----
#' Testing ANOVA assumptions
#'
#' \code{vis_anova_assumptions} checks for normality of the standardised residuals of the anova both graphically by qq-plots as well as performing
#' the Shapiro-Wilk-test \code{shapiro.test} and the Anderson-Darling-Test \code{ad.test}.
#' \code{aov} further tests the homoscedacity of each factor level in \code{fact} with the \code{bartlett.test}.
#'
#' @param samples vector containing dependent variable, datatype numeric
#' @param fact vector containing independent variable, datatype factor
#' @param conf.level confidence level, 0.95=default
#' @param samplename name of sample used in graphical output, datatype character , ""=default
#' @param factorname name of sample used in graphical output, datatype character, ""=default
#' @param cex number indicating the amount by which plotting text and symbols should be scaled relative to the default. 1=default, 1.5 is 50\% larger, 0.5 is 50\% smaller, etc.
#'
#' @return my_list: list containing the test statistics of the anova
#' \code{aov(samples~fact)},\code{bartlett.test(samples~fact)} and the tests of normality of the standardized residuals of aov, \code{ks_test} and \code{shapiro_test}
#' @examples
#'ToothGrowth$dose=as.factor(ToothGrowth$dose)
#'vis_anova_assumptions(ToothGrowth$len, ToothGrowth$dose)
#'
#'vis_anova_assumptions(ToothGrowth$len, ToothGrowth$supp)
#'vis_anova_assumptions(iris$Petal.Width,iris$Species)
#' @export vis_anova_assumptions
vis_anova_assumptions = function(samples,
                                 fact,
                                 conf.level = 0.95,
                                 samplename = "",
                                 factorname = "",
                                 cex = 1) {
  oldparanovassum <- par(no.readonly = TRUE)
  on.exit(par(oldparanovassum))
  # drop NAs from the response and the matching factor entries
  samples3 = na.omit(samples)
  fact <- subset(fact,!is.na(samples))
  samples = samples3
  anova = aov(samples ~ fact)
  summary_anova = summary(anova)
  # diagnostic plots: standardized residuals vs fitted, and Q-Q plot
  par(mfrow = c(1, 2), oma = c(0, 0, 3, 0))
  plot(anova$fitted, rstandard(anova), main = "std. Residuals vs. Fitted")
  abline(h = 0, col = 1, lwd = 2)
  qqnorm(rstandard(anova))
  qqline(rstandard(anova), col = "red", lwd = 2)
  par(mfrow = c(1, 1))
  #check for normality of standardized residuals
  # BUG FIX: the guard previously tested length(anova) -- the number of
  # components of the aov object, which is constant -- instead of the
  # number of residuals (the sample size). nortest::ad.test requires at
  # least 8 observations.
  if (length(rstandard(anova)) > 7){
    ad_test = ad.test(rstandard(anova))
    p_AD = signif(ad_test$p.value, 3)}
  else{
    ad_test="Anderson-Darling test requires sample size of at least 8."
    p_AD=NA
  }
  shapiro_test = shapiro.test(rstandard(anova))
  p_SH = shapiro_test$p.value
  # homogeneity of variances across factor levels
  bartlett_test = bartlett.test(samples ~ fact)
  p_bart = bartlett_test$p.value
  mtext(
    paste(
      "Check for homogeneity of variances:Bartlett Test, p = ",
      signif(p_bart, 2),
      "\n Check for normality of standardized residuals:\n Shapiro-Wilk: p = ",
      signif(p_SH, 2),
      "\n Anderson-Darling: p = ",
      signif(p_AD, 2)
    ),
    outer = TRUE
  )
  my_list <-
    list(
      "shapiro_test" = shapiro_test,
      "ad_test" = ad_test,
      "summary_anova" = summary_anova,
      "bartlett_test"=bartlett_test
    )
  return(my_list)
}
###### Visualize Kruskal_Wallis ###############################
## performs Kruskal Wallis and post-hoc Wilcoxon:
#
# Boxplots (optionally notched) + strip chart of all groups with a
# compact-letter display of pairwise post-hoc Wilcoxon comparisons
# (sig_diffs_nongauss), annotated with the Kruskal-Wallis p-value.
#
# samples: numeric response (NAs removed together with their factor
#   entries).
# fact: grouping factor.
# conf.level: confidence level for the letter display (default 0.95).
# samplename, factorname: axis labels; cex scales text; notch toggles
#   notched boxplots.
# Returns: list with the kruskal.test result and the adjusted pairwise
#   Wilcoxon p-values.
# Requires multcompView::multcompLetters.
vis_Kruskal_Wallis_clusters = function(samples,
                                       fact,
                                       conf.level = 0.95,
                                       samplename = "",
                                       factorname = "",
                                       cex = 1,
                                       notch = F) {
  oldparkruskal <- par(no.readonly = TRUE)
  on.exit(par(oldparkruskal))
  alpha = 1 - conf.level
  #remove rows with NAs in samples
  samples3 = na.omit(samples)
  fact <- subset(fact,!is.na(samples))
  samples = samples3
  n_classes = length(unique(fact))
  #define color scheme dependent on number of classes
  mc = rainbow(n_classes,alpha = 1)
  #mc=ColorPalette(n_classes)
  s = tapply(samples, fact, sd)
  m = tapply(samples, fact, mean)
  samples_per_class = c()
  for (i in 1:n_classes) {
    samples_per_class[i] = sum(fact == unique(fact)[i])
  }
  kk = kruskal.test(samples ~ fact)
  extramargin = 0.1
  margins = calc_min_max_of_y_axis(samples, extramargin, extramargin)
  mi = margins[[1]]
  ma = margins[[2]]
  par(mfrow = c(1, 1), oma = c(1, 0, 1, 0)) #oma: outer margin south, west, north, east
  if (notch == TRUE) {
    b = boxplot(
      samples ~ fact,
      notch = TRUE,
      col = mc,
      las = 1,
      xlim = c(0, n_classes + 1),
      ylim = c(mi, ma),
      xlab = factorname,
      ylab = samplename,
      #changes group names size
      cex.lab = cex,
      cex.axis = 0.8 * cex,
      cex.main = cex,
      cex.sub = cex,
      boxwex = 0.5
    )
  }
  else
  {
    b = boxplot(
      samples ~ fact,
      notch = FALSE,
      col = mc,
      las = 1,
      xlim = c(0, n_classes + 1),
      ylim = c(mi, ma),
      xlab = factorname,
      ylab = samplename,
      boxwex = 0.5
    )
  }
  stripchart(
    samples ~ fact,
    vertical = TRUE,
    #method="jitter",
    col = rep("grey50", n_classes),
    # ylab = ylab,
    #xlab = xlab,
    las = 1,
    #horizontal legend,
    add = TRUE
  )
  mtext(c("N = ", b$n), at = c(0.7, seq(1, n_classes)), las = 1) #number of cases in each group
  # pairwise post-hoc comparisons; compact letter display at alpha
  tuk = sig_diffs_nongauss(samples, fact)
  s = multcompLetters(tuk[[1]][, 4], threshold = alpha)
  # reorder the letters to match the sorted factor levels on the x axis
  ord = c()
  v = attributes(s$Letters)$names
  f_levels = sort(unique(fact))
  for (i in 1:n_classes) {
    ord[i] = which(v == f_levels[i])
  }
  # (removed a stray no-op line `(ma)` that merely evaluated ma)
  # NOTE(review): seq(1:n_classes + 1) evaluates to 1:n_classes here
  # (seq() on a vector of length > 1 returns seq_along).
  text(
    seq(1:n_classes + 1),
    mi,
    s$Letters[ord],
    col = "darkgreen",
    cex = cex,
    lwd = 2
  )
  title(paste(kk$method, "p =", signif(kk$p.value, digits = 3)), outer = TRUE)
  my_list <-
    list("kruskal_wallis" = kk,
         "adjusted_p_values_wilcoxon" = tuk)
  return(my_list)
}
##### Visualize Regression und trumpet curves ###############################
# Linear regression plot with "trumpet" confidence curves plus residual
# diagnostics.
#
# Fits lm(y ~ x), draws the data with the fitted line and the lower/upper
# confidence ("trumpet") curves at level gamma = P (via conf_band), annotates
# the slope/intercept estimates with p-values and R^2, and then shows
# residuals-vs-fitted and a normal Q-Q plot with Shapiro-Wilk and
# Anderson-Darling p-values. Called for its plotting side effects; no
# return value.
vis_regr_trumpets = function(x, y, P) {
  # Save graphical parameters and restore them on exit.
  oldparreg <- par(no.readonly = TRUE)
  on.exit(par(oldparreg))
  reg = lm(y ~ x)
  summary(reg)
  ## error bands:
  y_conf_low = conf_band(x, reg, P, -1)
  y_conf_up = conf_band(x, reg, P, 1)
  # NOTE(review): ma/mi/spread computed here are immediately overwritten by
  # calc_min_max_of_y_axis below; this first computation is dead code.
  ma = max(y, reg$fitted)
  mi = min(y, reg$fitted)
  spread = ma - mi
  # Pad the y range: 10% below, 40% above (room for the annotation text).
  lower = 0.1
  upper = 0.4
  margins = calc_min_max_of_y_axis(y, lower, upper)
  mi = margins[[1]]
  ma = margins[[2]]
  par(oma = c(0, 0, 5, 0))
  plot(x, y, ylim = c(mi, ma))
  # Fitted regression line.
  points(x,
         reg$fitted,
         type = "l",
         col = 2,
         lwd = 2)
  # Lower trumpet curve.
  points(
    x,
    y_conf_low,
    type = "l",
    lwd = 2,
    lty = 2,
    col = colors()[84]
  )
  # Upper trumpet curve.
  points(
    x,
    y_conf_up,
    type = "l",
    lwd = 2,
    lty = 2,
    col = colors()[84]
  )
  legend(
    "bottomright",
    c("regr. line", paste("trumpet curves for gamma=", P)),
    lwd = 2,
    col = c(2, colors()[84], colors()[85]),
    lty = c(1, 2, 3),
    bty = "n"
  )
  s = summary(reg)
  mtext(
    paste("Regression: ax + b. trumpet curves for gamma = ", P, "\n \n"),
    outer = TRUE,
    cex = 1.5
  )
  # Coefficient annotation: slope a, intercept b, their p-values and R^2.
  mtext(
    paste(
      "\n \n a = ",
      signif(reg$coefficients[2], 2),
      ", p = ",
      signif(s$coefficients[2, 4], 2),
      "\n b = ",
      signif(reg$coefficients[1], 2),
      ", p = ",
      signif(s$coefficients[1, 4], 2),
      "\n R^2 = ",
      signif(summary(reg)$r.squared, 4)
    ),
    outer = TRUE
  )
  # Second figure: residual diagnostics side by side.
  par(mfrow = c(1, 2), oma = c(0, 0, 3, 0))
  plot(
    reg$fitted,
    residuals(reg),
    main = "Residuals vs. Fitted",
    xlab = "Fitted Values",
    ylab = "Residuals"
  )
  abline(h = 0, col = 1, lwd = 2)
  qqnorm(residuals(reg), ylab = "Sample Quantiles of Residuals")
  qqline(residuals(reg), col = "red", lwd = 2)
  # Normality tests of the raw residuals (ad.test is from package nortest).
  KS = ad.test(residuals(reg))
  p_KS = signif(KS$p.value, 2)
  SH = shapiro.test(residuals(reg))
  p_SH = signif(SH$p.value, 2)
  mtext(
    paste(
      "Residual Analysis\n Shapiro-Wilk: p = ",
      p_SH,
      "\n Anderson-Darling: P = ",
      p_KS
    ),
    outer = TRUE
  )
}
###### Visualize Residuals ###############################
# Residual diagnostics for an already-fitted model.
#
# resid:  vector of residuals
# fitted: vector of fitted values (same length)
#
# Draws residuals-vs-fitted and a normal Q-Q plot side by side, annotated
# with Shapiro-Wilk and Anderson-Darling normality p-values
# (ad.test is from package nortest). Called for its plotting side effects.
vis_resid = function(resid, fitted) {
  # Save graphical parameters and restore them on exit.
  oldparresid <- par(no.readonly = TRUE)
  on.exit(par(oldparresid))
  par(mfrow = c(1, 2), oma = c(0, 0, 3, 0))
  plot(fitted, resid, main = "Residuals vs. Fitted")
  abline(h = 0, col = 1, lwd = 2)
  qqnorm(resid)
  qqline(resid, col = "red", lwd = 2)
  # Normality tests of the residuals.
  KS = ad.test(resid)
  p_KS = signif(KS$p.value, 2)
  SH = shapiro.test(resid)
  p_SH = signif(SH$p.value, 2)
  mtext(
    paste(
      "Residual Analysis\n Shapiro-Wilk: p = ",
      p_SH,
      "\n Anderson-Darling: p = ",
      p_KS
    ),
    outer = TRUE
  )
}
###### Visualize Regression ###############################
# Check the assumptions of a simple linear regression y ~ x.
#
# Removes pairwise NAs, fits lm(y ~ x), plots standardized residuals vs.
# fitted values and a normal Q-Q plot, and tests the standardized residuals
# for normality with Shapiro-Wilk and Anderson-Darling (nortest). If both
# tests reject at alpha = 1 - conf.level, the annotation states that the
# regression requirements are not met.
#
# Returns list(summary_regression, shapiro_test_residuals, ad_test_residuals).
vis_regression_assumptions = function(x,
                                      y,
                                      conf.level = 0.95) {
  # Save graphical parameters and restore them on exit.
  oldparreg <- par(no.readonly = TRUE)
  on.exit(par(oldparreg))
  alpha = 1 - conf.level
  # P = alpha
  #remove all NAs from both vectors (pairwise: drop a pair if either is NA)
  xna <- x[!is.na(y) & !is.na(x)]
  yna <- y[!is.na(y) & !is.na(x)]
  x <- xna
  y <- yna
  # Sort x ascending and reorder y to match.
  ord = order(x)
  x = sort(x)
  y = y[ord]
  reg = lm(y ~ x)
  resreg = summary(reg)
  par(mfrow = c(1, 2), oma = c(0, 0, 4, 0))
  plot(
    reg$fitted,
    rstandard(reg),
    main = "std. Residuals vs. Fitted",
    xlab = "Fitted Values",
    ylab = "Standardized Residuals"
  )
  abline(h = 0, col = 1, lwd = 2)
  qqnorm(rstandard(reg), ylab = "Sample Quantiles of Std. Residuals")
  qqline(rstandard(reg), col = "red", lwd = 2)
  # Normality tests of standardized residuals; note the model is refit here
  # rather than reusing `reg` (identical result, just redundant work).
  KS = ad.test(rstandard(lm(y ~ x)))
  p_KS = signif(KS$p.value, 2)
  SH = shapiro.test(rstandard(lm(y ~ x)))
  p_SH = signif(SH$p.value, 2)
  # Both tests must reject before the assumptions are declared violated.
  if (p_KS < alpha & p_SH < alpha)
  {
    mtext(
      paste(
        "Residual Analysis\n Shapiro-Wilk: p = ",
        p_SH,
        "\n Anderson-Darling: p = ",
        p_KS,
        "\n Requirements regression not met"
      ),
      outer = TRUE
    )
  } else{
    mtext(
      paste(
        "Residual Analysis\n Shapiro-Wilk: p = ",
        p_SH,
        "\n Anderson-Darling: p = ",
        p_KS
      ),
      outer = TRUE
    )
  }
  my_list = list(
    "summary_regression" = resreg,
    "shapiro_test_residuals" = SH,
    "ad_test_residuals" = KS
  )
  return(my_list)
}
# Scatter plot of y vs. x with the fitted regression line, a confidence
# band and a prognosis (prediction) band, annotated with the coefficient
# estimates, their confidence intervals and p-values, and adjusted R^2.
#
# x, y:           numeric vectors (pairs with NA in either are dropped)
# conf.level:     level parameter; alpha = 1 - conf.level is used for the
#                 annotation and P = alpha is passed to the band functions
# name_of_factor: x-axis label
# name_of_sample: y-axis label
#
# Returns a list with the regression summary and Shapiro-Wilk /
# Anderson-Darling tests of the standardized residuals.
#
# NOTE(review): the default conf.level = 0.05 differs from
# vis_regression_assumptions (0.95), making alpha = 0.95 by default.
# Left unchanged to avoid altering caller-visible behavior -- confirm.
vis_regression = function(x,
                          y,
                          conf.level = 0.05,
                          name_of_factor = character(),
                          name_of_sample = character())
{
  # Save graphical parameters and restore them on exit.
  oldparregr <- par(no.readonly = TRUE)
  on.exit(par(oldparregr))
  alpha = 1 - conf.level
  P = alpha
  #remove all NAs from both vectors (pairwise deletion)
  xna <- x[!is.na(y) & !is.na(x)]
  yna <- y[!is.na(y) & !is.na(x)]
  x <- xna
  y <- yna
  # Sort x ascending and reorder y to match so the band curves draw cleanly.
  ord = order(x)
  x = sort(x)
  y = y[ord]
  reg = lm(y ~ x)
  resreg = summary(reg)
  ## error bands:
  y_conf_low = conf_band(x, reg, P, -1)
  y_conf_up = conf_band(x, reg, P, 1)
  y_progn_low = progn_band(x, reg, P, -1)
  y_progn_up = progn_band(x, reg, P, 1)
  # BUG FIX: the original used `na.rm <- T` inside max()/min(), which does
  # NOT set the na.rm argument -- it assigns a local variable and passes
  # TRUE (== 1) as an extra *data* value, silently injecting 1 into the
  # range. (An unused, similarly buggy `ylim` assignment was also removed.)
  ma = max(y, reg$fitted, y_progn_up, na.rm = TRUE)
  mi = min(y, reg$fitted, y_progn_low, na.rm = TRUE)
  spread = ma - mi
  par(mfrow = c(1, 1), oma = c(0, 0, 5, 0))
  # Pad the plot: 10% below, 40% above (room for the legend/annotation).
  plot(
    x,
    y,
    ylim = c(mi - 0.1 * spread, ma + 0.4 * spread),
    xlab = name_of_factor,
    ylab = name_of_sample
  )
  points(x,
         reg$fitted,
         type = "l",
         col = colorscheme(2)[1], #dark green
         lwd = 2)
  #plot confidence band, lower boundary
  points(
    x,
    y_conf_low,
    type = "l",
    lwd = 2,
    lty = 2,
    col = colorscheme(1)[1]
  )
  #plot confidence band, upper boundary
  points(
    x,
    y_conf_up,
    type = "l",
    lwd = 2,
    lty = 2,
    col = colorscheme(1)[1]
  )
  #plot prognosis band, lower boundary
  points(
    x,
    y_progn_low,
    type = "l",
    lwd = 2,
    lty = 3,
    col = colorscheme(1)[2]
  )
  #plot prognosis band, upper boundary
  points(
    x,
    y_progn_up,
    type = "l",
    lwd = 2,
    lty = 3,
    col = colorscheme(1)[2]
  )
  legend(
    "topleft",
    horiz=TRUE,
    text.width = 0.75,
    c("regr. line", "confidence band", "prognosis band"),
    lwd = 2, #line width
    col = c(colorscheme(2)[1], colorscheme(1)[1],colorscheme(1)[2]),
    lty = c(1, 2, 3), #line types of legend
    bty = 'n', #no box around legend
    cex=0.75 #reduces the legend size
  )
  s = summary(reg)
  b = confint(reg)
  # Normality tests of the standardized residuals
  # (ad.test is from package nortest).
  KS = ad.test(rstandard(lm(y ~ x)))
  SH = shapiro.test(rstandard(lm(y ~ x)))
  mtext(
    paste(
      " regression: y = ax + b \n Confidence = ",
      alpha,
      ", a = ",
      signif(reg$coefficients[2], 2),
      ", interval [",
      signif(b[2, 1], 2),
      ",",
      signif(b[2, 2], 2),
      "]",
      ", p = ",
      signif(s$coefficients[2, 4], 2),
      "\n b = ",
      signif(reg$coefficients[1], 2),
      ", interval [",
      signif(b[1, 1], 2),
      ",",
      signif(b[1, 2], 2),
      "]",
      ", p = ",
      signif(s$coefficients[1, 4], 2),
      "\n adjusted R^2 = ",
      signif(s$adj.r.squared, 2)
    ),
    outer = TRUE
  )
  my_list = list(
    "independent variable x"=name_of_factor,
    "dependent variable y"=name_of_sample,
    "summary_regression" = resreg,
    "shapiro_test_residuals" = SH,
    "ad_test_residuals" = KS
  )
  return(my_list)
}
#Mosaic plots-----
#Mosaic plots-----
# Mosaic plot of the cross-tabulation of `samples` by `fact` (package vcd).
#
# samples, fact:  categorical vectors to cross-tabulate
# name_of_sample, name_of_factor: dimension names for the table
# minperc:  for large tables, drop rows/columns contributing less than this
#           fraction of the total count (default 5%)
# numbers:  if TRUE, print the cell counts inside the mosaic tiles
#
# Row heights reflect row totals; tile widths reflect within-row proportions.
# Returns the table itself when the assumption check fails, otherwise
# list(mosaic_stats = <vcd::mosaic result>).
vis_mosaic = function(samples,
                      fact,
                      name_of_sample = character(),
                      name_of_factor= character(),
                      minperc = 0.05,
                      numbers = TRUE)
{
  # Save graphical parameters and restore them on exit;
  # force `new = FALSE` so the next plot starts a fresh frame.
  oldparmosaic <- par(no.readonly = TRUE)
  oldparmosaic$new=FALSE
  on.exit(par(oldparmosaic))
  # NOTE(review): these missing() branches are redundant -- both parameters
  # already have the same defaults in the signature.
  if (missing(minperc))
  {
    #minperc is the minimum percentage a column has to contribute to be displayed
    minperc = 0.05
  }
  if (missing(numbers))
  {
    #numbers are shown in rectangle of category
    numbers = TRUE
  }
  counts = makeTable(samples, fact,name_of_sample, name_of_factor)
  check_assumptions = check_assumptions_count_data(samples, fact)
  if (check_assumptions == FALSE)
  {
    # Table does not qualify as a two-way contingency table: return it as-is.
    my_list =counts
    return(my_list)
  }
  else{
    ##Mosaic plot
    ##The height of the box is the same for all boxes in the same row and
    #is equal to the total count in that row.
    #
    #The width of the box is the proportion of individuals in the row which fall into that cell.
    # #Full mosaic plot with all data only if unique number of samples and fact below threshold
    maxfactors = max(length(unique(samples)), length(unique(fact)))
    threshold = 6
    if (length(unique(samples)) < threshold &
        length(unique(fact)) < threshold)
    {
      # Small table: plot everything, shaded by Pearson residuals.
      res = mosaic(
        counts,
        shade = TRUE,
        legend = TRUE, #shows pearsons residual
        pop = F
        #,main = titletext
      )
      # Suppress labels for cells below 0.5% of the total count.
      tab <-
        as.table(ifelse(counts < 0.005 * sum(counts), NA, counts))
      #puts numbers on count
      if (numbers == TRUE) {
        labeling_cells(text = tab, margin = 0)(counts)
      }
    } else{
      #
      ##Eliminate rows and columns contributing less than minperc of the total number of counts
      rowSum = rowSums(counts)
      colSum = colSums(counts)
      total = sum(counts)
      countscolumn_row_reduced = as.table(counts[which(rowSum > minperc * total),
                                                 which(colSum > minperc * total)])
      #check dimensions after reduction: must still be a two-way contingency table
      test = dim(as.table(countscolumn_row_reduced))
      if (is.na(test[2]))
      {
        # Reduction collapsed the table to one dimension; fall back to full table.
        countsreduced = counts
      }
      else{
        countsreduced = countscolumn_row_reduced
      }
      # Shrink label font sizes as the number of categories grows.
      res = mosaic(
        countsreduced,
        shade = TRUE,
        legend = TRUE,
        cex.axis = 50 / maxfactors,
        labeling_args = list(gp_labels = (gpar(
          fontsize = 70 / maxfactors
        ))),
        # main = titletext,
        pop = F
      )
      if (numbers == TRUE) {
        labeling_cells(text = countsreduced, margin = 0)(countsreduced)
      }
    }
    my_list <-
      list(
        "mosaic_stats" =res
      )
    return(my_list)
  }
}
#Helper functions--------------------------------------
#Check for type of samples and fact
# Report the classes of the sample vector and the grouping factor.
#
# Returns list(typesample = class(samples), typefactor = class(fact)).
type_sample_fact = function(samples, fact)
{
  sample_class <- class(samples)
  factor_class <- class(fact)
  list("typesample" = sample_class, "typefactor" = factor_class)
}
#helper function odds ratio
#calculation of odds ratio
# Odds ratio OR = (a/b) / (c/d) with a Wald confidence interval.
#
# a = positives in group 1, b = negatives in group 1,
# c = positives in group 2, d = negatives in group 2.
# Inputs may be vectors; everything is computed elementwise.
# alpha:       significance level for the (1 - alpha) confidence interval
#              (default 0.05).
# zerocorrect: apply the Higgins & Green (2011) zero-cell corrections
#              (default TRUE).
#
# Returns a matrix with rows OR, lowconf, upconf, SE (one column per input
# element). (The original `("odds_ratio_statistics" = output)` was not a
# list() call, so the function has always returned this plain matrix; that
# interface is preserved.)
odds_ratio = function(a, b, c, d, alpha, zerocorrect) {
  if (missing(alpha)) {
    alpha = 0.05
  }
  if (missing(zerocorrect)) {
    zerocorrect = TRUE
  }
  #odds ratio:=OR=a/b/(c/d)
  if (zerocorrect == TRUE)
  {
    # Drop comparisons where BOTH groups have no positives (a = c = 0) or
    # BOTH have no negatives (b = d = 0): OR is undefined there
    # (Higgins and Green 2011).
    doublezero = which(a == 0 &
                         c == 0 | b == 0 & d == 0, arr.ind = T)
    a[doublezero] = NaN
    b[doublezero] = NaN
    c[doublezero] = NaN
    d[doublezero] = NaN
    # Where a single zero cell would break the effect or SE computation,
    # add the 0.5 continuity correction to all four cells.
    singlezero = which(a == 0 |
                         b == 0 | c == 0 | d == 0, arr.ind = T)
    a[singlezero] = a[singlezero] + 0.5
    b[singlezero] = b[singlezero] + 0.5
    c[singlezero] = c[singlezero] + 0.5
    d[singlezero] = d[singlezero] + 0.5
  }
  oddA = a / b
  oddB = c / d
  OR = oddA / oddB
  #confidence interval: SE of ln(OR)
  SE = sqrt(1 / a + 1 / b + 1 / c + 1 / d)
  # BUG FIX: the original unconditionally reset `alpha = 0.05` here, so a
  # user-supplied significance level was silently ignored.
  zalph <- qnorm(1 - alpha / 2)
  logLOW = log(OR) - zalph * SE
  logUP = log(OR) + zalph * SE
  lowconf = exp(logLOW) #lower confidence bound
  upconf = exp(logUP)   #upper confidence bound
  output = rbind(OR, lowconf, upconf, SE)
  return(output)
}
#create sorted table
# Build a contingency table of `fact` x `samples` with user-supplied
# dimension names, ordered by marginal totals (largest first) with
# all-zero columns dropped.
makeTable = function(samples, fact, samplename, factorname)
{
  raw <- data.frame(fact, samples)
  names(raw) <- c(factorname, samplename)
  tab <- table(raw)
  # Order columns, then rows, by their marginal sums in decreasing order.
  tab <- tab[, order(colSums(tab), decreasing = TRUE)]
  tab <- tab[order(rowSums(tab), decreasing = TRUE), ]
  # Drop columns whose entries are all zero.
  tab[, colSums(tab != 0) > 0]
}
# Choose between Fisher's exact test and the chi-squared test for a
# contingency table, based on Cochran's rule of thumb (chi-squared is
# unreliable when cells are sparse).
#
# counts: a contingency table/matrix of counts.
# Returns the htest object from fisher.test() or chisq.test().
fisher_chi = function(counts)
{
  #if Cochran requirements for chi2 not given: fisher test is performed
  # if more than 20% of cells have count smaller 5
  #
  # NOTE(review): `&` binds tighter than `|`, so this condition reads
  #   any(counts == 0)  OR  (more-than-20%-small  AND  ncol < 7)
  # i.e. the ncol < 7 guard does NOT apply to the zero-cell branch,
  # although the comments suggest it should gate both -- confirm intent.
  if (any(counts == 0) #at least one cell with zero entry
      |
      sum(counts < 5) / length(counts) > 0.2# more than 20% of cells have count smaller 5
      &
      #Fisher Tests breaks down for too large tables
      dim(counts)[2] < 7) {
    #fisher.test with simulated p-value (B Monte Carlo replicates)
    testFisherChi = fisher.test(
      counts,
      workspace = 1e9,
      simulate.p.value = T,
      hybrid = F,
      B = 1e5
    )
  } else{
    testFisherChi = chisq.test(counts)
  }
  return(testFisherChi)
}
# Map the alternative-hypothesis direction to the comparison sign used in
# the corresponding null hypothesis: "less" -> ">=", "greater" -> "<=",
# anything else (e.g. "two.sided") -> "equals".
side_of_nh = function(alternative)
{
  compare <- "equals"
  if (alternative == "less") {
    compare <- ">="
  } else if (alternative == "greater") {
    compare <- "<="
  }
  return(compare)
}
# Split `samples` into the two groups defined by the two levels of `fact`,
# dropping NAs, and also return their concatenation.
#
# Emits a warning (and returns the warning object) if `fact` has more than
# two levels or if either group ends up empty.
# Returns list(sample1, sample2, sample1and2 = c(sample1, sample2)).
create_two_samples_vector = function(samples, fact)
{
  group_levels <- unique(sort(fact))
  if (length(group_levels) > 2) {
    return(warning(
      "warning: create_two_samples_vector: only two level input allowed"
    ))
  }
  first <- samples[fact == group_levels[1]]
  first <- first[!is.na(first)]
  if (length(first) == 0) {
    return(warning("each group needs at least one entry"))
  }
  second <- samples[fact == group_levels[2]]
  second <- second[!is.na(second)]
  if (length(second) == 0) {
    return(warning("each group needs at least one entry"))
  }
  list(
    "sample1" = first,
    "sample2" = second,
    "sample1and2" = c(first, second)
  )
}
# Extend the data range of `samples` by fractional margins of the spread:
# lowerExtramargin below the minimum, upperExtramargin above the maximum.
# NAs are ignored. Returns list(min_of_axis, max_of_axis).
calc_min_max_of_y_axis = function(samples,
                                  lowerExtramargin,
                                  upperExtramargin)
{
  hi <- max(samples, na.rm = TRUE)
  lo <- min(samples, na.rm = TRUE)
  span <- hi - lo
  list(lo - lowerExtramargin * span, hi + upperExtramargin * span)
}
# Verify the preconditions of the Shapiro-Wilk test on x:
# 3 <= n <= 5000 non-missing values, and not all values identical.
# Emits a warning and returns FALSE when a precondition fails,
# otherwise returns TRUE.
check_assumptions_shapiro = function(x)
{
  x <- sort(x[complete.cases(x)])
  n <- length(x)
  if (is.na(n) || n < 3L || n > 5000L) {
    warning("sample size must be between 3 and 5000")
    return(FALSE)
  }
  # Zero range means a degenerate (constant) sample.
  if (x[n] - x[1L] == 0) {
    warning("all 'x' values are identical")
    return(FALSE)
  }
  TRUE
}
# TRUE only if BOTH samples satisfy the Shapiro-Wilk preconditions
# (see check_assumptions_shapiro). Both checks always run, so warnings
# for each failing sample are emitted.
check_assumption_shapiro_size_range_two_samples = function(x1, x2) {
  ok_first <- check_assumptions_shapiro(x1)
  ok_second <- check_assumptions_shapiro(x2)
  ok_first && ok_second
}
# Validate that the cross-tabulation of `samples` by `fact` is a genuine
# two-way contingency table: after dropping empty margins there must be at
# least two non-zero row marginals and two non-zero column marginals.
# Emits a warning and returns FALSE otherwise; returns TRUE when valid.
check_assumptions_count_data = function(samples, fact)
{
  tab = table(samples, fact)
  row_totals <- rowSums(tab)
  col_totals <- colSums(tab)
  # Keep only rows/columns that actually contain observations.
  tab <- tab[row_totals > 0, col_totals > 0, drop = FALSE]
  n_rows <- as.integer(nrow(tab))
  n_cols <- as.integer(ncol(tab))
  if (is.null(dim(tab))) {
    warning("no entries in count table ")
    return(FALSE)
  }
  if (is.na(n_rows) || is.na(n_cols) || is.na(n_rows * n_cols)) {
    warning("invalid nrow or ncol in count data ", domain = NA)
    return(FALSE)
  }
  if (n_rows <= 1L) {
    warning("need 2 or more non-zero row marginals")
    return(FALSE)
  }
  if (n_cols <= 1L) {
    warning("need 2 or more non-zero column marginals")
    return(FALSE)
  }
  TRUE
}
sig_diffs_nongauss <- function(samples, fact)
{
  # function to produce a table similar to that produced for TukeyHSD,
  # but for non-normally distributed data
  # calculate p values for each data classification based on pairwise.wilcox.test
  #
  # samples: numeric response vector
  # fact:    grouping factor (its levels() define the comparisons)
  # Returns a one-element list holding a matrix with one row per pair
  # "level2-level1" and columns diff/lwr/upr/p adj; only "p adj" is filled,
  # the other three are placeholders (0) kept for TukeyHSD compatibility.
  ufactor = levels(fact)
  pwt = pairwise.wilcox.test(samples, fact)
  # Per-level means (computed but not returned; kept as in the original).
  factormeans = matrix(0, length(ufactor), 1)
  for (ii in 1:length(ufactor)) {
    pos = which(fact == ufactor[ii])
    factormeans[ii] = mean(samples[pos])
  }
  # make a matrix with a row for every possible combination of
  # 2 data classifications and populate it with the calculated
  # p values
  xcomb = combn(length(ufactor), 2)
  tukeylike = matrix(0, ncol(xcomb), 4)
  colnames(tukeylike) <- c("diff", "lwr", "upr", "p adj")
  tukeynames = vector("list", ncol(xcomb))
  for (ii in 1:ncol(xcomb)) {
    tukeynames[ii] =
      paste(ufactor[xcomb[2, ii]], "-", ufactor[xcomb[1, ii]], sep = "")
    # pairwise.wilcox.test returns a lower-triangular p-value matrix whose
    # rows start at the SECOND level, hence the "- 1" row offset.
    p_value = pwt$p.value[xcomb[2, ii] - 1, xcomb[1, ii]]
    if (is.na(p_value)) {
      # Missing p-value (e.g. not computable for this pair): treat as
      # "no significant difference".
      p_value = 1
    }
    tukeylike[ii, 4] = p_value
    tukeylike[ii, 1] = 0
    tukeylike[ii, 2] = 0
    tukeylike[ii, 3] = 0
  }
  rownames(tukeylike) = tukeynames
  # re-format the table slightly so it is the same as that produced
  # by TukeyHSD and output
  tukeylike2 = list(tukeylike)
  #print(tukeylike2)
  return(tukeylike2)
}
# Confidence band around a fitted simple linear regression line.
#
# x:   predictor values at which to evaluate the band
# reg: result of lm(y ~ x)
# P:   quantile level passed to qt() (defaults to 0.05)
# up:  +1 for the upper curve, -1 for the lower curve (defaults to +1)
#
# Returns a vector of band values, one per element of x.
#
# NOTE(review): the textbook confidence band uses 1/n inside the sqrt and a
# two-sided quantile qt(1 - P/2, n - 2); this code uses 1/(n - 2) and a
# one-sided qt(P, n - 2). Preserved as-is -- confirm before changing.
conf_band = function(x, reg, P, up) {
  if (missing(P)) {
    P = 0.05
  }
  if (missing(up)) {
    up = 1
  }
  a = reg$coefficients[2]   # slope
  b = reg$coefficients[1]   # intercept
  md = x - mean(x)          # centered predictor
  n = length(x)
  # Hoist the loop-invariant residual standard error and vectorize the
  # per-element loop; the operations are elementwise identical to the
  # original, so results match exactly.
  s = sqrt(sum(reg$resid * reg$resid) / (n - 2))
  result = a * x + b +
    up * qt(P, n - 2) * s * sqrt(1 / (n - 2) + md ^ 2 / sum(md * md))
  return(result)
}
# Prognosis (prediction) band around a fitted simple linear regression line.
# Identical to conf_band() except for the extra "1 +" term inside the sqrt,
# which accounts for the variance of a single new observation.
#
# x:   predictor values at which to evaluate the band
# reg: result of lm(y ~ x)
# P:   quantile level passed to qt() (defaults to 0.05)
# up:  +1 for the upper curve, -1 for the lower curve (defaults to +1)
#
# Returns a vector of band values, one per element of x.
progn_band = function(x, reg, P, up) {
  if (missing(P)) {
    P = 0.05
  }
  if (missing(up)) {
    up = 1
  }
  a = reg$coefficients[2]   # slope
  b = reg$coefficients[1]   # intercept
  md = x - mean(x)          # centered predictor
  n = length(x)
  # Hoisted and vectorized version of the original per-element loop;
  # numerically identical results.
  s = sqrt(sum(reg$resid * reg$resid) / (n - 2))
  result = a * x + b +
    up * qt(P, n - 2) * s * sqrt(1 + 1 / (n - 2) + md ^ 2 / sum(md * md))
  return(result)
}
# Check for normality with Shapiro-Wilk-test without visualization----
# Shapiro-Wilk normality test on the non-missing values of x.
# Returns the htest object from shapiro.test().
test_norm = function(x) {
  # Drop NAs before testing (shapiro.test does not accept them).
  x <- x[!is.na(x)]
  shapiro.test(x)
}
#Check length of distributions for t-test----
#Check length of distributions for t-test----
# TRUE when both samples contain MORE than `minimum_size` observations.
#
# x1, x2:       the two sample vectors
# minimum_size: strict lower bound on each sample's length
check_assumption_sample_size_t_test = function(x1, x2, minimum_size) {
  length(x1) > minimum_size && length(x2) > minimum_size
}
#Define color scheme-----
#' Select a color scheme for graphical output.
#'
#' With \code{NULL} (the default), a named list of all available schemes is
#' returned; 1 gives a light green/blue tuple, 2 a dark green/turquoise
#' tuple, and 3 a 12-color palette (RColorBrewer "Set3").
#'
#' @param colorcode NULL for the full list, or 1, 2, 3 for a single scheme.
#' @return The selected color scheme; colors are hex "#RRGGBB" strings.
colorscheme = function(colorcode = NULL)
{
  light_green <- "#B8E0B8"    # matched part group0
  light_blue <- "#B3D1EF"     # matched part group1
  light_turquois <- "#B3E1EF" # light turquois
  dark_green <- "#5CB85C"     # dark green
  colortuple <- c(light_green, light_blue)
  colortuple2 <- c(dark_green, light_turquois)
  # From package RColorBrewer, palette "Set3".
  ColorPalette <- c(
    "#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462",
    "#B3DE69", "#FCCDE5", "#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F"
  )
  all_schemes <- list(
    "colortuple" = colortuple,
    "colortuple2" = colortuple2,
    "ColorPalette" = ColorPalette
  )
  if (is.null(colorcode)) {
    return(all_schemes)
  }
  if (colorcode == 1) {
    return(colortuple)
  }
  if (colorcode == 2) {
    return(colortuple2)
  }
  if (colorcode == 3) {
    return(ColorPalette)
  }
  # Invalid selector: inform the user; message() returns NULL invisibly.
  message("Choose valid parameter: NULL, 1,2 or 3")
}
# Return the default graphical parameters.
#
# Closes every open graphics device; the par() call then implicitly opens
# a fresh device, so the parameters read are the defaults.
#
# BUG FIX: the original first line `dev.new` (without parentheses) was a
# no-op -- it evaluated the function object instead of calling it -- and
# any device it might have opened would have been closed immediately by
# the loop below, so the statement is removed.
resetPar <- function() {
  while (!is.null(dev.list())) dev.off() # close all open devices
  oldpar <- par(no.readonly = TRUE)      # fresh device => default parameters
  return(oldpar)
}
|
911c2c6da5b3df23388008a038f16a13f909fb5f
|
a2c49abf81454652767dd98812f3276aaf2dfe03
|
/R/dynamr.R
|
c6b74f78919637f4da25b626ef733f5b195217c1
|
[
"MIT"
] |
permissive
|
dnkent/dynamr
|
1e85587e83fd83064012e2af9fbf6e1383c33f50
|
fa34c81aabb95550efdd2324a8f8c6e16daff567
|
refs/heads/master
| 2021-07-15T04:25:16.931134
| 2021-06-29T02:33:38
| 2021-06-29T02:33:38
| 205,764,951
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,115
|
r
|
dynamr.R
|
#' A Permutation-Based Changepoint Technique for Monitoring Effect Sizes
#'
#' \code{dynamr} is used to detect where effect values change temporally when
#' estimating generalized linear models with panel data.
#'
#' @param dat Data frame with panel data of interest
#' @param time_var Name of variable denoting time period for any observation,
#' must be a character
#' @param formula Formula for glm object
#' @param N Number of coefficient samples extracted for calculating stable
#' statistical behavior, defaults to 2000
#' @param window_size Number of time periods per model
#' @param family A description of the error distribution to be used for the
#' fitted glm. Currently, there are three options: "gaussian" - multivariate
#' linear regression; "binomial" - multivariate logistic regression; "poisson"
#' -- multivariate Poisson regression
#'
#' @return
#' @export
#'
#' @examples
#' library(ISLR)
#' library(dynamr)
#'
#' data("Weekly")
#'
#' stock_dynam <- dynamr(
#' dat = Weekly,
#' time_var = "Year",
#' formula = Today ~ Lag1 + Lag2,
#' window_size = 1,
#' family = "gaussian",
#' N = 5000
#' )
#'
#' stock_dynam
#'
dynamr <- function(
    dat,
    time_var,
    formula,
    N = 2000,
    window_size,
    family = c(
      "gaussian",
      "binomial",
      "poisson"
    )
) {
  # Permutation-based changepoint detection for GLM effect sizes over time:
  # (1) bootstrap coefficient distributions from randomly chosen time
  # periods to build Shewhart-style tolerance regions, then (2) slide a
  # window of `window_size` periods over time, refit the GLM, and flag
  # windows whose coefficients fall outside the current tolerance bounds.
  #
  # NOTE(review): `require()` inside a function is an anti-pattern (returns
  # FALSE instead of erroring when the package is missing); library()/
  # requireNamespace() at package level would be safer.
  require(dplyr)
  require(tibble)
  # Denote start and end times
  start_time <- min(dat[[time_var]], na.rm = TRUE)
  end_time <- max(dat[[time_var]], na.rm = TRUE)
  # Number of models to fit over time
  # + 2 works out to include all time periods
  num_models <- end_time - (start_time + window_size) + 2
  # Storage for coefficient estimates
  coefs <- matrix(
    NA,
    nrow = N,
    ncol = length(all.vars(formula[-2])) + 1 # -2 removes DV, +1 intercept
  )
  # Create dist of time-invariant coefficients:
  # fit N models, each on `window_size` randomly sampled time periods
  # (not necessarily contiguous).
  for (i in 1:N) {
    t <- sample(c(start_time:end_time), window_size)
    keep_obs <- which(dat[[time_var]] %in% t)
    mod <- glm( # model with randomly selected time periods
      formula,
      data = dat[keep_obs, ],
      family = family
    )
    ## Which to extract?
    coefs[i, ] <- coef(mod)
  }
  # Create ncol tolerance regions (Shewhart-style: 3 standard deviations
  # of the bootstrap coefficient distribution, per coefficient).
  region <- 3 * apply(coefs, MARGIN = 2, sd)
  # Results tibble: one "time" column, then 3 columns per coefficient
  # (estimate, sd, probability of change), starting with the intercept.
  changes <- tibble(
    "time" = as.integer(),
    "int_est" = as.numeric(),
    "int_sd" = as.numeric(),
    "pr_int_change" = as.numeric()
  )
  # Add the three columns for each formula term.
  # NOTE(review): add_column(!!paste0(...)) adds an UNNAMED character
  # column whose value is the generated string, not a column named by it;
  # also paste0() has no `sep` argument (the "" is just concatenated).
  # The columns are coerced to numeric below, so this works, but the
  # construction looks accidental -- confirm.
  for (i in 1:length(all.vars(formula[-2]))) {
    changes <- changes %>%
      add_column(!!
                   paste0(all.vars(formula[-2])[i], "_est", sep = ""),
      ) %>%
      add_column(!!
                   paste0(all.vars(formula[-2])[i], "_se", sep = ""),
      ) %>%
      add_column(!!
                   paste0("pr_", all.vars(formula[-2])[i], "_change", sep = ""),
      )
  }
  # Make sure all columns are numeric vars
  changes <- changes %>%
    mutate(across(where(is.character), as.numeric))
  # Extract coefficients from each sliding window [time_min, time_max).
  for(i in 1:num_models){
    time_min <- (start_time + i) - 1
    time_max <- start_time + window_size + i - 1
    temp_obs <- which(
      dat[[time_var]] >= time_min &
        dat[[time_var]] < time_max
    )
    temp <- dat[temp_obs, ]
    mod <- glm(
      formula,
      data = temp,
      family = family
    )
    # Columns 1:2 of the coefficient table: estimate and std. error.
    est <- summary(mod)$coefficients[,1:2]
    if (i == 1) {
      # First window initializes the tolerance bounds.
      mu <- est[, 1] # coefficients
      mu_upper <- mu + region # upper bounds
      mu_lower <- mu - region # lower bounds
      # Append time -- start the window with middle time
      changes[i, 1] <- floor(median(seq(time_min, time_max)))
      # Fill in estimates df
      # Will work for any number of covariates
      for (j in 1:nrow(est)) {
        changes[i, 3 * j - 1] <- est[j, 1] # coefficient
        changes[i, 3 * j] <- est[j, 2] # sd
        changes[i, 3 * j + 1] <- 0 # first point can't be a change
      }
    }else{
      # append time (middle of the window)
      changes[i, 1] <- floor(median(seq(time_min, time_max)))
      # estimates
      for (j in 1:nrow(est)) {
        changes[i, 3 * j - 1] <- est[j, 1] # coefficient
        changes[i, 3 * j] <- est[j, 2] # sd
        # pr(observed coef is larger than window's upper bound)
        pr_above_upper <- 1 - pnorm(
          as.numeric(mu_upper[j]),
          mean = as.numeric(changes[i, 3 * j - 1]),
          sd = as.numeric(changes[i, 3 * j])
        )
        # pr(observed coef is smaller than window's lower bound)
        pr_below_lower <- pnorm(
          as.numeric(mu_lower[j]),
          mean = as.numeric(changes[i, 3 * j - 1]),
          sd = as.numeric(changes[i, 3 * j])
        )
        # Record prob of change (whichever tail is larger).
        ifelse(
          pr_above_upper > pr_below_lower,
          changes[i, 3 * j + 1] <- pr_above_upper,
          changes[i, 3 * j + 1] <- pr_below_lower
        )
        # If a change is likely (either tail >= 0.5), re-center the
        # tolerance bounds on the new coefficient.
        if (pr_above_upper >= 0.5 | pr_below_lower >= 0.5) {
          mu[j] <- as.numeric(changes[i, 3 * j - 1]) # is a change
          mu_upper[j] <- mu[j] + region[j]
          mu_lower[j] <- mu[j] - region[j]
        }
      }
    }
  }
  return(changes)
}
|
f27c03e44d0b55ebccd95aba029730bf7dada669
|
41fd1d5722c03eb80f985b92c90e99b199c88422
|
/r4ds_ch3.R
|
acbd72179fb3f3621003caca6995224f3bd26ddd
|
[] |
no_license
|
yan9914/R4DS
|
6036bbb169466fd732c6fcfbc8b40163e5c96e40
|
993381f46063fc52db7ba398956787f724a914c4
|
refs/heads/master
| 2020-04-14T23:51:42.871327
| 2019-08-20T13:26:03
| 2019-08-20T13:26:03
| 164,219,529
| 0
| 0
| null | null | null | null |
BIG5
|
R
| false
| false
| 6,514
|
r
|
r4ds_ch3.R
|
# Worked examples and exercise answers for R for Data Science, chapter 3
# (data transformation with dplyr). Flat interactive script; the `?topic`
# lines open help pages. Comments translated to English.
library(nycflights13)
library(tidyverse)
flights
## Filter rows with filter() ##
filter(flights, month == 1, day == 1)
(dec25 <- filter(flights, month == 12, day == 25))
# Floating point: == is unreliable, use near() instead.
sqrt(2)^2 == 2
1/49*49 == 1
near(sqrt(2)^2, 2)
near(1/49*49, 1)
filter(flights, month == 11 | month == 12)
filter(flights, month == 11|12) # wrong: 11|12 is TRUE, so this tests month == TRUE
nov_dec <- filter(flights, month %in% c(11, 12))
# De Morgan's Law
filter(flights, !(arr_delay > 120 | dep_delay > 120))
filter(flights, arr_delay <= 120, dep_delay <= 120)
df <- tibble(x = c(1, NA, 3))
filter(df, x > 1)
filter(df, is.na(x) | x > 1)
# exercise
# 1
filter(flights, arr_delay > 120)
filter(flights, dest == 'IAH' | dest == 'HOU')
filter(flights, carrier %in% c('AA', 'UA', 'DL'))
filter(flights, month %in% c(7, 8, 9))
filter(flights, arr_delay > 120, dep_delay <= 0)
filter(flights, dep_delay >= 60, dep_delay - arr_delay > 30)
# NOTE(review): the two conditions below are ANDed and can never both hold;
# the intended answer is likely dep_time == 2400 | dep_time <= 600.
filter(flights, dep_time == 2400, dep_time <= 600)
# 2
?between
filter(flights, between(month, 7, 9))
# 3
filter(flights, is.na(dep_time))
# dep_delay and arr_time are also NA
# these are probably cancelled flights
# 4
NA^0 # for all numeric x, x^0 == 1
NA | TRUE # for all logical or numeric x, (x | TRUE) == TRUE
FALSE & NA # for all logical or numeric x, (FALSE & x) == FALSE
NA*0 # not 0, because Inf*0 is undefined (NaN) in R
## Order rows with arrange() ##
arrange(flights, year, month, day)
arrange(flights, desc(arr_delay))
# NA is always sorted last
df <- tibble(x = c(5, 2, NA))
arrange(df, x)
arrange(df, desc(x))
# exercise
# 1
arrange(flights, desc(is.na(dep_time)), dep_time)
# 2
arrange(flights, desc(arr_delay))
arrange(flights, dep_delay)
# 3
arrange(flights, desc(distance / air_time))
# 4
arrange(flights, desc(distance))
arrange(flights, distance)
## Pick columns with select() ##
select(flights, year, month, day)
select(flights, year:day)
select(flights, -(year:day))
# renaming
select(flights, tail_num = tailnum) # drops the other variables
rename(flights, tail_num = tailnum) # keeps the other variables
# move variables to the front
select(flights, time_hour, air_time, everything())
# move variables to the end
select(flights, everything(), time_hour, air_time) # this has no effect
select(flights, -c(time_hour, air_time), time_hour, air_time)
select(flights, starts_with('dep')) # variable names starting with "dep"
select(flights, ends_with('delay')) # variable names ending with "delay"
select(flights, contains('time')) # variable names containing "time"
# other helper functions:
# matches('regex') selects variables matching that regular expression
# num_range('x', 1:3) selects x1, x2, x3
# exercise
# 1
select(flights, dep_time, dep_delay, arr_time, arr_delay)
select(flights, starts_with('dep'), starts_with('arr'))
# 2 variables already selected are not selected again
select(flights, starts_with('dep'), ends_with('delay'))
# 3
?one_of
vars <- c('year', 'month', 'day', 'dep_delay', 'arr_delay')
select(flights, one_of(vars))
# 4
select(flights, contains('TIME')) # case-insensitive by default
?contains
select(flights, contains('TIME', ignore.case = FALSE))
## Add new variables with mutate() ##
flights_sml <- select(flights,
                      year:day,
                      ends_with('delay'),
                      distance,
                      air_time)
mutate(flights_sml,
       gain = arr_delay - dep_delay,
       speed = distance / air_time *60)
mutate(flights_sml,
       gain = arr_delay - dep_delay,
       hours = air_time / 60,
       gain_per_hour = gain / hours) # newly created variables can be used right away
# keep only the new variables
transmute(flights,
          gain = arr_delay - dep_delay,
          hours = air_time / 60,
          gain_per_hour = gain / hours)
# functions commonly used to create new variables:
# arithmetic and logical comparisons
# %/%, %%
# log() ,log2(), log10()
(x <- 1:10)
lag(x)
lead(x)
cumsum(x)
cummean(x)
?ranking
(y <- c(1, 2, 2, NA, 3, 4))
rank(y)
min_rank(y) # how does this differ from rank()?
min_rank(desc(y))
dense_rank(y)
row_number(y)
percent_rank(y)
cume_dist(y)
ntile(y, 2)
# exercise
# 1
mutate(flights,
       dep_time_mins = dep_time%/%100 * 60 + dep_time%%100,
       sched_dep_time_mins = sched_dep_time%/%100 * 60 + sched_dep_time%%100)
time2mins <- function(x) x%/%100 * 60 +x%%100
mutate(flights,
       dep_time_mins = time2mins(dep_time),
       sched_dep_time_mins = time2mins(sched_dep_time))
# 2
transmute(flights, air_time, arr_time - dep_time)
transmute(flights,
          air_time,
          time2mins(arr_time) - time2mins(dep_time))
# crossing time zones makes arrival minus departure differ from air time
# and so do times that cross midnight
# 3
select(flights, dep_time, sched_dep_time, dep_delay)
select(flights, dep_time, sched_dep_time, dep_delay) %>%
  mutate(time_diff = (time2mins(dep_time) - time2mins(sched_dep_time)),
         time_diff - dep_delay) %>%
  filter(time_diff - dep_delay != 0)
# mismatches occur because some flights cross midnight
# 4
?min_rank
mutate(flights, delay_rank = min_rank(desc(dep_delay))) %>%
  filter(delay_rank <= 10) %>%
  arrange(delay_rank) %>%
  select(delay_rank, everything())
# 5
1:3 + 1:10
# 6
?Trig
## Grouped summaries with summarize() ##
summarize(flights, delay = mean(dep_delay, na.rm = TRUE))
by_day <- group_by(flights, year, month, day)
summarize(by_day, delay = mean(dep_delay, na.rm = TRUE))
delays <- flights %>%
  group_by(dest) %>%
  summarize(
    count = n(),
    dist = mean(distance, na.rm = TRUE),
    delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  filter(count > 20, dest != 'HNL')
not_cancelled <- flights %>%
  filter(!is.na(dep_delay), !is.na(arr_delay))
not_cancelled %>%
  group_by(year, month, day) %>%
  summarize(mean = mean(dep_delay))
# when aggregating, it is best to include a count (or count of non-missing
# values) to make sure conclusions are not based on very little data
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarize(delay = mean(arr_delay))
ggplot(delays, aes(x = delay)) +
  geom_freqpoly(binwidth = 10) # some planes have an "average" delay as long as 300 minutes
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarize(
    delay = mean(arr_delay, na.rm = TRUE),
    n = n()
  )
ggplot(delays, aes(x = n, y = delay)) +
  geom_point(alpha = 1/10) # tiny sample sizes produce the extreme "averages" near 300
delays %>%
  filter(n > 25) %>%
  ggplot(aes(x = n, y = delay)) +
  geom_point(alpha = 1/10)
|
e01268802bc867940dea84f8cc45608fee07c0f3
|
ee6d07dcacee34e2d2ae890e3a50f34af9e062b8
|
/functions/plot_referrals.R
|
a36ea8a855871ffcdb3cb7d41d2fbd907aea68f6
|
[] |
no_license
|
Public-Health-Scotland/dementia-pds
|
36025977265b0425d410fbdd3198b40bda574f70
|
f9deea8b4abdf128a562661ff1dfe4308fa310cb
|
refs/heads/master
| 2023-04-27T19:38:37.580113
| 2021-05-13T14:32:54
| 2021-05-13T14:32:54
| 198,057,483
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,421
|
r
|
plot_referrals.R
|
# Interactive (plotly) line chart of monthly referral counts.
#
# data:     data frame with columns fy (financial year "YYYY/YY"), month
#           (1-12), health_board, ijb, referrals
# scotland: if TRUE, aggregate everything to a single Scotland series
# quarter:  latest complete quarter of an incomplete financial year
#           ("1".."4"), or NA for a complete year
#
# One line per IJB (plus a board-total line) when the data cover several
# IJBs; otherwise a single health-board / Scotland line. Returns a plotly
# widget.
plot_referrals <- function(data,
                           scotland = FALSE,
                           quarter = NA){
  # Determine whether IJBs breakdown is included
  ijb_group <- ifelse(scotland == TRUE |
                        n_distinct(data$ijb) == 1,
                      FALSE,
                      TRUE)
  # If incomplete financial year, only include complete months
  # NOTE(review): include_months is computed but never used afterwards
  # in this function -- confirm whether filtering was intended.
  include_months <-
    if(is.na(quarter)){1:12}else{
      if(quarter == "1"){4:6}else{
        if(quarter == "2"){4:9}else{
          if(quarter == "3"){4:12}else{
            if(quarter == "4"){1:12}
          }
        }
      }
    }
  if(ijb_group == TRUE){
    # Aggregate referrals by month and IJB...
    data %<>%
      group_by(fy, month, health_board, ijb) %>%
      summarise(referrals = sum(referrals), .groups = "drop")
    # ...and build a board-level total series labelled as its own "IJB".
    board <-
      data %>%
      mutate(ijb = health_board) %>%
      group_by(fy, month, health_board, ijb) %>%
      summarise(referrals = sum(referrals), .groups = "drop")
    data %<>%
      bind_rows(board) %>%
      ungroup()
    # Sort IJB levels alphabetically, with the board total first.
    data %<>%
      filter(!is.na(ijb)) %>%
      mutate(ijb = factor(ijb, levels = sort(unique(ijb)))) %>%
      mutate(ijb = forcats::fct_relevel(ijb, max(.$health_board)))
  }else{
    # Single series: the health board, relabelled "Scotland" if requested.
    data %<>%
      mutate(health_board = ifelse(scotland == TRUE, "Scotland", health_board)) %>%
      group_by(fy, month, health_board) %>%
      summarise(referrals = sum(referrals), .groups = "drop") %>%
      ungroup()
  }
  # Derive calendar year from the financial year (Jan-Mar belong to the
  # second year of "YYYY/YY"), plus month labels ordered Apr..Mar.
  data %<>%
    mutate(year = if_else(month %in% 1:3,
                          paste0(substr(fy, 1, 2), substr(fy, 6, 7)),
                          substr(fy, 1, 4)),
           month_full = month(month, label = TRUE, abbr = FALSE),
           month_abbr = forcats::fct_relevel(
             month(month, label = TRUE),
             "Jan", "Feb", "Mar", after = Inf))
  # ggplot with a custom `text` aesthetic for the plotly hover tooltip.
  plot <- data %>%
    ggplot(aes(x = month_abbr,
               y = referrals,
               group = if(ijb_group == TRUE){ijb}else{1},
               colour = if(ijb_group == TRUE){ijb}else{health_board},
               text = paste0(if(ijb_group == TRUE){ijb}else{health_board}, "<br>",
                             month_full, " ", year, "<br>",
                             "Referrals: ", format(referrals, big.mark = ",")))) +
    geom_point() +
    geom_line() +
    scale_y_continuous(limits = c(0, NA)) +
    # Custom labels on x tick marks (month + year; Jan-Mar get the later year)
    scale_x_discrete(labels = paste(levels(data$month_abbr),
                                    c(rep(min(data$year), 9), rep(max(data$year), 3)))) +
    labs(x = "Month of Diagnosis",
         y = "Number") +
    theme(legend.title = element_blank(),
          legend.position = ifelse(ijb_group == FALSE, "none", "top"),
          axis.text.x = element_text(angle=45))
  # Convert to plotly, trim the mode bar and place the legend below the plot.
  ggplotly(plot, tooltip = "text") %>%
    config(displayModeBar = TRUE,
           modeBarButtonsToRemove = list('select2d', 'lasso2d', 'zoomIn2d',
                                         'zoomOut2d', 'autoScale2d',
                                         'toggleSpikelines',
                                         'hoverCompareCartesian',
                                         'hoverClosestCartesian'),
           displaylogo = F, editable = F) %>%
    layout(legend = list(orientation = "h", x = 0.2 , y = -0.6,
                         xanchor = "center", yanchor = "bottom"))
}
|
d1eab60e7c274b788adea509cb08dd370e83cc9e
|
858a8d27a2eddd64d60cd392f428eb7164a8c5ce
|
/app.R
|
d1fbc65da883d52ee5ca422e01c6bb281b303d08
|
[] |
no_license
|
rmarasiganjr/My-Fishing-Spot
|
94225cc2dadc75befa56b19c4160657c9798052b
|
83c617abb7e4bd0d9d27994fb2bd56a28cb64520
|
refs/heads/master
| 2020-04-21T05:45:36.747356
| 2019-02-06T04:39:18
| 2019-02-06T04:39:18
| 169,349,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49
|
r
|
app.R
|
# Shiny entry point: serve the static HTML template "index.html" as the UI.
# NOTE(review): `server` is not defined in this file as shown - it must exist
# in the app's environment before this call, otherwise the app fails to start.
shinyApp(ui = htmlTemplate("index.html"), server)
|
9f86bd5fed63cbec2268440394581e3544bdd30d
|
f252c7af74b27070ec28fc6120a72273fce2c39b
|
/Leukimia.R
|
0aa70590eef46628e348d3580f1f706a094e0a92
|
[] |
no_license
|
sadapple/Research
|
bf142ff98a30c28713281aed6a870e19045cb11b
|
a07b5894c0c6be7e5d980ef79730fd8442046a15
|
refs/heads/master
| 2016-09-05T11:36:56.473431
| 2016-02-01T02:22:41
| 2016-02-01T02:22:41
| 28,351,519
| 0
| 0
| null | 2016-01-29T21:25:15
| 2014-12-22T18:05:05
|
R
|
UTF-8
|
R
| false
| false
| 798
|
r
|
Leukimia.R
|
## Exploratory script: builds the pooled covariance matrix and class-mean
## difference for the Golub leukemia expression data (genes x samples).
## haha, this line only to test pull request!
## load data from multtest
install.packages("multtest") ## failed, package removed from CRAN
## so I tried Bioconductor
## NOTE(review): biocLite() is the legacy Bioconductor installer; installing
## packages inside a script is a side effect best done interactively.
source("http://bioconductor.org/biocLite.R")
biocLite("multtest")
## find the pooled covariance matrix and the mean difference vector
## NOTE(review): data(golub) needs multtest on the search path; no
## library(multtest) call appears in this file as shown - confirm.
data(golub)
## First 27 columns form one class; the remaining columns the other.
index <- c(1:27)
X0 <- golub[,index]
X1 <- golub[,-index]
## Per-gene (row-wise) means within each class.
mu0 <- apply(X0, 1, mean)
mu1 <- apply(X1, 1, mean)
## Centering: mu0/mu1 have length nrow(X0), so column-major recycling
## subtracts each gene's class mean from every sample in that row.
X0.center <- X0- mu0
X1.center <- X1 - mu1
## Pooled scatter divided by the total number of samples.
Sig <- (X0.center%*%t(X0.center)+X1.center%*%t(X1.center))/ncol(golub)
## Between-class mean difference vector.
mu <- mu0 -mu1
## Smallest eigenvalue: checks whether Sig is positive definite.
min(eigen(Sig)$values)
## make matrix Sig p.d.
## original data (machine-specific absolute path - not portable)
leuk <- read.table("/media/Learn/Dropbox/Research/datasets/leukemia_Golub/data/data_set_ALL_AML_independent.txt",sep="\t",quote="",header=T,row.names=NULL,comment.char="")
|
b271d5bdf445f2527982ed637915de7c92813fac
|
b6fff2d53f9efc71c65d769950a76f26cea20588
|
/SAT/SAT_summer_work/individual_shiny_apps/3Dplot/3D_plot_0.1/server.R
|
8485d854cf7aedee1a8eec596e6cf31776c3d699
|
[] |
no_license
|
debnolan/DynDocs
|
845b3514771abf0bf917243372f1342a025a3f43
|
17e5950d96ad0bca6440101ffbbc909bb33b6d94
|
refs/heads/master
| 2021-01-10T11:58:02.234230
| 2015-09-28T17:46:22
| 2015-09-28T17:46:22
| 36,089,609
| 4
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 631
|
r
|
server.R
|
# Shiny server for an interactive 3D scatter plot (shinyRGL/rgl) of satDF.
# Must be executed BEFORE rgl is loaded on headless devices.
options(rgl.useNULL=TRUE)
library(shiny)
library(shinyRGL)
library(rgl)
# Loads `satDF`, the data frame whose columns are offered as plot axes.
load("data/satDF.rda")
shinyServer(function(input, output) {
  # Expression that generates a rgl scene with a number of points corresponding
  # to the value currently set in the slider.
  # NOTE(review): the comment above mentions a slider, but this handler only
  # reads the axis selectors input$x3d/y3d/z3d - confirm against ui.R.
  output$sctPlot <- renderWebGL({
    open3d()
    # One sphere ("s") per row; axis columns chosen by the x3d/y3d/z3d inputs.
    plot3d(satDF[, input$x3d], satDF[, input$y3d], satDF[, input$z3d],
           col = "blue", size = 0.99, type = "s", xlab = input$x3d,
           ylab = input$y3d, zlab = input$z3d)
    # Fixed y tick marks; presumably SAT score range - TODO confirm.
    bbox3d(yat = c(900, 1000, 1100))
    # Equal aspect ratio on all three axes.
    aspect3d(1,1,1)
  })
})
|
6b4b9d17ca3319e7b1d1a1c039d06a0336808bb3
|
7826e33f5b2525e87a9321d4878be889d9176cbd
|
/utils/update_nowcasts.R
|
cab7701bffd4890ac73d8c27d524cb0d9b528d82
|
[
"MIT"
] |
permissive
|
pucciami/covid
|
ac1b39de38597d50254e3780b567e56c77e01fa5
|
e03af99d4e65fbf37c5e4f11cbb424553ea98bad
|
refs/heads/master
| 2021-05-22T19:52:36.975003
| 2020-04-04T15:09:54
| 2020-04-04T15:09:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
update_nowcasts.R
|
# Refresh every nowcast post by sourcing each region's update script.
#
# Fixed: these dependencies were loaded with require(), which returns FALSE
# instead of erroring when a package is missing, letting the script continue
# and fail later with a confusing message. library() fails fast instead.
library(stringr)
library(magrittr)
library(purrr)

# Relative paths (under "_posts/") of the nowcasts to refresh.
nowcasts <- c("global", "national/united-kingdom",
              "national/germany", "national/italy",
              "national/united-states")

# Source each region's update script purely for its side effects.
purrr::walk(nowcasts,
            function(nowcast) {
              source(file.path("_posts", nowcast, "nowcast/update_nowcasts.R"))
            })
|
748cd3cad5809c7b6315c98c2cdde73cd23d833d
|
63d50cbf64469abd6d4729ba0266496ced3433cf
|
/chaaya/matrix_transpose.r
|
a4fcac9198e400f5574a3881c4409cd298baad0e
|
[] |
no_license
|
tactlabs/r-samples
|
a391a9a07022ecd66f29e04d15b3d7abeca7ea7c
|
a5d7985fe815a87b31e4eeee739bc2b7c600c9dc
|
refs/heads/master
| 2023-07-08T09:00:59.805757
| 2021-07-25T15:16:20
| 2021-07-25T15:16:20
| 381,659,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
matrix_transpose.r
|
# Created on
# Course work:
# @author:
# Source:

# Demonstrates matrix transposition with t().
# matrix() fills column-wise by default, so mat is:
#   [,1] [,2]
#   1    3
#   2    4
mat <- matrix(c(1, 2, 3, 4), nrow = 2)
print(" before transpose: ")
print(mat)

# t() swaps rows and columns.
# Fixed: use <- for assignment, and the message below previously repeated
# "before transpose" instead of announcing the transposed result.
t_mat <- t(mat)
print(" after transpose: ")
print(t_mat)
|
a2ee1bcb9ee04d538e33700c9f4343f17d900860
|
9089472e1305ce457a5acd759985319f57b274dd
|
/bmsgpk/obsolete.R
|
b7c0042916452c881582b66b316d0275b7dbd055
|
[] |
no_license
|
at062084/COVID-19-Austria
|
2eee6901ea2976e9dfc5b3eb10123e99ed41951e
|
12eaf153420e2cae1c784d9aac5f5c6f1215fe2e
|
refs/heads/master
| 2023-06-25T21:43:05.301057
| 2023-06-12T13:27:26
| 2023-06-12T13:27:26
| 248,818,185
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,495
|
r
|
obsolete.R
|
# NOTE(review): obsolete scratch plotting snippets kept for reference; objects
# such as dfw, dfat, dfrm, scaled, xLimMin, xTrans and dblDays are not defined
# in this file as shown, so these chunks only run in an interactive session
# where they already exist.
# geom_line(data=df %>% dplyr::filter(Region=="Wien"), aes(x=Date, y=rm7NewConfTest/rm7NewConfirmed*1000), linetype=1, size=1, inherit.aes=FALSE) +

# daily stats
# Log-scaled daily counts for Vienna with a secondary (rescaled) axis.
ggplot(data=dfw, aes(x=Date, y=Count, color=Status, shape=Status)) +
  scale_x_date(limits=c(as.Date(strptime("2020-07-01",format="%Y-%m-%d")),NA),
               date_breaks="1 weeks", date_labels="%a.%d.%m") +
  scale_y_continuous(limits=c(.1,100), breaks=c(seq(.1,1,by=.1),seq(1,10,by=1),seq(10,100,by=10)), trans="log10",
                     sec.axis = sec_axis(~ . *scaled, breaks=c(seq(10,100,by=10),seq(100,1000,by=100),seq(1000,10000,by=1000)))) +
  geom_point(size=2) + geom_line() +
  geom_smooth(method="lm", se=FALSE) +
  ggtitle("AGES BundesLänder Timeline newConfirmed & newTested Daily: Wien")
# Same chart for the Austria-wide data frame (dfat).
ggplot(data=dfat, aes(x=Date, y=Count, color=Status, shape=Status)) +
  scale_x_date(limits=c(as.Date(strptime("2020-07-01",format="%Y-%m-%d")),NA),
               date_breaks="1 weeks", date_labels="%a.%d.%m") +
  scale_y_continuous(limits=c(.05,50), breaks=c(seq(.1,1,by=.1),seq(1,10,by=1),seq(10,100,by=10)), trans="log10",
                     sec.axis = sec_axis(~ . *scaled, breaks=c(seq(10,100,by=10),seq(100,1000,by=100),seq(1000,10000,by=1000), seq(10000,100000,by=10000)))) +
  geom_point(size=2) +
  geom_line() +
  geom_smooth(method="lm", se=FALSE) +
  ggtitle("AGES BundesLänder Timeline newConfirmed/per100.000 perDay")
# Plot spread rate vs current newConfirmed per 100.000
# lm7: per-region exponential growth factor fitted over the last 7 days.
dfrm7 <- dfrm %>% dplyr::filter(Date >= max(Date)-days(7))
dflm7 <- dfrm7 %>% dplyr::arrange(Region,Date) %>%
  dplyr::group_by(Region) %>% summarize(lm7 = exp(coef(lm(log(newConfPop)~Date))[2]))
dfrm1 <- dfrm %>% dplyr::filter(Date == max(Date))
dflmrm <- dfrm1 %>% dplyr::left_join(as.data.frame(dflm7), by="Region")
ggplot(data=dflmrm, aes(x=newConfPop, y=lm7, color=Region, shape=Region)) +
  scale_shape_manual(values=c(21:25,7,9,10,12,13,14)) +
  scale_x_continuous(limits=c(0,65), breaks=seq(0,100,by=10)) +
  scale_y_continuous(limits=c(.95,1.2), breaks=seq(0.5,1.5,by=.1)) +
  geom_point(aes(size=newConfTest*100), stroke=1.5)
#fltRegion=c("Österreich", "Niederösterreich", "Steiermark", "Burgenland", "Wien")
#begDate=as.Date("2020-03-03")
#fltRegion="Wien"
#begDate=as.Date("2020-03-03")
#begDate=as.Date("2020-06-18")
begDate=as.Date("2020-10-01")
dfrmd <- dfrm %>% dplyr::filter(Date >= begDate)
fltRegion=c("Wien","Österreich")
yLimMin <- .90
yLimMax <- 1.25
# , group=Region, color=Region, shape=Region
# Faceted spread-factor charts per region.
ggplot(data=dflm7%>% dplyr::filter(Date>=as.Date("2020-08-01")), aes(x=Date, y=Spread)) +
  scale_x_date(date_breaks="1 months", date_labels="%m") +
  scale_y_continuous(limits=c(.8,1.2))+
  geom_line() +
  facet_wrap(.~Region, ncol=5) +
  ggtitle("AGES COVID-19 SpreadFactor vs. Date by Region")
ggplot(data=dflm7 %>% dplyr::filter(Date>=as.Date("2020-08-01")), aes(x=newConfPop, y=Spread)) +
  scale_x_continuous(limits=c(xLimMin,100), breaks=c(seq(.1,1,by=.1),1:10,seq(10,100,by=10)), trans=xTrans) +
  scale_y_continuous(limits=c(.8,1.2), breaks=exp(log(2)/dblDays), labels=dblDays, position="right",
                     sec.axis=dup_axis(labels=as.character(round(exp(log(2)/dblDays),2)), name="Tägliche Steigerungsrate")) +
  geom_path() +
  facet_wrap(.~Region, ncol=5) +
  ggtitle("AGES COVID-19 SpreadFactor vs. newConfirmed/100.000 by Region")
# NOTE(review): the second curl example below embeds what appears to be a live
# netatmo bearer token - it should be revoked and purged from version control.
# curl -X GET "https://api.netatmo.com/api/getpublicdata?lat_ne=16.0&lon_ne=48.0&lat_sw=16.1&lon_sw=48.1&filter=false" -H "accept: application/json"
# curl -X GET "https://api.netatmo.com/api/getpublicdata?lat_ne=16&lon_ne=48&lat_sw=16.1&lon_sw=48.1&filter=false" -H "accept: application/json" -H "Authorization: Bearer 5fa54f3699f37238057c4272|5b3c7efc208af00fe71ccf173fe4b7a3"
1.) Gehen die Fallzahlen nicht um zwei Stufen zurück, steht die nächste große Stufe unmittelbar bevor. Der aktuelle Lockdown muss dafür sorgen, dass die Fallzahlen auf den Stand von Anfang August (ca. 1 positive Testung pro 100.000 pro Tag) zurück geht. Dann sollten bei üblichen Verhaltensregeln sowie einsetzender Wirkung der Impfung drei bis vier Monate ohne weiteren Lockdown möglich sein (bis ca. 10 positive Testungen pro 100.000 pro Tag)
7.) Eine Prognose auf Basis der Entwicklung seit Ende November sieht dieses Ziel aktuell nicht im Bereich des Möglichen.
8.) Es bleibt zu hoffen, dass die weiteren Auswirkungen des harten Lockdown trotz schlechter Akzeptanz in der Bevölkerung ausreichen das Ziel 'Grün' lt. ECDC Ampelschema zu erreichen (Inzidenz ca. 1 pro 100.000 pro Tag)
Schlussfolgerungen:
1.) Es wäre wichtig, dass der ORF diese Zusammenhänge darstellt und die aktuellen Zahlen entsprechend einordnet. Es muss zu jedem Zeitpunkt klar erkennbar sein, wie die aktuelle Situation im eigenen Bundesland ist, wohin sie sich entwickelt, und wie sie lt. Plan sein soll (z.B. Inzidenz 2 im Feber, 4 im März, 8 im April, 16 im Mai, dann HardLockdown 3 Wochen)
2.) Die Angabe von absoluten Zahlen ohne Referenz zu einer Skala (z.B. Ampelschema der ECDC) und ohne Trend sind wenig hilfreich. Eine Darstellung z.B. analog eines einfachen Wetterberichtes (z.B. Windstärke mit Wind/Sturm/Orkan, 'Böen','auffrischendem' und 'abflauendem' Wind, etc.) pro Bundesland könnte technische Daten in anschaulicher Weise vermitteln.
3.) Eine einfache Prognose aus den vergangenen Daten und Vergleich mit den Zielvorgaben sollte nicht fehlen (auch ohne Modellierung von Massnahmen sinnvoll)
Erläuterungen zu den graphischen Darstellungen im Anhang:
1.) Das epidemiologische Geschehen verläuft bei konstanten Massnahmen exponentiell.
2.) Dieser Wirkungsweise wird erst in logarithmischer Darstellung der Fallzahlen sichtbar. Die vom ORF gewählte lineare Darstellung ist nicht geeignet das epidemiologische Geschehen adäquat zu beschreiben.
3.) Die Abweichungen vom mittleren Wachstum sind Wachstumsschübe von einigen/wenigen Wochen, die besser als Stufen denn als Wellen bezeichnet werden müssen (nach einer Welle geht es wieder hinunter)
Damit nicht ab Ende Jänner eine ähnlich große Stufe stattfindet, muss d
Die gewählte Art der graphischen Darstellung macht es schwierig den Entwicklungsstand der Epidemie anhand der aktuellen Zahlen sachgerecht einzuordnen. Das liegt zum einen daran, dass grundlegende Prinzipien der Ausbreitung von COVID-19 nicht vermittelt werden, zum anderen daran, dass Ziele und Prognosen nicht formuliert werden.
# NOTE(review): obsolete web-scraping experiments (rvest/xml2). `x` is never
# defined in this file as shown, and the assignment to `t` below shadows
# base::t() for the rest of the session.
# old page
csvF <- "./html/COVID-19-austria.bmsgpk.20210404-1942.html"
csv <- xml2::read_html(csvF)
csv %>% html_nodes(xpath=".//*")
csv %>% html_nodes("table")
csv %>% html_table()
# new page
url <- "https://info.gesundheitsministerium.gv.at/?re=tabelle"
rt <- xml2::read_html(url)
rt %>% html_nodes(xpath=".//*")
rt %>% html_nodes(xpath=".//table[@class='table-faelle']")
rt %>% html_nodes("table.table-faelle")
rt %>% html_nodes(xpath="//html/body/table[3]")
t <- x %>% html_nodes("table.table-faelle")
x %>% html_table()
html_nodes(rt, xpath="html/body/table[1]")
url %>% read_html() %>% html_nodes(xpath=".//*")
url %>% read_html() %>% html_nodes("body") %>% extract2(1)
url %>% read_html() %>% html_nodes("body") %>% extract2(1) %>% html_nodes(xpath=".//*")
url %>% read_html() %>% html_table()
html_table(rt, header=FALSE, dec=".", fill=TRUE)
str(rt)
# rvest tutorial example (IMDB cast scraping), kept verbatim.
lego_movie <- read_html("http://www.imdb.com/title/tt1490017/")
cast <- lego_movie %>%
  html_nodes("#titleCast .primary_photo img") %>%
  html_attr("alt")
cast
|
63baec3c6b71aa770fd851c63068b2685fcb12ca
|
1dfecbca8017ac66df7abd497f86982778628b3a
|
/R/proxies.R
|
9765c3b3ca072f349aa042d411f1bbc85336188c
|
[] |
no_license
|
jimsforks/datamaps
|
ff4972bf64b09b01bf634a29e17c0c76c674f741
|
e186d012f452c64814587ed1ce18a39fdbdcfabf
|
refs/heads/master
| 2022-12-08T11:33:35.297391
| 2020-08-27T09:46:44
| 2020-08-27T09:46:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,378
|
r
|
proxies.R
|
#' Dynamically add bubbles
#'
#' Dynamically add bubble using Shiny.
#'
#' @param proxy a proxy as returned by \code{\link{datamapsProxy}}.
#' @param lon,lat coordinates of bubbles.
#' @param radius radius of bubbles.
#' @param color color of bubbles.
#' @param name name of bubbles.
#' @param ... any other variable to use in tooltip.
#'
#' @examples
#' \dontrun{
#' library(shiny)
#'
#' ui <- fluidPage(
#' numericInput(
#' "lon",
#' "Longitude",
#' value = 50
#' ),
#' numericInput(
#' "lat",
#' "Latitude",
#' value = 50
#' ),
#' textInput(
#' "city",
#' "City",
#' value = "City"
#' ),
#' sliderInput(
#' "value",
#' "Value",
#' min = 1,
#' max = 4,
#' step = 1,
#' value = 3
#' ),
#' actionButton(
#' "sub",
#' "Submit"
#' ),
#' datamapsOutput("map")
#' )
#'
#' server <- function(input, output){
#'
#' coords <- data.frame(city = c("London", "New York", "Beijing", "Sydney"),
#' lon = c(-0.1167218, -73.98002, 116.3883, 151.18518),
#' lat = c(51.49999, 40.74998, 39.92889, -33.92001),
#' values = 1:4)
#'
#' update <- reactive({
#' df <- data.frame(city = input$city, lon = input$lon, lat = input$lat, values = input$value)
#' rbind.data.frame(coords, df)
#' })
#'
#' output$map <- renderDatamaps({
#' coords %>%
#' datamaps() %>%
#' add_bubbles(lon, lat, values * 2, values, city)
#' })
#'
#' observeEvent(input$sub, {
#' datamapsProxy("map") %>%
#' add_data(update()) %>% # pass updated data
#' update_bubbles(lon, lat, values * 2, values, city) # update
#' })
#'
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @export
update_bubbles <- function(proxy, lon, lat, radius, color, name, ...){
  # Only objects created by datamapsProxy() can be updated.
  if(!inherits(proxy, "datamapsProxy"))
    stop("must pass proxy, see datamapsProxy.")
  # Resolve the bare column references against the data set via add_data().
  df <- get("data", envir = data_env)
  lon.v <- eval(substitute(lon), df)
  lat.v <- eval(substitute(lat), df)
  rad.v <- eval(substitute(radius), df)
  col.v <- eval(substitute(color), df)
  nam.v <- eval(substitute(name), df)
  # Ship the serialised bubbles to the JavaScript side of the widget.
  payload <- list(
    id      = proxy$id,
    bubbles = bubbles_data_(lon.v, lat.v, rad.v, col.v, nam.v, ...)
  )
  proxy$session$sendCustomMessage("update_bubbles", payload)
  # Return the proxy so calls can be chained.
  return(proxy)
}
#' Dynamically add bubbles
#'
#' Dynamically add bubbles using Shiny.
#'
#' @param proxy a proxy as returned by \code{\link{datamapsProxy}}.
#' @param locations column containing location names as \code{iso3c}.
#' @param color column containing color of each \code{location}.
#' @param reset reset previous changes to \code{default} color from \code{\link{datamaps}}.
#' @param ... any other variable to use for tooltip.
#'
#' @examples
#' \dontrun{
#' library(shiny)
#'
#' ui <- fluidPage(
#' selectInput(
#' "countrySelect",
#' "Select Country",
#' choices = c("USA", "FRA", "CHN", "RUS", "COG", "DZA", "BRA", "IND")
#' ),
#' sliderInput(
#' "value",
#' "Value",
#' min = 1,
#' max = 10,
#' value = 5
#' ),
#' actionButton("update", "Update"),
#' datamapsOutput("map")
#' )
#'
#' server <- function(input, output){
#'
#' data <- data.frame(name = c("USA", "FRA", "CHN", "RUS", "COG", "DZA", "BRA", "IND", "ALG", "AFG"),
#' color = 1:10)
#'
#' updated_data <- reactive({
#' data.frame(name = input$countrySelect, value = input$value)
#' })
#'
#' output$map <- renderDatamaps({
#' data %>%
#' datamaps() %>%
#' add_choropleth(name, color)
#' })
#'
#' observeEvent(input$update, {
#' datamapsProxy("map") %>%
#' add_data(updated_data()) %>% # pass updated data
#' update_choropleth(name, value, TRUE) # update
#' })
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @export
update_choropleth <- function(proxy, locations, color, reset = FALSE, ...){
  # Only objects created by datamapsProxy() can be updated.
  if(!inherits(proxy, "datamapsProxy"))
    stop("must pass proxy, see datamapsProxy.")
  # Both columns are mandatory; check before touching the stored data.
  if(missing(locations) || missing(color))
    stop("missing locations & color")
  # Resolve the bare column references against the data set via add_data().
  df <- get("data", envir = data_env)
  loc.v <- eval(substitute(locations), df)
  col.v <- eval(substitute(color), df)
  # `reset` tells the JavaScript side to restore the default fill first.
  payload <- list(
    id     = proxy$id,
    update = list(data = choro_data_(loc.v, col.v, ...), reset = reset)
  )
  proxy$session$sendCustomMessage("update_choropleth", payload)
  # Return the proxy so calls can be chained.
  return(proxy)
}
#' Dynamically update labels
#'
#' Dynamically update labels using Shiny
#'
#' @param proxy a proxy as returned by \code{\link{datamapsProxy}}.
#' @param label.color color of label.
#' @param line.width with of line.
#' @param font.size size of font label.
#' @param font.family family of font label.
#' @param ... any other option.
#'
#' @examples
#' \dontrun{
#' library(shiny)
#'
#' ui <- fluidPage(
#' actionButton(
#' "update",
#' "update labels"
#' ),
#' datamapsOutput("map")
#' )
#'
#' server <- function(input, output){
#' states <- data.frame(st = c("AR", "NY", "CA", "IL", "CO", "MT", "TX"),
#' val = c(10, 5, 3, 8, 6, 7, 2))
#'
#' output$map <- renderDatamaps({
#' states %>%
#' datamaps(scope = "usa", default = "lightgray") %>%
#' add_choropleth(st, val) %>%
#' add_labels()
#' })
#'
#' observeEvent(input$update, {
#' datamapsProxy("map") %>%
#' update_labels(sample(c("blue", "red", "orange", "green", "white"), 1)) # update
#' })
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @export
update_labels <- function(proxy, label.color = "#000", line.width = 1, font.size = 10, font.family = "Verdana", ...){
  # Only objects created by datamapsProxy() can be updated.
  if(!inherits(proxy, "datamapsProxy"))
    stop("must pass proxy, see datamapsProxy.")
  # Start from any extra options passed through ..., then set the named
  # arguments under the camelCase keys expected by the JavaScript library.
  extra <- list(...)
  extra$labelColor <- label.color
  extra$lineWidth  <- line.width
  extra$fontSize   <- font.size
  extra$fontFamily <- font.family
  proxy$session$sendCustomMessage("update_labels", list(id = proxy$id, opts = extra))
  # Return the proxy so calls can be chained.
  return(proxy)
}
#' Dynamically update legend
#'
#' Dynamically update legend using Shiny
#'
#' @param proxy a proxy as returned by \code{\link{datamapsProxy}}.
#'
#' @examples
#' \dontrun{
#' library(shiny)
#'
#' ui <- fluidPage(
#' actionButton(
#' "show",
#' "Show legend"
#' ),
#' datamapsOutput("map")
#' )
#'
#' server <- function(input, output){
#' states <- data.frame(st = c("AR", "NY", "CA", "IL", "CO", "MT", "TX"),
#' val = c(10, 5, 3, 8, 6, 7, 2))
#'
#' output$map <- renderDatamaps({
#' states %>%
#' datamaps(scope = "usa", default = "lightgray") %>%
#' add_choropleth(st, val)
#' })
#'
#' observeEvent(input$update, {
#' datamapsProxy("map") %>%
#' update_legend() # update
#' })
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @export
update_legend <- function(proxy){
  # Only objects created by datamapsProxy() can be updated.
  if(!inherits(proxy, "datamapsProxy"))
    stop("must pass proxy, see datamapsProxy.")
  # The JavaScript handler only needs to know which widget to redraw.
  proxy$session$sendCustomMessage("update_legend", list(id = proxy$id))
  # Return the proxy so calls can be chained.
  return(proxy)
}
#' Dynamically update arcs
#'
#' Dynamically update arcs with Shiny.
#'
#' @param proxy a proxy as returned by \code{\link{datamapsProxy}}.
#' @inheritParams add_arcs
#' @inheritParams add_arcs_name
#'
#' @examples
#' \dontrun{
#' library(shiny)
#'
#' ui <- fluidPage(
#'
#' textInput(
#' "from",
#' "Origin",
#' value = "USA"
#' ),
#' textInput(
#' "to",
#' "Destination",
#' value = "RUS"
#' ),
#' actionButton(
#' "submit",
#' "Draw arc"
#' ),
#' datamapsOutput("map")
#' )
#'
#' server <- function(input, output){
#'
#' arc <- reactive({
#' data.frame(from = input$from, to = input$to)
#' })
#'
#' output$map <- renderDatamaps({
#' datamaps()
#' })
#'
#' observeEvent(input$submit, {
#' datamapsProxy("map") %>%
#' add_data(arc()) %>%
#' update_arcs_name(from, to)
#' })
#'
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @rdname update_arcs
#' @export
update_arcs <- function(proxy, origin.lon, origin.lat, destination.lon, destination.lat, ...){
  # Only objects created by datamapsProxy() can be updated.
  if(!inherits(proxy, "datamapsProxy"))
    stop("must pass proxy, see datamapsProxy.")
  # Resolve the four coordinate columns against the data set via add_data().
  df <- get("data", envir = data_env)
  from.lon <- eval(substitute(origin.lon), df)
  from.lat <- eval(substitute(origin.lat), df)
  to.lon   <- eval(substitute(destination.lon), df)
  to.lat   <- eval(substitute(destination.lat), df)
  payload <- list(
    id   = proxy$id,
    arcs = arc_data__(from.lon, from.lat, to.lon, to.lat, ...)
  )
  proxy$session$sendCustomMessage("update_arcs", payload)
  # Return the proxy so calls can be chained.
  return(proxy)
}
#' @rdname update_arcs
#' @export
update_arcs_name <- function(proxy, origin, destination, ...){
  # Guard added for consistency: every other update_* function in this file
  # rejects objects that are not datamaps proxies; this one did not.
  if(!inherits(proxy, "datamapsProxy"))
    stop("must pass proxy, see datamapsProxy.")
  # Resolve the origin/destination columns against the data set via add_data().
  data <- get("data", envir = data_env)
  ori <- eval(substitute(origin), data)
  des <- eval(substitute(destination), data)
  msg <- list(id = proxy$id, arcs = arc_data_(ori, des, ...))
  # Fixed: a leftover debug print(msg) dumped the full payload to the console
  # on every call; removed.
  proxy$session$sendCustomMessage("update_arcs", msg)
  # Return the proxy so calls can be chained.
  return(proxy)
}
#' Remove map
#'
#' Remove the map
#'
#' @param proxy a proxy as returned by \code{\link{datamapsProxy}}.
#'
#' @examples
#' \dontrun{
#' library(shiny)
#'
#' ui <- fluidPage(
#' actionButton(
#' "delete",
#' "Delete map"
#' ),
#' datamapsOutput("map")
#' )
#'
#' server <- function(input, output){
#' output$map <- renderDatamaps({
#' datamaps()
#' })
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @export
delete_map <- function(proxy){
  # Guard added for consistency: the other proxy functions in this file all
  # reject objects that are not datamaps proxies before sending messages.
  if(!inherits(proxy, "datamapsProxy"))
    stop("must pass proxy, see datamapsProxy.")
  # The JavaScript handler only needs the widget id to remove the map.
  msg <- list(id = proxy$id)
  proxy$session$sendCustomMessage("delete_map", msg)
  # Return the proxy so calls can be chained.
  return(proxy)
}
|
ad9fcda04c1b79cb77562b950eca4fc3c3b62f15
|
5fa83ec459262563b3b334092da42e3e320bf5a3
|
/steem_crawler.R
|
20abaede3ea1049c07753d26bf6a4367d6ed28b6
|
[] |
no_license
|
ferozah83/Steemit_Monthly_Analytics
|
d8d23f9add566162663158ef1c4245a5839c7569
|
a3f2ff2a16bbd73c904ad72360a9d40843c27bdd
|
refs/heads/master
| 2021-04-28T01:14:42.703194
| 2018-02-20T23:56:06
| 2018-02-20T23:56:06
| 122,271,112
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,076
|
r
|
steem_crawler.R
|
# Crawler: scrapes the newest /kr post titles and links from steemit.com and
# appends them to a CSV, once every 30 minutes for 100 iterations.
#
# NOTE(review): install.packages(), rm(list = ls()) and setwd() inside a
# script are side effects best removed; kept unchanged here for review.
install.packages(c('rvest','httr','KoNLP','stringr','tm','qgraph','xml2'))
install.packages("dplyr")
rm(list=ls())
library(rvest)
library(httr)
library(KoNLP)
library(stringr)
library(tm)
library(qgraph)
library(dplyr)
library('xml4')
# One page fetch per iteration; Sys.sleep(1800) spaces them 30 minutes apart.
for ( x in 1:100){
url_base <- 'https://steemit.com/created/kr'
steem <- read_html(url_base)
steem
#steem_title <- html_nodes(steem, 'div') %>% html_nodes('h2')
# Post titles are the <a> anchors inside <h2> headings.
steem_title <- html_nodes(steem, 'div') %>% html_nodes('h2') %>% html_nodes('a')
steem_title_text <- html_text(steem_title)
steem_title_link <- html_nodes(steem, 'div') %>% html_nodes('h2') %>% html_nodes('a') %>% html_attr('href')
# NOTE(review): Number = 1:20 assumes exactly 20 titles were scraped; a
# different count makes cbind.data.frame recycle or error - confirm.
steemit <- cbind.data.frame(Number=c(1:20),TITLE=steem_title_text , LINK=steem_title_link)
View(steemit)
##class(steem_title_text)
setwd("E:/Study/R")
##final_data = data.frame(steemit,stringsAsFactors = F)
##write.csv(final_data, 'steem_test.csv')
# append = TRUE accumulates rows across iterations (header repeated each time).
write.table(steemit, file = "steem_test_total.csv", sep = ",", row.names = F, append = TRUE)
Sys.sleep(1800)
}
|
800fdf2917d33e958ee339da6b6b0cb73b3d9625
|
cc3b15cc6e5ca42bf8cb6a241e09aa96c4271a24
|
/man/medoids.Rd
|
addb360e77dc7f5bde51342a81ef8530e1fc512f
|
[] |
no_license
|
larssnip/microcontax
|
a840a6c7f32c16c462de2a5aed97c54f408acc7e
|
bb6fc9ac6545717786d33e4e870095a35decc519
|
refs/heads/master
| 2021-01-15T22:20:30.999000
| 2020-08-11T13:18:19
| 2020-08-11T13:18:19
| 99,899,704
| 1
| 1
| null | 2020-06-12T10:40:00
| 2017-08-10T08:25:27
|
R
|
UTF-8
|
R
| false
| true
| 1,113
|
rd
|
medoids.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{medoids}
\alias{medoids}
\title{The ConTax medoids}
\usage{
data(medoids)
}
\description{
The genus medoids from the ConTax data set.
}
\details{
\code{medoids} is a \code{data.frame} object containing the medoid sequences for each genus in
the ConTax data sets (both \code{contax.trim} and \code{contax.full}).
The medoid sequence of a genus is the sequence having the smallest sum of distances to all other members
of the same genus. Thus, it is the sequence closest to the centre of the genus. The medoids can be used as
the representative of each genus, e.g. for building trees for the entire taxonomy.
The taxonomy information for each sequence can be extracted from the \code{Header} column by the supplied
extractor-functions \code{\link{getDomain}}, \code{\link{getPhylum}},...,\code{\link{getGenus}}.
}
\examples{
data(medoids)
summary(medoids)
}
\seealso{
\code{\link[microcontax.data]{contax.full}}, \code{\link{getDomain}}.
}
\author{
Hilde Vinje, Kristian Hovde Liland, Lars Snipen.
}
|
2c96fafb77b075ec5bf895dffc1eb80ca87b6c57
|
d3495d13f00666d4f65ca25fc113b9e65dccff86
|
/annotation/annotate/annotate_pedigree_af.R
|
2278506ef75ce032ebf20aaedb61b93fbb784d70
|
[] |
no_license
|
valecipriani/DNASeq_pipeline
|
45a991d5e6a69df0f27aecae82ccf72f1277d680
|
f30a1605aa97ecea1c2c2e34035004bea8b1b5e8
|
refs/heads/master
| 2021-01-20T11:21:50.391525
| 2015-11-07T19:59:38
| 2015-11-07T19:59:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,018
|
r
|
annotate_pedigree_af.R
|
#!/usr/bin/env Rscript
# Annotate a variant table (CSV on stdin) with per-pedigree case/control
# genotype counts and allele frequencies; the augmented CSV goes to stdout.
# Helper: print to stderr so diagnostics don't pollute the stdout CSV.
err.cat <- function(x) cat(x, '\n', file=stderr())
###
# Read the variant table from stdin; genotype columns are named "geno.<sample>".
err.cat(dim( d <- read.csv(file('stdin')) ))
#
# Sample IDs = genotype column names with the "geno." prefix stripped.
samples <- gsub('geno\\.','',grep('geno',colnames(d),value=TRUE))
#
pedigree <- 'DNA_pedigree_details.csv'
# pedigree
message('dim of pedigree')
# NOTE(review): the message above says "dim" but only nrow is printed.
err.cat(nrow(pedigree <- read.csv(pedigree)))
# Keep only the pedigree rows for samples present in the variant table.
sample.affection <- pedigree[ which( pedigree$uclex.sample %in% samples ), c('uclex.sample','Affection')]
# cases (Affection == 2)
message('cases')
err.cat(cases <- sample.affection[which(sample.affection$Affection==2),'uclex.sample'])
message('number of cases')
err.cat(length(cases))
# controls (Affection == 1)
message('controls')
err.cat(controls <- sample.affection[which(sample.affection$Affection==1),'uclex.sample'])
message('number of controls')
err.cat(length(controls))
# calculate(): per-variant genotype summary for one group of samples.
#   group      - character vector of sample names (without the "geno." prefix)
#   group.name - label used to prefix the output column names
# Reads the genotype columns ("geno.<sample>") from the data frame `d` in the
# calling environment; genotypes are coded 0 = WT, 1 = HET, 2 = HOM, NA = missing.
# Returns a data.frame with columns <group.name>_{WT,HET,HOM,MISS,AF,MF}.
calculate <- function(group, group.name){
  # Genotype columns belonging to this group.
  geno.cols <- paste("geno", group, sep = ".")
  genos <- d[, which(names(d) %in% geno.cols)]
  # Count genotype classes per row (variant). which() drops NAs, so summing
  # the non-NA comparisons reproduces length(which(...)) exactly.
  count.rows <- function(pred) apply(genos, 1, function(g) sum(pred(g), na.rm = TRUE))
  n.wt   <- count.rows(function(g) g == 0)
  n.het  <- count.rows(function(g) g == 1)
  n.hom  <- count.rows(function(g) g == 2)
  n.miss <- count.rows(function(g) is.na(g))
  n.called <- n.wt + n.het + n.hom
  # Alternate allele frequency over called genotypes (2 alleles per sample).
  af <- (n.het + 2 * n.hom) / (2 * n.called)
  # Mutant (carrier) frequency: fraction of called samples that are HET or HOM.
  mf <- (n.het + n.hom) / n.called
  out <- as.data.frame(cbind(n.wt, n.het, n.hom, n.miss, af, mf))
  names(out) <- paste(group.name, c("WT", "HET", "HOM", "MISS", "AF", "MF"), sep = "_")
  return(out)
}
# Append case/control summary columns to the variant table.
ca <- calculate(cases, 'cases')
co <- calculate(controls, 'controls')
d <- cbind(d,ca,co)
# file='' writes the annotated CSV to stdout (pipeline-friendly).
write.csv( d , quote=FALSE, file='', row.names=FALSE)
|
95148fd82782c3a2d6ebac13873360b9dfcd9e0e
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.internet.of.things/man/iot_replace_topic_rule.Rd
|
d447d67977d6ec536648b22546a2a5209bb99de5
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 9,953
|
rd
|
iot_replace_topic_rule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_replace_topic_rule}
\alias{iot_replace_topic_rule}
\title{Replaces the rule}
\usage{
iot_replace_topic_rule(ruleName, topicRulePayload)
}
\arguments{
\item{ruleName}{[required] The name of the rule.}
\item{topicRulePayload}{[required] The rule payload.}
}
\value{
An empty list.
}
\description{
Replaces the rule. You must specify all parameters for the new rule.
Creating rules is an administrator-level action. Any user who has
permission to create rules will be able to access data processed by the
rule.
}
\section{Request syntax}{
\preformatted{svc$replace_topic_rule(
ruleName = "string",
topicRulePayload = list(
sql = "string",
description = "string",
actions = list(
list(
dynamoDB = list(
tableName = "string",
roleArn = "string",
operation = "string",
hashKeyField = "string",
hashKeyValue = "string",
hashKeyType = "STRING"|"NUMBER",
rangeKeyField = "string",
rangeKeyValue = "string",
rangeKeyType = "STRING"|"NUMBER",
payloadField = "string"
),
dynamoDBv2 = list(
roleArn = "string",
putItem = list(
tableName = "string"
)
),
lambda = list(
functionArn = "string"
),
sns = list(
targetArn = "string",
roleArn = "string",
messageFormat = "RAW"|"JSON"
),
sqs = list(
roleArn = "string",
queueUrl = "string",
useBase64 = TRUE|FALSE
),
kinesis = list(
roleArn = "string",
streamName = "string",
partitionKey = "string"
),
republish = list(
roleArn = "string",
topic = "string",
qos = 123
),
s3 = list(
roleArn = "string",
bucketName = "string",
key = "string",
cannedAcl = "private"|"public-read"|"public-read-write"|"aws-exec-read"|"authenticated-read"|"bucket-owner-read"|"bucket-owner-full-control"|"log-delivery-write"
),
firehose = list(
roleArn = "string",
deliveryStreamName = "string",
separator = "string",
batchMode = TRUE|FALSE
),
cloudwatchMetric = list(
roleArn = "string",
metricNamespace = "string",
metricName = "string",
metricValue = "string",
metricUnit = "string",
metricTimestamp = "string"
),
cloudwatchAlarm = list(
roleArn = "string",
alarmName = "string",
stateReason = "string",
stateValue = "string"
),
cloudwatchLogs = list(
roleArn = "string",
logGroupName = "string"
),
elasticsearch = list(
roleArn = "string",
endpoint = "string",
index = "string",
type = "string",
id = "string"
),
salesforce = list(
token = "string",
url = "string"
),
iotAnalytics = list(
channelArn = "string",
channelName = "string",
batchMode = TRUE|FALSE,
roleArn = "string"
),
iotEvents = list(
inputName = "string",
messageId = "string",
batchMode = TRUE|FALSE,
roleArn = "string"
),
iotSiteWise = list(
putAssetPropertyValueEntries = list(
list(
entryId = "string",
assetId = "string",
propertyId = "string",
propertyAlias = "string",
propertyValues = list(
list(
value = list(
stringValue = "string",
integerValue = "string",
doubleValue = "string",
booleanValue = "string"
),
timestamp = list(
timeInSeconds = "string",
offsetInNanos = "string"
),
quality = "string"
)
)
)
),
roleArn = "string"
),
stepFunctions = list(
executionNamePrefix = "string",
stateMachineName = "string",
roleArn = "string"
),
timestream = list(
roleArn = "string",
databaseName = "string",
tableName = "string",
dimensions = list(
list(
name = "string",
value = "string"
)
),
timestamp = list(
value = "string",
unit = "string"
)
),
http = list(
url = "string",
confirmationUrl = "string",
headers = list(
list(
key = "string",
value = "string"
)
),
auth = list(
sigv4 = list(
signingRegion = "string",
serviceName = "string",
roleArn = "string"
)
)
),
kafka = list(
destinationArn = "string",
topic = "string",
key = "string",
partition = "string",
clientProperties = list(
"string"
)
)
)
),
ruleDisabled = TRUE|FALSE,
awsIotSqlVersion = "string",
errorAction = list(
dynamoDB = list(
tableName = "string",
roleArn = "string",
operation = "string",
hashKeyField = "string",
hashKeyValue = "string",
hashKeyType = "STRING"|"NUMBER",
rangeKeyField = "string",
rangeKeyValue = "string",
rangeKeyType = "STRING"|"NUMBER",
payloadField = "string"
),
dynamoDBv2 = list(
roleArn = "string",
putItem = list(
tableName = "string"
)
),
lambda = list(
functionArn = "string"
),
sns = list(
targetArn = "string",
roleArn = "string",
messageFormat = "RAW"|"JSON"
),
sqs = list(
roleArn = "string",
queueUrl = "string",
useBase64 = TRUE|FALSE
),
kinesis = list(
roleArn = "string",
streamName = "string",
partitionKey = "string"
),
republish = list(
roleArn = "string",
topic = "string",
qos = 123
),
s3 = list(
roleArn = "string",
bucketName = "string",
key = "string",
cannedAcl = "private"|"public-read"|"public-read-write"|"aws-exec-read"|"authenticated-read"|"bucket-owner-read"|"bucket-owner-full-control"|"log-delivery-write"
),
firehose = list(
roleArn = "string",
deliveryStreamName = "string",
separator = "string",
batchMode = TRUE|FALSE
),
cloudwatchMetric = list(
roleArn = "string",
metricNamespace = "string",
metricName = "string",
metricValue = "string",
metricUnit = "string",
metricTimestamp = "string"
),
cloudwatchAlarm = list(
roleArn = "string",
alarmName = "string",
stateReason = "string",
stateValue = "string"
),
cloudwatchLogs = list(
roleArn = "string",
logGroupName = "string"
),
elasticsearch = list(
roleArn = "string",
endpoint = "string",
index = "string",
type = "string",
id = "string"
),
salesforce = list(
token = "string",
url = "string"
),
iotAnalytics = list(
channelArn = "string",
channelName = "string",
batchMode = TRUE|FALSE,
roleArn = "string"
),
iotEvents = list(
inputName = "string",
messageId = "string",
batchMode = TRUE|FALSE,
roleArn = "string"
),
iotSiteWise = list(
putAssetPropertyValueEntries = list(
list(
entryId = "string",
assetId = "string",
propertyId = "string",
propertyAlias = "string",
propertyValues = list(
list(
value = list(
stringValue = "string",
integerValue = "string",
doubleValue = "string",
booleanValue = "string"
),
timestamp = list(
timeInSeconds = "string",
offsetInNanos = "string"
),
quality = "string"
)
)
)
),
roleArn = "string"
),
stepFunctions = list(
executionNamePrefix = "string",
stateMachineName = "string",
roleArn = "string"
),
timestream = list(
roleArn = "string",
databaseName = "string",
tableName = "string",
dimensions = list(
list(
name = "string",
value = "string"
)
),
timestamp = list(
value = "string",
unit = "string"
)
),
http = list(
url = "string",
confirmationUrl = "string",
headers = list(
list(
key = "string",
value = "string"
)
),
auth = list(
sigv4 = list(
signingRegion = "string",
serviceName = "string",
roleArn = "string"
)
)
),
kafka = list(
destinationArn = "string",
topic = "string",
key = "string",
partition = "string",
clientProperties = list(
"string"
)
)
)
)
)
}
}
\keyword{internal}
|
6f5f1191f7cff8705222aef981b1577ecca0d8d6
|
54796cc65f9afd8043705375eb0755f27834761f
|
/linregpackage/tests/testthat/test_linreg.R
|
de100ea15f60865b60fdb6a44f690b676ce1f254
|
[] |
no_license
|
camgu844/Lab4
|
ec574a960617f3a4cb7c35b93be847a1f6c89de1
|
77a1c8bc1b455d094653bfe1c31b7b4f040d1b6d
|
refs/heads/master
| 2021-03-12T23:34:39.183936
| 2015-10-05T19:49:00
| 2015-10-05T19:49:00
| 42,175,055
| 0
| 1
| null | 2015-09-18T10:58:45
| 2015-09-09T11:44:05
|
R
|
UTF-8
|
R
| false
| false
| 232
|
r
|
test_linreg.R
|
library(linregpackage)

context("tests the function linreg")

data(faithful)
formula <- eruptions ~ waiting
data <- faithful

# linreg() must return an object of S3 class "linreg".
test_that("Class returned by linreg", {
  # expect_that()/equals() are deprecated in current testthat;
  # expect_s3_class() is the supported replacement and also works
  # when the object carries additional classes.
  expect_s3_class(linreg(formula, data), "linreg")
})
|
954c702083eaa3bd5bc815e97bd2339890dcda6d
|
85afd9a0e2bb8b97d2b1e507b8beadd18c24b1c6
|
/figure/scripts/12 - Appendix_7_absolute_k_nitrogen.R
|
31059ef3bad8fe29c17d249465f90e33fb63e1f8
|
[
"MIT"
] |
permissive
|
SrivastavaLab/ecological-responses-altered-rainfall
|
e7280c61777ed373ce4c33d3550ddcd2cc8daae4
|
11b3b47a134a329eef29abc6c80bc3ffc87f68ea
|
refs/heads/master
| 2021-08-31T15:50:15.085311
| 2017-12-21T23:33:03
| 2017-12-21T23:33:03
| 114,942,363
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,599
|
r
|
12 - Appendix_7_absolute_k_nitrogen.R
|
# clean up the environment ------------------------------------------------------------------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this script
# is sourced; consider dropping it and running the script in a fresh session.
rm(list=ls(all=TRUE))

# load packages -----------------------------------------------------------------------------------------------------------------------
library(tidyverse)
library(grid)
library(gridExtra)
library(scales)

# load data ---------------------------------------------------------------------------------------------------------------------------
# Pre-computed visreg summaries for Appendix 7 -- presumably produced by an
# earlier script in this pipeline (TODO confirm the generating script).
Appendix7_data <- readRDS("figure/data/Appendix7_summary_list.rds")

# general theme for ggplot2 -----------------------------------------------------------------------------------------------------------
# Shared minimal theme: white panel, black axis lines, no grid.
general_theme <- theme(panel.border = element_blank(),
                       panel.background = element_blank(),
                       axis.title = element_text(face = "bold", size = 10, colour = "black"),
                       axis.text = element_text(size = 8, colour = "black"),
                       axis.line.x = element_line(colour = "black", size = 0.2),
                       axis.line.y = element_line(colour = "black", size = 0.2),
                       panel.grid = element_blank(),
                       axis.title.x = element_text(margin = margin(10, 0, 0, 0)),
                       axis.title.y = element_text(margin = margin(0, 3, 0, 0)))

# colors ------------------------------------------------------------------------------------------------------------------------------
# c("plum4", "darkorange2", "forestgreen", "deepskyblue1", "mediumblue", "lawngreen", "firebrick1")
# ARGENTINA, CARDOSO, COLOMBIA, COSTA RICA, FRENCH GUIANA, MACAE, PUERTO RICO

# figure ------------------------------------------------------------------------------------------------------------------------------
#DS corrected code to have linear values of abs k placed on a log x axis
# y values were fitted on the (x + 4)^0.125 scale; (visregRes^(1/0.125)) - 4
# back-transforms them, and the custom y trans re-applies the same transform
# so the axis shows the original (linear) delta-15N values.
appendix7 <- ggplot() +
  geom_point(data = Appendix7_data$abs_k_Nfig$res, mapping = aes(x = abs(intended.k), y = (visregRes^(1/0.125)) - 4, fill = site),
             colour = "black", shape = 21, size = 1.5, alpha = 0.3) +
  geom_line(data = Appendix7_data$abs_k_Nfig$fit, mapping = aes(x = abs(intended.k), y = (visregFit^(1/0.125)) - 4, colour = site), size = 0.6) +
  scale_y_continuous(name = expression(bold(paste("Detrital nitrogen uptake (",{delta}^15*N,")"))),
                     trans = trans_new("eight", function(x) (x + 4)^0.125, function(x) (x^8) - 4,
                                       breaks = extended_breaks(n = 10),
                                       format = scientific_format(digits = 0)),
                     breaks = c(1e-1, 2e1, 5e1, 1e2, 2e2),
                     labels = c("1e-1", "2e1", "5e1", "1e2", "2e2")) +
  scale_x_continuous(name = "Absolute rainfall dispersion (k)", breaks = c(0.03, 0.1, 0.3,1), labels = c(0.03, 0.1, 0.3, 1), trans = "log") +
  scale_colour_manual(values = c("plum4", "darkorange2", "forestgreen", "deepskyblue1", "mediumblue", "lawngreen", "firebrick1")) +
  scale_fill_manual(values = c("plum4", "darkorange2", "forestgreen", "deepskyblue1", "mediumblue", "lawngreen", "firebrick1")) +
  general_theme +
  theme(legend.position = "none")
appendix7

# saving figure -----------------------------------------------------------------------------------------------------------------------
# Single-column journal size (89 mm); PNG saved at higher dpi for review copies.
ggsave(filename = "figure/figures/Appendix_7.pdf", plot = appendix7, width = 89, height = 89, units = "mm", dpi = 300)
ggsave(filename = "figure/figures/Appendix_7.png", plot = appendix7, width = 89, height = 89, units = "mm", dpi = 600)
|
bcd0d57aea9672323f31db4f0c044c407e94798a
|
ce3bc493274116150497e73aa7539fef1c07442a
|
/man/queryDB.Rd
|
d2482b1c5c7442c622819734c76fa707530cf4d9
|
[] |
no_license
|
laresbernardo/lares
|
6c67ff84a60efd53be98d05784a697357bd66626
|
8883d6ef3c3f41d092599ffbdd4c9c352a9becef
|
refs/heads/main
| 2023-08-10T06:26:45.114342
| 2023-07-27T23:47:30
| 2023-07-27T23:48:57
| 141,465,288
| 235
| 61
| null | 2023-07-27T15:58:31
| 2018-07-18T17:04:39
|
R
|
UTF-8
|
R
| false
| true
| 1,002
|
rd
|
queryDB.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/querieSQL.R
\name{queryDB}
\alias{queryDB}
\title{PostgreSQL Queries on Database (Read)}
\usage{
queryDB(query, from, creds = NA)
}
\arguments{
\item{query}{Character. SQL Query}
\item{from}{Character. Credential's user (see \code{get_creds()})}
\item{creds}{Character. Credential's directory (see \code{get_creds()})}
}
\value{
data.frame. Result of fetching the \code{query} data.
}
\description{
This function lets the user query a PostgreSQL database. Previously
was called \code{queryDummy} but was replaced and deprecated for a
more general function by using the \code{from} parameter.
}
\seealso{
Other Credentials:
\code{\link{db_download}()},
\code{\link{db_upload}()},
\code{\link{get_credentials}()},
\code{\link{get_tweets}()},
\code{\link{mail_send}()},
\code{\link{queryGA}()},
\code{\link{slackSend}()},
\code{\link{stocks_file}()},
\code{\link{stocks_report}()}
}
\concept{Credentials}
\concept{Database}
|
7902ea1f26e6b5cff14c23a861bdca5229f0d633
|
2e83b3630df102c1581cab01ba15b477d50dacfb
|
/plotboxScript.R
|
de98228c2ddbd2d9855baf69c92a4cfbedb7373d
|
[] |
no_license
|
pacoraggio/MelbourneHousePriceShinyApp
|
b543f50a5479b717ccb03799771cd22be75c4825
|
533c5cdd84184c217f4fb2c119cecff9129ac65c
|
refs/heads/master
| 2022-04-07T17:36:07.857074
| 2020-02-14T16:35:40
| 2020-02-14T16:35:40
| 238,421,217
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
plotboxScript.R
|
# Scripting for testing plotbox configurations
# Interactive scratch script: subsets the `df.mel` housing data (assumed to be
# loaded by another script -- TODO confirm) and compares price distributions
# with plotly boxplots.
library(plotly)

# Choices offered by the region selector in the Shiny app.
region <- c("Select a Region",
            "Northern Metropolitan",
            "Western Metropolitan",
            "Southern Metropolitan",
            "Eastern Metropolitan",
            "South-Eastern Metropolitan",
            "Eastern Victoria",
            "Northern Victoria",
            "Western Victoria",
            "All")

# Filter parameters: rooms, bathrooms, and dwelling type ("h" presumably
# means house -- confirm against the data dictionary).
n.rooms <- 4
n.bath <- 2
t.type <- "h"

# Baseline subset: all regions matching rooms/bath/type.
df.mel1 <- df.mel[which(df.mel$Rooms == n.rooms &
                          df.mel$Bathroom == n.bath &
                          df.mel$Type == t.type),]

r.region <- region[9]

# Region-specific subset.
# NOTE(review): when r.region == "Select a Region" this only prints and never
# defines df.mel2, so the plot_ly call below would fail in that branch.
if(r.region == "Select a Region")
{
  print(r.region)
} else if (r.region == "All")
{
  print(r.region)
  df.mel2 <- df.mel[which(df.mel$Rooms == n.rooms &
                            df.mel$Bathroom == n.bath &
                            df.mel$Type == t.type),]
}else
{
  df.mel2 <- df.mel[which(df.mel$Rooms == n.rooms &
                            df.mel$Bathroom == n.bath &
                            df.mel$Type == t.type &
                            df.mel$Regionname == r.region),]
}

# Side-by-side boxplots: all-region prices vs the selected-region prices.
p <- plot_ly(y = ~df.mel1$Price,
             type = "box") %>%
  add_trace(y = ~df.mel2$Price)
p

# NOTE(review): the commented-out rsconnect credentials below are secrets
# committed to source control -- rotate the token/secret and remove them.
# rsconnect::setAccountInfo(name='pacoraggio',
#                           token='1C95A63CF4106221EE1B090217430620',
#                           secret='u9sx+jPOVcYZyTcmWfZUsqIpIT4f1CMppQ4Ko6ES')
#
# rsconnect::deployApp('C:/Users/pacor/Desktop/Paolo/WIP/Coursera/09_Developing_Data_Products-master/GIT/Week4Assignment/Beta/MelbourneHousePriceShinyApp')
|
861c2a2a94f2be4bd193b3f76e1340507b39dc78
|
80355e30ba75884de9bf4091ad49caef52dd8dad
|
/hw04.R
|
69ad7f71b48b1a145f4e7a6e81891a938e9d1afa
|
[
"BSD-2-Clause"
] |
permissive
|
maikenek/stata-to-r
|
491252c31fae87be198d6b022adf4f907672afad
|
e06f2813d4a0c4a01fa587e58b331512c2fb6d17
|
refs/heads/master
| 2020-03-12T14:58:10.431634
| 2018-05-15T11:48:54
| 2018-05-15T11:48:54
| 130,680,336
| 0
| 0
| null | 2018-04-23T10:32:05
| 2018-04-23T10:32:04
| null |
UTF-8
|
R
| false
| false
| 10,751
|
r
|
hw04.R
|
# ==============================================================================
# File name: hw04.do
# Task: Homework
# Project: Research seminar pt. 1
# Author: YOUR NAME HERE
# ==============================================================================
#
#
# ------------------------------------------------------------------------------
# About this dofile:
# 0) Load and check data
# 1) Editing missing values
# 2) Recode
# 3) Generate and replace
# 4) Labels
# 5) Sort and save data
# ------------------------------------------------------------------------------
#
#
# ------------------------------------------------------------------------------
# Notes:
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
# 0) Load and check data
# Tasks:
# a. Load the data combi.dta
# b. Perform some data checks to find out how missing values are coded.
# ------------------------------------------------------------------------------*/
# a. Load the data combi.dta
# Load the Stata file; read.dta13() is from the readstata13 package and
# `pdata` (processed-data directory) is assumed to be defined by an earlier
# setup script -- TODO confirm.
combi <- read.dta13(file.path(pdata, "combi.dta"))
# b. Perform some data checks to find out how missing values are coded.
View(combi)
stat.desc(combi)   # descriptive stats; stat.desc() is from the pastecs package
table(combi$math) # values smaller than 0 are missings, but R only reports those coded as NA (in Stata file -999) as missings

# ------------------------------------------------------------------------------
# 1) Missing values
# Task:
#   Recode all numerical missing values (except NA) to NA.
# ------------------------------------------------------------------------------
# Loop over every column and blank out negative sentinel codes (-1 .. -999).
# NOTE(review): the `< 0` test is applied to every column; confirm all
# columns are numeric, since factor/character columns would warn or misbehave.
for (val in names(combi)){
combi[[val]][combi[[val]]<0] <- NA # [[]] used instead of $ to call variable
}
View(combi)
# ------------------------------------------------------------------------------
# #2 Recode
# Besides simple recoding, also new variables and new variable labels can be
# created.
#
# Additional tasks:
# a. Recode the variables hgcrev_b, spousgrade_b, partgrade_b, hgcrev_b,
# spousgrade_b, and partgrade_b so that 0/11 become 1 (No degree), 12 becomes
# 2 (Highschool degree), 13/15 become 3 (Some college), and 16/20 become 4
# (4-year college degree), generating new variables with the suffic _cat.
# b. Recode the variables respptr_b and relspptr_t so that -999/0 become 1
# (No partner), 1 becomes 2 (Spouse), 33 becomes 3 (Partner), and 36 becomes .
# generating the variables famstr_b and famstr_t.
# ------------------------------------------------------------------------------*/
#
# Simple recode with generating new variable
# Firstborn indicator: 1 if birth order is 1, 0 for later births, NA otherwise
# (including the NAs produced by the missing-value recode above).
combi$fstbrn <- ifelse(combi$bthordr==1, 1, ifelse(combi$bthordr>=1, 0, NA))
# Recode sex (2 = female, 1 = male) into a 0/1 male dummy.
# NOTE(review): levels() on a non-factor only sets a "levels" attribute; it
# does not turn cmale into a factor. Kept as-is for compatibility.
combi$cmale <- ifelse(combi$csex==2,0, ifelse(combi$csex==1,1,NA))
levels(combi$cmale) <- c("Female", "Male", "missed")
# a. Collapse years of schooling into 4 categories (new *_cat variables):
#    0-11 -> 1 (No degree), 12 -> 2 (Highschool degree),
#    13-15 -> 3 (Some college), 16+ -> 4 (4-year college degree).
# Fixes vs the original:
#   * `<-` instead of `=`, and a loop vector that no longer shadows base::list().
#   * Each *_cat column is pre-allocated full-length. The original built the
#     column by subscript-assigning into a nonexistent element, which yields a
#     vector only as long as the last matching row and can make the data-frame
#     assignment fail or recycle when trailing rows fall outside 0-11.
#   * which() makes the NA handling explicit (NA rows are simply skipped).
edu_vars <- c("hgcrev_b","spousgrade_b","partgrade_b","hgcrev_t","spousgrade_t","partgrade_t")
for (i in edu_vars){
  cat_col <- paste0(i, "_cat")
  combi[[cat_col]] <- NA_real_
  combi[[cat_col]][which(combi[[i]] >= 0 & combi[[i]] <= 11)] <- 1
  combi[[cat_col]][which(combi[[i]] == 12)] <- 2
  combi[[cat_col]][which(combi[[i]] >= 13 & combi[[i]] <= 15)] <- 3
  combi[[cat_col]][which(combi[[i]] >= 16)] <- 4
  levels(combi[[cat_col]]) <- c("No Degree", "Highschool Degree", "Some College", "4-year college degree")
}
# b. Family structure: -999/0 -> 1 (No partner), 1 -> 2 (Spouse),
#    33 -> 3 (Partner), 36 -> NA.
# Bug fix: the task maps -999 THROUGH 0 to "No partner", so the condition must
# be <= 0, not < 0 (the original dropped relspptr == 0 into NA). Values of
# -999 were already recoded to NA above and therefore remain NA here.
combi$famstr_b <- ifelse(combi$relspptr_b<=0, 1, ifelse(combi$relspptr_b==1,2,ifelse(combi$relspptr_b==33,3,NA)))
levels(combi$famstr_b) <- c("No partner","Spouse","Partner", "Missed")
combi$famstr_t <- ifelse(combi$relspptr_t<=0, 1, ifelse(combi$relspptr_t==1,2,ifelse(combi$relspptr_t==33,3,NA)))
levels(combi$famstr_t) <- c("No partner","Spouse","Partner", "Missed")
# ------------------------------------------------------------------------------
# 3) Generate and replace
# Additional tasks:
# a. Generate the new variables msage_dyr, msage_yr, agech_dyr, and agech_yr
# following the example for csage.
# b. Generate the variable malc so that it containes the information from malcf
# but is 0 if malcu is 0.
# c. Generate the variable lbweight so that it is 0 if birth weight is 88 to 768
# ounces and 1 if it is below 88 ounces.
# d. Generate the variable mdrug so that is 0 if the mother didn't use THC and
# cocain during 12 months before birth and is one if the mother used either in
# that period.
# e. Generate the variable btime so that it is 0 if the child was born around
# the due data (cborndue) and takes the value of nwearlat if it was born
# after the due data (cearlat=2) and the negative value of nwearlat if it was
# born before the due data (cearlat=1).
# ------------------------------------------------------------------------------*/
#
# * generate new variable
# Child age at assessment, converted from months to years.
combi$csage_dyr <- combi$csage/12 #decimal years
combi$csage_yr <- as.integer(combi$csage_dyr, length=0) # truncates toward zero
# a. Same month-to-year conversions for msage and agech.
list2 <- c("msage","agech")
for (i in list2){
combi[[paste0(i,"_dyr")]] <- combi[[i]]/12 #decimal years
combi[[paste0(i,"_yr")]] <- as.integer(combi[[paste0(i,"_dyr")]], length=0) #integer cuts of values behind the point
}
# Combine smoking frequency with smoking use: non-users get 0.
# Indexed assignment with which() keeps the msmof value where msmou is NA
# (the original ifelse(NA, ...) silently overwrote those rows with NA,
# unlike the Stata `replace msmoke=0 if msmou==0`).
combi$msmoke <- combi$msmof
combi$msmoke[which(combi$msmou == 0)] <- 0
# b. Alcohol: take malcf, but force 0 where malcu (use indicator) is 0.
# Bug fix: the original second line was `combi$msmoke[combi$msmoke==0] <- 0`,
# a no-op on the wrong variable -- malc was never corrected at all.
combi$malc <- combi$malcf
combi$malc[which(combi$malcu == 0)] <- 0
# c. Low birth weight: 0 for 88-768 ounces, 1 below 88 ounces.
# Bug fix: the task says "88 to 768 ounces", so the boundaries are inclusive
# (the original `>88 & <768` sent exactly-88 and exactly-768 to NA).
combi$lbweight <- ifelse(combi$cbwght>=88 & combi$cbwght<=768,0,ifelse(combi$cbwght<88,1,NA))
# d. Drug use: 0 if the mother used neither THC nor cocaine, 1 if she used
# either. Bug fix: the 1-branch used `&` (both drugs required); the task asks
# for "either", i.e. `|`.
combi$mdrug <- ifelse(combi$mcocu==0 & combi$mthcu==0, 0, ifelse(combi$mcocu==1 | combi$mthcu==1, 1, NA))
# e. Timing of birth: 0 at due date, +nwearlat weeks if late (cearlat==2),
# -nwearlat if early (cearlat==1).
# Bug fix: the original wrote `combi$nwearlat<0` (a logical comparison that
# yields TRUE/FALSE) where the negated value `-combi$nwearlat` was intended.
combi$btime <- ifelse(combi$cborndue==1, 0, ifelse(combi$cearlat==2, combi$nwearlat, ifelse(combi$cearlat==1, -combi$nwearlat, NA)))
# ------------------------------------------------------------------------------
# #4 Lables
# a. Label the following variables:
# caseid "Mother ID"
# year "Survey year"
# fstbrn "Child is firstborn"
# cmale "Child is male"
# hgcrev_t_cat "Mother's education at t"
# spousgrade_t_cat "Spouses's education at t"
# partgrade_t_cat "Partner's education at t"
# famstr_t "Family structure at t"
# hgcrev_b_cat "Mother's education at birth"
# spousgrade_b_cat "Spouses's education at birth"
# partgrade_b_cat "Partner's education at birth"
# famstr_b "Family structure at birth"
# csage_dyr "Age of child at assessment (dec. yrs)"
# msage_dyr "Age of child at mother suppl. (dec. yrs)"
# agech_dyr "Age of child at mother int. (dec. yrs)"
# csage_yr "Age of child at assessment (yrs)"
# msage_yr "Age of child at mother suppl. (yrs)"
# agech_yr "Age of child at mother int. (yrs)"
# msmoke "No. of cigatettes smoked during pregnancy"
# malc "Alcohol use during pregnancy"
# mdrug "Drug use during 12 months before birth"
# lbweight "Child was low birth weight"
# btime "Timing of birth (0=at due date)"
# b. Define the value label msmoke with value 0 (None), 1 (Less than 1 pack/day),
# 2 (1 or more but less than 2), 3 (2 or more packs/day)
# c. Assign the value label yesno to the variables cbrstfd, lbweight, and mdrug
# d. Assign the value label msmoke to the variable msmoke and the value label
# vlC0320300 to the variable malc
# ------------------------------------------------------------------------------
#
#
# * Labelling variables
# Attach human-readable variable labels; label() is presumably from the
# Hmisc package -- TODO confirm it is loaded by the setup script.
label(combi$cpubid) <- "Child ID"

# * Defining value labels
yesno <- c("0 No", "1 Yes")
# NOTE(review): levels() on a non-factor numeric vector only attaches a
# "levels" attribute; fstbrn is not converted to a factor here.
# # label val fstbrn yesno
levels(combi$fstbrn) <- yesno

# Assigning value labels
# NOTE(review): "cigatettes" below is a typo for "cigarettes" in the
# user-facing label text.
label(combi$caseid) <- "Mother ID"
label(combi$year) <- "Survey year"
label(combi$fstbrn) <- "Child is firstborn"
label(combi$cmale) <- "Child is male"
label(combi$hgcrev_t_cat) <- "Mother's education at t"
label(combi$spousgrade_t_cat) <- "Spouses's education at t"
label(combi$partgrade_t_cat) <- "Partner's education at t"
label(combi$famstr_t) <- "Family structure at t"
label(combi$hgcrev_b_cat) <- "Mother's education at birth"
label(combi$spousgrade_b_cat) <- "Spouses's education at birth"
label(combi$partgrade_b_cat) <- "Partner's education at birth"
label(combi$famstr_b) <- "Family structure at birth"
label(combi$csage_dyr) <- "Age of child at assessment (dec. yrs)"
label(combi$msage_dyr) <- "Age of child at mother suppl. (dec. yrs)"
label(combi$agech_dyr) <- "Age of child at mother int. (dec. yrs)"
label(combi$csage_yr) <- "Age of child at assessment (yrs)"
label(combi$msage_yr) <- "Age of child at mother suppl. (yrs)"
label(combi$agech_yr) <- "Age of child at mother int. (yrs)"
label(combi$msmoke) <- "No. of cigatettes smoked during pregnancy"
label(combi$malc) <- "Alcohol use during pregnancy"
label(combi$mdrug) <- "Drug use during 12 months before birth"
label(combi$lbweight) <- "Child was low birth weight"
label(combi$btime) <- "Timing of birth (0=at due date)"
#------------------------------------------------------------------------------
# #5 Sort and save the data
# Tasks:
# a. Sort the data by child ID and year
# b. Save the data as clean1.dta in your folder procdata.
# ------------------------------------------------------------------------------
# a. Sort by child ID and year.
# Bug fix: `combi[order(...), ]` returns a sorted COPY; the original never
# assigned it back, so the saved data was unsorted.
combi <- combi[order(combi$cpubid, combi$year), ]
# b. Save the whole workspace as clean1 in the procdata folder.
save.image(file=file.path(pdata,"clean1.RData"))
|
4215f7fae4dcfbbf4f3058bd8bb2d49fb2e18f23
|
28d8018036af366af8da2a8ff699b8bf1e419ec6
|
/R/c_artificial.R
|
557617275e112efc466d96b2b073500ed98731f0
|
[] |
no_license
|
cran/intpoint
|
d98a8251f17921ed878a402930707314b04d046c
|
a803f5aba0b5a39ad25f88ca2b63fc2bf4063fcf
|
refs/heads/master
| 2020-04-13T15:21:07.574580
| 2012-05-30T00:00:00
| 2012-05-30T00:00:00
| 17,696,805
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 143
|
r
|
c_artificial.R
|
#' Build the cost vector extended with a big-M artificial-variable penalty
#'
#' Expands the objective coefficients `c` to one entry per column of the
#' constraint matrix `A`, padding with zeros, and places the big-M penalty
#' (1e6) on the final (artificial-variable) position.
#'
#' @param c Numeric vector of objective coefficients (length <= ncol(A)).
#' @param A Constraint matrix; only its column count is used.
#' @return A ncol(A) x 1 matrix of coefficients.
c_artificial <-
function(c, A){
  col <- ncol(A)
  cn <- array(0, c(col, 1))
  # Vectorized copy; seq_along() is safe for an empty `c`, unlike the
  # original `for (i in 1:l)` loop, which iterates over c(1, 0) when
  # length(c) == 0 and writes a stray NA into cn[1].
  cn[seq_along(c)] <- c
  # Big-M penalty on the last (artificial) variable.
  cn[col] <- 1000000
  cn
}
|
952207b8ce1d303398afd7c44a15fc9a3234d60d
|
974c2c2f10f94098e90d8a1bfc9cd167e2340b58
|
/man/spr_heat_scrape.Rd
|
c38c1316cf4594c1fa5b38ea9600182b477defb8
|
[] |
no_license
|
joranE/fiscrape
|
784983a3314f0004983182019075c5f777902dd0
|
e5026654836cd87b944b5382ea72df9a46777f63
|
refs/heads/master
| 2023-04-07T00:14:03.133624
| 2022-11-27T02:26:22
| 2022-11-27T02:26:22
| 9,248,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 260
|
rd
|
spr_heat_scrape.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spr_heat_scrape.R
\name{spr_heat_scrape}
\alias{spr_heat_scrape}
\title{Scrape WC Sprint Heat Times}
\usage{
spr_heat_scrape(url, race)
}
\description{
Scrape WC Sprint Heat Times
}
|
71a9b222cdf01809f084c6a7163a28c72549f09e
|
11de724b543fc892da056f0d960e154548a52acb
|
/plot/density/prediction_score.R
|
02bc5c579967947e0cb0cef56b5325dd3f8e2511
|
[] |
no_license
|
Luanshasha/Bioinformatics_ML
|
695f9fba00bdcd49ac90cb997054f464d3690383
|
59c0cc7833b8e93b16e9c03a889377d63b5ecd82
|
refs/heads/master
| 2021-06-28T19:18:57.481986
| 2021-02-02T06:42:35
| 2021-02-02T06:42:35
| 219,302,875
| 2
| 1
| null | null | null | null |
GB18030
|
R
| false
| false
| 2,952
|
r
|
prediction_score.R
|
### S3 prediction-score density plots (panels A and B), stacked vertically
library(ggplot2)
library(grid)

### Figure 1 (panel A)
# NOTE(review): panels A and B read the SAME csv file; presumably panel B
# was meant to use a different input -- confirm the intended file.
test <- read.csv("C:/Users/Lenovo/Desktop/R/S3_presion.csv")
data1 <- data.frame(labels = test[,1],scores=test[,2])
p1 <- ggplot(data1 ,aes(x=scores,fill=labels))+
  geom_density(alpha=.3)+labs(tag = "A",y="Density",x="Prediction scores")+
  guides(fill = guide_legend(title = NULL,reverse=TRUE))+   ### drop the legend title and reverse the legend order
  theme(axis.text = element_text(size = rel(1.2),colour="black"),   ### axis tick-label size
        axis.title = element_text(size = rel(1.2),colour="black"),   ### axis title size
        panel.background = element_rect(fill = "white", colour = "black"),
        #panel.border = element_rect(color="black"),
        panel.grid.major = element_blank(),   ### blank out the panel grid
        panel.grid.minor = element_blank(),
        legend.text=element_text(size=10),
        #axis.line=element_line(color="black",size=1),
        legend.position=c(0.52,0.86),   ### legend position (inside the panel)
        axis.title.x = element_text(vjust=-1),   ### x-axis title offset from the axis
        axis.title.y = element_text(vjust=5),   ### y-axis title offset from the axis
        axis.ticks = element_line(size=1),   ### axis tick thickness
  )

### Figure 2 (panel B) -- same styling as panel A
test <- read.csv("C:/Users/Lenovo/Desktop/R/S3_presion.csv")
data2 <- data.frame(labels = test[,1],scores=test[,2])
p2 <- ggplot(data2 ,aes(x=scores,fill=labels))+
  geom_density(alpha=.3)+labs(tag = "B",y="Density",x="Prediction scores")+
  guides(fill = guide_legend(title = NULL,reverse=TRUE))+   ### drop the legend title and reverse the legend order
  theme(axis.text = element_text(size = rel(1.2),colour="black"),   ### axis tick-label size
        axis.title = element_text(size = rel(1.2),colour="black"),   ### axis title size
        panel.background = element_rect(fill = "white", colour = "black"),
        #panel.border = element_rect(color="black"),
        panel.grid.major = element_blank(),   ### blank out the panel grid
        panel.grid.minor = element_blank(),
        legend.text=element_text(size=10),
        #axis.line=element_line(color="black",size=1),
        legend.position=c(0.52,0.86),   ### legend position (inside the panel)
        axis.title.x = element_text(vjust=-1),   ### x-axis title offset from the axis
        axis.title.y = element_text(vjust=5),   ### y-axis title offset from the axis
        axis.ticks = element_line(size=1),   ### axis tick thickness
  )

### Display: 2x1 grid layout, p1 on top, p2 below
vplayout <- function(x,y){viewport(layout.pos.row = x, layout.pos.col = y)}
grid.newpage()   ### start a fresh page
pushViewport(viewport(layout = grid.layout(2,1)))
print(p1, vp = vplayout(1,1))   ### draw p1 at position (1,1)
print(p2, vp = vplayout(2,1))   ### draw p2 at position (2,1)

### Save the same 2x1 layout to pdf
pdf("C:/Users/Lenovo/Desktop/R/density16.pdf",width=4,height=8)### open the pdf device
vplayout <- function(x,y){viewport(layout.pos.row = x, layout.pos.col = y)}
grid.newpage()   ### start a fresh page
pushViewport(viewport(layout = grid.layout(2,1)))
print(p1, vp = vplayout(1,1))   ### draw p1 at position (1,1)
print(p2, vp = vplayout(2,1))   ### draw p2 at position (2,1)
dev.off()
|
7d24b2bc78a9940480f07f42f3fc777656799904
|
bab0ce3c8a81762101b3af466d018f5425ab7fcc
|
/covidhub-common.R
|
e994a1df74d0b45cb1487a1805e18a9c26d3dbb6
|
[
"MIT"
] |
permissive
|
signaturescience/random-walks
|
1f0e6f5127e1ffdba76283fc6340d66dea1a79b6
|
853f96aae54b2985d1c55ef9be5da341d945081a
|
refs/heads/master
| 2023-01-22T22:02:08.643403
| 2020-12-07T07:28:36
| 2020-12-07T07:28:36
| 319,381,303
| 0
| 0
| null | 2020-12-07T16:39:00
| 2020-12-07T16:38:59
| null |
UTF-8
|
R
| false
| false
| 9,296
|
r
|
covidhub-common.R
|
# Lookup table mapping USPS state/territory abbreviations to FIPS codes and
# full names; used below to translate between the covidtracking API's
# abbreviations and the covidhub location codes.
state_abb_fips <-
  readr::read_csv(
    file = "state,state_code,state_name
AK,02,Alaska
AL,01,Alabama
AR,05,Arkansas
AS,60,American Samoa
AZ,04,Arizona
CA,06,California
CO,08,Colorado
CT,09,Connecticut
DC,11,District of Columbia
DE,10,Delaware
FL,12,Florida
GA,13,Georgia
GU,66,Guam
HI,15,Hawaii
IA,19,Iowa
ID,16,Idaho
IL,17,Illinois
IN,18,Indiana
KS,20,Kansas
KY,21,Kentucky
LA,22,Louisiana
MA,25,Massachusetts
MD,24,Maryland
ME,23,Maine
MI,26,Michigan
MN,27,Minnesota
MO,29,Missouri
MP,69,Northern Mariana Islands
MS,28,Mississippi
MT,30,Montana
NC,37,North Carolina
ND,38,North Dakota
NE,31,Nebraska
NH,33,New Hampshire
NJ,34,New Jersey
NM,35,New Mexico
NV,32,Nevada
NY,36,New York
OH,39,Ohio
OK,40,Oklahoma
OR,41,Oregon
PA,42,Pennsylvania
PR,72,Puerto Rico
RI,44,Rhode Island
SC,45,South Carolina
SD,46,South Dakota
TN,47,Tennessee
TX,48,Texas
UM,74,U.S. Minor Outlying Islands
UT,49,Utah
VA,51,Virginia
VI,78,Virgin Islands
VT,50,Vermont
WA,53,Washington
WI,55,Wisconsin
WV,54,West Virginia
WY,56,Wyoming"
  )

# Location names accepted for covidhub submissions: DC, the 50 states, and
# the "All" (national) aggregate.
covidhub_locations <- c("District of Columbia", state.name, "All")
# Read one Johns Hopkins US time-series CSV (deaths or confirmed cases) and
# return a long data frame of weekly incident/cumulative targets with columns
# location (FIPS or "US"), target_end_date, target_type, value.
# Deaths files yield both "wk ahead inc death" and "wk ahead cum death";
# case files yield only "wk ahead inc case".
process_hopkins <- function(path){
  # The deaths file also carries cumulative targets; cases only incident.
  is_deaths <- str_detect(path, "deaths_US.csv$")
  if (is_deaths){
    targ_suffix <- " death"
    targ_pattern <- "^wk ahead inc|^wk ahead cum"
  } else {
    targ_suffix <- " case"
    targ_pattern <- "^wk ahead inc"
  }
  ## construct a col spec to read in only the state and date columns
  # Date columns are identified by their m/d/yy header format.
  colnms <- readLines(path, n = 1) %>% str_split(",") %>% "[["(1)
  is_date_col <- colnms %>% str_detect("^\\d{1,2}/\\d{1,2}/\\d{2}$")
  date_cols <- colnms[is_date_col]
  # "i" = integer column shorthand for readr.
  # NOTE(review): sapply() is fragile in non-interactive code; here the input
  # is always a non-empty character vector so the result is a named
  # character vector as intended.
  colspec <- sapply(date_cols, function(x)
    "i")
  col_types <-
    do.call(cols_only, c(list(FIPS = col_double(),
                              Province_State = col_character()),
                         as.list(colspec)))
  # Wide -> long: one row per county per day, FIPS zero-padded to 5 chars
  # then trimmed to 2 chars for state-level codes.
  hpd_raw <-
    read_csv(path, col_types = col_types) %>%
    pivot_longer(-c(Province_State, FIPS), names_to = "date_string", values_to = "day ahead cum") %>%
    mutate(target_end_date = lubridate::mdy(date_string)) %>%
    mutate(location = sprintf("%05d", FIPS)) %>%
    mutate(location = str_remove(location, "^000")) %>%
    select(-FIPS)
  # Counties: drop rows with missing FIPS and Hopkins' special "Out of"/
  # "Unassigned" pseudo-FIPS blocks (900xx/999xx/800xx/888xx).
  # NOTE(review): `location != " NA"` compares against a literal string with
  # one leading space; sprintf("%05d", NA) pads with spaces, so confirm the
  # padded NA string actually matches -- TODO verify.
  hpd_county <- hpd_raw %>% filter(location !=" NA") %>%
    filter(!str_detect(location, "^900|^999|^800|^888"))
  # States: sum counties within each state; keep the 2-digit state FIPS taken
  # from the first county row. filter(n > 1) drops territories that have no
  # county breakdown.
  hpd_state <- hpd_raw %>% group_by(Province_State, target_end_date) %>%
    mutate(has_id = str_detect(location, "^[0-9][0-9]")) %>%
    summarise(`day ahead cum` = sum(`day ahead cum`),
              location = substring(location[has_id][1], 1, 2),
              n = n(), .groups = "drop") %>%
    filter(n > 1) %>%
    select(-n)
  # National total across all rows.
  hpd_us <- hpd_raw %>% group_by(target_end_date) %>%
    summarise(`day ahead cum` = sum(`day ahead cum`),
              location = "US", .groups = "drop")
  # Daily incident values as first differences of the cumulative series.
  hpd <- bind_rows(hpd_county, hpd_state, hpd_us) %>%
    arrange(location, target_end_date) %>%
    group_by(location) %>%
    mutate(`day ahead inc`= c(NA, diff(`day ahead cum`)))
  # Roll up to epiweeks; filter(n == 7) keeps only complete weeks so partial
  # weeks at the ends of the series are dropped.
  hpd2 <-
    hpd %>% mutate(week = lubridate::epiweek(target_end_date)) %>%
    group_by(location, week) %>%
    arrange(location, week, target_end_date) %>%
    summarise(`wk ahead inc` = sum(`day ahead inc`),
              `wk ahead cum` = tail(`day ahead cum`, n = 1),
              target_end_date = tail(target_end_date, n = 1),
              n = n(), .groups = "drop") %>%
    filter(n == 7) %>%
    select(-n, -week)
  # Long format with covidhub-style target names; drop Puerto Rico counties
  # and the DC-as-county duplicate.
  hpd3 <-
    hpd2 %>% pivot_longer(-c(target_end_date, location),
                          names_to = "target_type", values_to = "value") %>%
    mutate(target_type = str_c(target_type, targ_suffix)) %>%
    filter(str_detect(target_type, targ_pattern)) %>%
    filter(!str_detect(location, "72[0-9]{3}")) %>% #remove PR counties
    filter(location != "11001") # remove duplicated location of DC as county
  hpd3
}
# Locate the Hopkins deaths and confirmed-cases time-series CSVs inside `loc`
# and return both processed target data frames stacked into one.
load_hopkins <- function(loc) {
  all_paths <- dir(loc, full.names = TRUE)
  deaths_path <- str_subset(all_paths, "time_series_covid19_deaths_US.csv$")
  cases_path <- str_subset(all_paths, "time_series_covid19_confirmed_US.csv$")
  # Deaths first, then cases, matching the order downstream code expects.
  bind_rows(
    process_hopkins(deaths_path),
    process_hopkins(cases_path)
  )
}
# Load the most recent covidtracking ("covid19us") snapshot csv from `loc`
# and return daily incident-hospitalization rows in covidhub layout:
# target_end_date, value, Province_State, location, target_type. Includes a
# national "All"/"US" aggregate; non-covidhub territories are filtered out.
load_covidtracking <- function(loc) {
  # Named vector: state abbreviation -> full state name.
  sn <- state_abb_fips$state_name
  names(sn) <- state_abb_fips$state
  # Most recent snapshot file (dir() sorts lexically, so the timestamped
  # filenames sort chronologically and tail(1) is the newest).
  tfile <-
    dir(loc, pattern = "^covid19us-.*\\.csv$", full.names = TRUE) %>%
    tail(1)
  tdf <- read_csv(
    tfile,
    col_types = cols_only(
      date = col_date(),
      state = col_character(),
      hospitalized_increase = col_integer()
    )
  ) %>%
    mutate(Province_State = sn[state]) %>%
    select(-state)
  # Named vector: full state name -> 2-digit FIPS code.
  fips <- state_abb_fips$state_code
  names(fips) <- state_abb_fips$state_name
  tdf2 <-
    tdf %>%
    add_column(target_type = "day ahead inc hosp") %>%
    rename(target_end_date = date,
           value = hospitalized_increase) %>%
    mutate(location = fips[Province_State])
  # National aggregate across all states for each date.
  # NOTE(review): sum(value) has no na.rm, so any state reporting NA makes
  # the national total NA for that date -- confirm this is intended.
  tdf3 <-
    tdf2 %>% group_by(target_end_date) %>%
    summarise(value = sum(value)) %>%
    mutate(Province_State = "All",
           location = "US") %>%
    add_column(target_type = "day ahead inc hosp")
  tdf4 <- bind_rows(tdf2, tdf3)
  # Keep only DC, the 50 states, and the "All" aggregate.
  tdf4 %>% filter(Province_State %in% covidhub_locations)
}
# Fetch one upstream surveillance dataset and write it to a timestamped csv
# under `dir`, returning (invisibly piped through tail()) the last rows.
#   compid == "state":    state-level ILInet from cdcfluview
#   compid == "national": national + HHS-region ILInet, stacked
#   compid == "hosp":     covid19us daily data for Georgia
# NOTE(review): an unrecognized compid leaves `idt`/`stem` undefined and the
# function fails later with "object 'stem' not found" -- consider an explicit
# stop() for unknown values.
pull_data <- function(compid, dir){
  tstamp <- format(Sys.time(), "%Y-%m-%d--%H-%M-%S")
  if (compid == "state"){
    idt <- cdcfluview::ilinet(region = c("state"))
    stem <- "state-ILInet"
  } else if (compid == "national") {
    # region is converted to character so the national and hhs frames
    # bind cleanly.
    idt_nat <- cdcfluview::ilinet(region = c("national")) %>% mutate(region = as.character(region))
    idt_reg <- cdcfluview::ilinet(region = c("hhs")) %>% mutate(region = as.character(region))
    idt <- bind_rows(idt_nat, idt_reg)
    stem <- "national-regional-ILInet"
  } else if (compid == "hosp") {
    idt <- covid19us::get_states_daily(state = "GA")
    stem <- "covid19us"
  }
  file <- paste0(stem, "-", tstamp, ".csv")
  path <- file.path(dir, file)
  # Create the output directory on first use.
  if(!dir.exists(dir)) dir.create(dir)
  # write_csv() returns its input invisibly, so tail() shows the last rows
  # as a quick sanity check of what was written.
  write_csv(idt, path) %>% tail()
}
# Read a covidhub forecast submission csv with the standard column types
# (explicit spec so readr never has to guess, and dates parse consistently).
read_forecast <- function(file) {
  forecast_cols <- cols(
    forecast_date = col_date(format = "%Y-%m-%d"),
    target = col_character(),
    target_end_date = col_date(format = "%Y-%m-%d"),
    location = col_character(),
    type = col_character(),
    quantile = col_double(),
    value = col_double()
  )
  read_csv(file, col_types = forecast_cols)
}
# Quantile helper: type-8 sample quantiles, unnamed, dropping NAs.
#
# @param x Numeric vector (NAs are removed before computing).
# @param p Probability or vector of probabilities in [0, 1].
# @return Unnamed numeric vector of quantiles, one per element of p.
quant <- function(x, p){
  # `probs` spelled out in full: the original relied on partial argument
  # matching (`prob =`), which is fragile and flagged by R CMD check.
  quantile(x, probs = p, names = FALSE, type = 8, na.rm = TRUE)
}
# Build the point + quantile forecast rows for one target.
#
# @param var Target name: one of "inc hosp", "inc death", "inc case",
#   "cum death". Selects which column of `samp` holds the sampled values.
# @param samp Data frame / tibble of simulation samples with columns
#   hosps, deaths, cases, cum_deaths (only the one matching `var` is read).
# @return A tibble with columns target, type ("quantile" rows plus one
#   "point" row at the median), quantile, value.
# NOTE(review): an unmatched `var` makes switch() return NULL and the
# samp[[cname]] lookup then fails with an opaque error -- confirm callers
# only ever pass the four known targets.
vardf <- function(var, samp){
  cname <- switch(var,
                  "inc hosp" = "hosps",
                  "inc death" = "deaths",
                  "inc case" = "cases",
                  "cum death" = "cum_deaths")
  # Case forecasts use the reduced 7-quantile set required by the hub;
  # all other targets use the full 23-quantile set.
  if (var != "inc case") {
    prob <- c(0.01, 0.025, seq(0.05, 0.95, by = 0.05), 0.975, 0.99)
  } else {
    prob <- c(0.025, 0.100, 0.250, 0.500, 0.750, 0.900, 0.975)
  }
  t1 <- tibble(
    target = var,
    type = "quantile",
    quantile = prob,
    value = quant(samp[[cname]], prob)
  )
  # Point forecast = median of the samples.
  t2 <-
    tibble(
      target = var,
      type = "point",
      quantile = NA,
      value = quant(samp[[cname]], 0.5)
    )
  bind_rows(t1, t2)
}
# Summarise simulation samples into stacked forecast rows, one block of
# quantile/point rows per requested target variable.
#
# @param sampdf Data frame of simulation samples (see vardf()).
# @param vars Character vector of target names to summarise.
# @return A single tibble combining vardf() output for each element of `vars`.
samp_to_df <- function(sampdf,
                       vars = c("inc hosp", "inc death", "cum death",
                                "inc case")) {
  purrr::map_dfr(vars, function(v) vardf(v, samp = sampdf))
}
# Take simulation trajectories and output a data frame in the format described
# here: https://github.com/reichlab/covid19-forecast-hub/blob/6a7e5624ef540a55902770b7c17609d19e1f593a/data-processed/README.md
#
# @param out Simulation output with columns Rep, Date, cases, deaths.
# @param loc FIPS location code for the forecast (default "13").
# @param wks_ahead Integer vector of week horizons to forecast (max 20).
# @param hop Observed forecast-hub data frame, used to anchor simulated
#   cumulative deaths to the last observed "wk ahead cum death" value.
# @param fdt Forecast date.
# @return A tibble in forecast-hub submission format; for county-level
#   locations (FIPS longer than 2 chars) only "inc case" rows are kept.
paths_to_forecast <- function(out, loc = "13", wks_ahead = 1:6, hop, fdt) {
  if(any(wks_ahead > 20)){
    # NOTE: the original passed `.call = FALSE`, which stop() does not
    # recognize; it was coerced into the message text. Fixed to `call.`.
    stop("Max weeks ahead accepted is 20", call. = FALSE)
  }
  # Per-trajectory cumulative deaths plus the gap (in days) between
  # successive dates, used below to detect non-weekly series.
  out2 <-
    out %>% group_by(Rep) %>% arrange(Date) %>%
    mutate(cum_deaths = cumsum(deaths),
           day_diff = c(NA, diff(Date)))
  # Most recent observed cumulative deaths before the forecast date;
  # shifts simulated cumulative deaths onto the observed scale.
  prior_deaths <- hop %>%
    filter(target_end_date < fdt & location == loc &
             target_type == "wk ahead cum death") %>%
    arrange(target_end_date) %>%
    pull("value") %>%
    tail(n = 1)
  out2$cum_deaths <- out2$cum_deaths + prior_deaths
  # Forecast-hub convention: forecasts dated Sun/Mon count against the
  # previous epiweek. The logical coerces to 1 in the subtraction.
  take_back_step <- lubridate::wday(fdt, label = TRUE) %in% c("Sun", "Mon")
  week0 <- lubridate::epiweek(fdt) - take_back_step
  forecast_epiweeks <- week0 + wks_ahead
  if(any(na.omit(out2$day_diff) > 7)){
    # Same `.call` -> `call.` fix as above.
    stop("Non-continuous series of weeks, unable to compute cumulative forecasts",
         call. = FALSE)
  }
  # Keep only samples landing in the requested epiweeks, then collapse each
  # week's samples into quantile/point rows via samp_to_df().
  weekly <- out2 %>%
    as_tibble() %>%
    mutate(epiweek = lubridate::epiweek(Date)) %>%
    filter(epiweek %in% forecast_epiweeks) %>%
    rename("target_end_date" = Date) %>%
    nest(data = c(Rep, cases, deaths, cum_deaths)) %>%
    mutate(pred_df = purrr::map(data, samp_to_df,
                                vars = c("inc death", "cum death", "inc case"))) %>%
    select(-data) %>%
    unnest(pred_df) %>%
    mutate(target = paste(epiweek - week0,
                          "wk ahead", target)) %>%
    add_column(location = loc) %>%
    mutate(quantile = round(quantile, digits = 3),
           value = round(value, digits = 3)) %>%
    add_column(forecast_date = fdt) %>%
    select(forecast_date, target, target_end_date, location, type, quantile,
           value)
  weekly %>%
    filter(!is.na(value)) %>%
    filter((nchar(location)) <= 2 | str_detect(target, "inc case$")) ## only case forecasts accepted for counties
}
|
3fbff6ca6c2f8c6382b7ec061577a5d39c06dc2b
|
ec37ae37c4f29995ead8b1ba212b01000033088f
|
/4.ExploratoryDataAnalysis_Week4/plot2.R
|
4251ca4635724d8bc5fb8126383a9644fa00f073
|
[] |
no_license
|
williamrelf/DataScienceCoursera
|
48430bbaff61cd26e83d1c03a3fb407063decb6f
|
face3687bacd507176c3bb93de349f2271df88cd
|
refs/heads/master
| 2021-01-21T12:59:40.002900
| 2016-05-25T13:49:12
| 2016-05-25T13:49:12
| 51,294,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 954
|
r
|
plot2.R
|
## Plot the total PM2.5 emissions for Baltimore City by year (plot2.png).

## Download the zip file, if not exists, containing the data and unpack it
if (!file.exists("FNEI_data/summarySCC_PM25.rds")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
                destfile = "FNEI_data.zip")
  unzip(zipfile = "FNEI_data.zip", exdir = "FNEI_data")
}

## Load in the two containing files (the classification codes are not used
## for this plot but are loaded to match the assignment's setup).
SourceClassificationCode <- readRDS("FNEI_data/Source_Classification_Code.rds")
SummarySccPM25 <- readRDS("FNEI_data/summarySCC_PM25.rds")

## Subset the Baltimore City data (fips == "24510")
Baltimore <- subset(SummarySccPM25, fips == "24510")

## Calculate the yearly total emissions
YearTotal <- with(Baltimore, tapply(Emissions, as.character(year), sum, na.rm = TRUE))

## Plot the year-on-year totals against each other. The tapply names are
## character strings, so convert them to numeric explicitly; plot() does not
## accept a character x vector.
plot(x = as.numeric(names(YearTotal)), y = YearTotal, type = "l",
     xlab = "Year", ylab = "Total Emissions (Tons)",
     main = "The total emissions for Baltimore City, in tons, by year.")

## Save to file
dev.copy(png, "plot2.png")
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.