content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
suppressMessages(library(dplyr))
suppressMessages(library(lintr))
suppressMessages(library(ggplot2))
suppressMessages(library(tidyr))
juvenile_df <- read.csv("data/juvenile_crime.csv", stringsAsFactors = FALSE)
# Total the non-missing values of one column of a data frame.
#
# The original implementation piped through select/filter/mutate, took a
# cumulative sum and kept the rows equal to its maximum.  For the
# non-negative count data used here that maximum is simply the column sum,
# and the filter could return a vector of length > 1 whenever the running
# total plateaus (trailing zeros), which would break the data.frame()
# construction below.  Summing directly is equivalent for the intended
# result and always returns a length-1 numeric (0 for an all-NA column).
#
# NOTE(review): assumes the column holds non-negative counts (as the crime
# data does); for columns with negative values the old max-of-cumsum and a
# plain sum would differ.
#
# df       - data frame containing the column
# col_name - column name as a string
# Returns the sum of the non-NA values in that column.
totals <- function(df, col_name) {
  sum(df[[col_name]], na.rm = TRUE)
}
# Assemble per-age-group arrest totals: one row per age group, one column
# per gender.  Intermediate vectors keep the data.frame() call readable.
age_groups <- c("0-9", "10-12", "13-14", "15", "16", "17")
male_totals <- c(totals(juvenile_df, "m_0_9"), totals(juvenile_df, "m_10_12"),
                 totals(juvenile_df, "m_13_14"), totals(juvenile_df, "m_15"),
                 totals(juvenile_df, "m_16"), totals(juvenile_df, "m_17"))
female_totals <- c(totals(juvenile_df, "f_0_9"), totals(juvenile_df, "f_10_12"),
                   totals(juvenile_df, "f_13_14"), totals(juvenile_df, "f_15"),
                   totals(juvenile_df, "f_16"), totals(juvenile_df, "f_17"))
total_by_gender <- data.frame(
  age_group = age_groups,
  male = male_totals,
  female = female_totals
)
# Grouped bar chart: total juvenile arrests per age group, dodged by gender.
# pivot_longer() replaces the superseded tidyr::gather() and produces the
# same long layout (one row per age_group x Demographic pair), so the
# rendered chart is unchanged.
bar <- total_by_gender %>%
  pivot_longer(-age_group, names_to = "Demographic", values_to = "Total") %>%
  ggplot(aes(age_group, Total, fill = Demographic)) +
  geom_bar(position = "dodge", stat = "identity") +
  labs(title = "Juvenile Crime by Gender and Age Group in the US, 1994-2016",
       x = "Age Group", y = "Total Number of People")
| /scripts/age_and_gender.R | permissive | umoazzam/uwinfo201_final_project | R | false | false | 1,354 | r | suppressMessages(library(dplyr))
suppressMessages(library(lintr))
suppressMessages(library(ggplot2))
suppressMessages(library(tidyr))
juvenile_df <- read.csv("data/juvenile_crime.csv", stringsAsFactors = FALSE)
# Total the non-missing values of one column of a data frame.
#
# The original implementation piped through select/filter/mutate, took a
# cumulative sum and kept the rows equal to its maximum.  For the
# non-negative count data used here that maximum is simply the column sum,
# and the filter could return a vector of length > 1 whenever the running
# total plateaus (trailing zeros), which would break the data.frame()
# construction below.  Summing directly is equivalent for the intended
# result and always returns a length-1 numeric (0 for an all-NA column).
#
# NOTE(review): assumes the column holds non-negative counts (as the crime
# data does); for columns with negative values the old max-of-cumsum and a
# plain sum would differ.
#
# df       - data frame containing the column
# col_name - column name as a string
# Returns the sum of the non-NA values in that column.
totals <- function(df, col_name) {
  sum(df[[col_name]], na.rm = TRUE)
}
total_by_gender <- data.frame(
age_group = c("0-9", "10-12", "13-14", "15", "16", "17"),
male = c(totals(juvenile_df, "m_0_9"), totals(juvenile_df, "m_10_12"),
totals(juvenile_df, "m_13_14"), totals(juvenile_df, "m_15"),
totals(juvenile_df, "m_16"), totals(juvenile_df, "m_17")),
female = c(totals(juvenile_df, "f_0_9"), totals(juvenile_df, "f_10_12"),
totals(juvenile_df, "f_13_14"), totals(juvenile_df, "f_15"),
totals(juvenile_df, "f_16"), totals(juvenile_df, "f_17"))
)
bar <- total_by_gender %>%
gather("Demographic", "Total", -age_group) %>%
ggplot(aes(age_group, Total, fill = Demographic)) +
geom_bar(position = "dodge", stat = "identity") +
labs(title = "Juvenile Crime by Gender and Age Group in the US, 1994-2016",
x = "Age Group", y = "Total Number of People")
|
## Diebold Mariano simulations
library(tidyverse)
library(xtable)
load("/Users/alexanderbech/Dropbox/Project/diebold_sim.RData")
# Average the model-pair Diebold-Mariano statistics over the 50 Monte Carlo
# replications and reshape into a 10 x 10 model-by-model matrix.
dm_mean_matrix <- function(sims) {
  stats <- sapply(seq_len(50), function(i) sims[[i]]$`Diebold Mariano test`)
  matrix(rowMeans(stats), nrow = 10)
}
DB.a.50 <- dm_mean_matrix(MC.a.50)
DB.a.100 <- dm_mean_matrix(MC.a.100)
DB.b.50 <- dm_mean_matrix(MC.b.50)
DB.b.100 <- dm_mean_matrix(MC.b.100)
# Model labels in the order they appear in the simulation output.
DB.grid <- c("OLS" = 1, "Ridge" = 2,
             "Lasso" = 3, "ENET" = 4, "PCR" = 5,
             "PLSR" = 6, "GLM" = 7, "RandomF" = 8,
             "GBRT" = 9,
             "Oracle" = 10)
model_names <- names(DB.grid)
# Label both dimensions of every DM matrix with the model names
# (dimnames<- sets rownames and colnames in one step).
dimnames(DB.a.50) <- list(model_names, model_names)
dimnames(DB.a.100) <- list(model_names, model_names)
dimnames(DB.b.50) <- list(model_names, model_names)
dimnames(DB.b.100) <- list(model_names, model_names)
# Export each matrix as a standalone LaTeX table.
print(xtable(DB.a.50, type = "latex"), file = "DB.a.50.tex")
print(xtable(DB.a.100, type = "latex"), file = "DB.a.100.tex")
print(xtable(DB.b.50, type = "latex"), file = "DB.b.50.tex")
print(xtable(DB.b.100, type = "latex"), file = "DB.b.100.tex")
# Simulation manipulation ----
load("/Users/alexanderbech/Dropbox/Project/MCfinal.RData")
# For each of the first 13 model entries, average the first two reported
# values over the 50 MC replications.  Returns a 2 x 13 matrix; row 1 feeds
# the "OOS" columns below and row 2 the "IS" columns.
# (colMeans(t(X)) in the original is equivalent to rowMeans(X).)
collect_mc <- function(sims) {
  sapply(seq_len(13), function(j) {
    rowMeans(sapply(seq_len(50), function(i) sims[[i]][[j]][1:2]),
             na.rm = TRUE)
  })
}
collect.MC.a.50 <- collect_mc(MC.a.50)
collect.MC.b.50 <- collect_mc(MC.b.50)
collect.MC.a.100 <- collect_mc(MC.a.100)
collect.MC.b.100 <- collect_mc(MC.b.100)
# check.names = FALSE keeps the intended LaTeX header "$R^2(%)$" and the
# deliberately repeated "IS"/"OOS" column names; the default would mangle
# them (X.R.2..., IS.1, OOS.1, ...) in the exported table.
MC_table <- data.frame(
  "$R^2(%)$" = names(MC.a.50[[1]][1:13]),
  "IS" = collect.MC.a.50[2, ],
  "OOS" = collect.MC.a.50[1, ],
  "IS" = collect.MC.a.100[2, ],
  "OOS" = collect.MC.a.100[1, ],
  "IS" = collect.MC.b.50[2, ],
  "OOS" = collect.MC.b.50[1, ],
  "IS" = collect.MC.b.100[2, ],
  "OOS" = collect.MC.b.100[1, ],
  check.names = FALSE
)
print(xtable(MC_table, type = "latex"), file = "simulation_table1.tex")
### For longer horizons #####
load("/Users/alexanderbech/Dropbox/Project/MC_quart2.RData")
load("/Users/alexanderbech/Dropbox/Project/MC_half.RData")
load("/Users/alexanderbech/Dropbox/Project/MC_annual.RData")
# For each of the first 11 model entries, average the first two reported
# values over the 50 MC replications.  Returns a 2 x 11 matrix; row 1 feeds
# the "OOS" columns below and row 2 the "IS" columns.
# (colMeans(t(X)) in the original is equivalent to rowMeans(X).)
collect_mc_horizon <- function(sims) {
  sapply(seq_len(11), function(j) {
    rowMeans(sapply(seq_len(50), function(i) sims[[i]][[j]][1:2]),
             na.rm = TRUE)
  })
}
collect.MC.a.100.q <- collect_mc_horizon(MC.a.100.q)
collect.MC.b.100.q <- collect_mc_horizon(MC.b.100.q)
collect.MC.a.100.h <- collect_mc_horizon(MC.a.100.h)
collect.MC.b.100.h <- collect_mc_horizon(MC.b.100.h)
collect.MC.a.100.a <- collect_mc_horizon(MC.a.100.a)
collect.MC.b.100.a <- collect_mc_horizon(MC.b.100.a)
# check.names = FALSE keeps the intended LaTeX header "$R^2(%)$" and the
# deliberately repeated "IS"/"OOS" column names; the default would mangle
# them in the exported table.
MC_table_horizon <- data.frame(
  "$R^2(%)$" = names(MC.a.100.q[[1]][1:11]),
  "IS" = collect.MC.a.100.q[2, ],
  "OOS" = collect.MC.a.100.q[1, ],
  "IS" = collect.MC.a.100.h[2, ],
  "OOS" = collect.MC.a.100.h[1, ],
  "IS" = collect.MC.a.100.a[2, ],
  "OOS" = collect.MC.a.100.a[1, ],
  "IS" = collect.MC.b.100.q[2, ],
  "OOS" = collect.MC.b.100.q[1, ],
  "IS" = collect.MC.b.100.h[2, ],
  "OOS" = collect.MC.b.100.h[1, ],
  "IS" = collect.MC.b.100.a[2, ],
  "OOS" = collect.MC.b.100.a[1, ],
  check.names = FALSE
)
print(xtable(MC_table_horizon, type = "latex"),
      file = "/Users/alexanderbech/Dropbox/Project/simulation_table_hor.tex")
#################################################################
############# Output from data ##########################
#################################################################
# Test sample goes from 637 to 849; 18 annual observations, 2000-2017.
test.dates <- seq(2000, 2017)
load("/Users/alexanderbech/Dropbox/Project/data_output_2.RData")
# Per-year model-complexity measures across the 18 test years.
lasso.compl <- sapply(1:18, function(i) sum(output$models$lasso[[i]]$model != 0))
enet.compl <- sapply(1:18, function(i) sum(output$models$enet[[i]]$model != 0))
pcr.compl <- sapply(1:18, function(i) output$models$pcr[[i]]$ncomp)
plsr.compl <- sapply(1:18, function(i) output$models$plsr[[i]]$ncomp)
RF.compl <- sapply(1:18, function(i) output$models$RF[[i]]$model$min.node.size)
boost.compl <- sapply(1:18, function(i) output$models$GBRT[[i]]$depth)
# 3 x 2 panel of complexity-over-time line plots, one per model family.
par(mfrow = c(3, 2), mar = c(2, 4, 2, 1))
# Draw one panel: blue line of `values` against test.dates plus a grid,
# replacing six copy-pasted plot()/grid() pairs.
compl_panel <- function(values, title, ylab) {
  plot(test.dates, values,
       main = title,
       ylab = ylab, xlab = "",
       type = "l",
       col = "blue")
  grid()
}
compl_panel(lasso.compl, "Lasso", "df")
compl_panel(enet.compl, "ENet", "df")
compl_panel(pcr.compl, "PCR", "ncomp")
compl_panel(plsr.compl, "PLSR", "ncomp")
compl_panel(RF.compl, "Random Forest", "node size")
compl_panel(boost.compl, "GBRT", "depth")
# Diebold-Mariano matrix for the empirical data, labelled with model names.
Diebold <- as.data.frame(output$Diebold)
DB.grid <- c("OLS" = 1, "Ridge" = 2,
             "Lasso" = 3, "ENET" = 4, "PCR" = 5,
             "PLSR" = 6, "GLM" = 7, "RandomF" = 8,
             "GBRT" = 9)
dimnames(Diebold) <- list(names(DB.grid), names(DB.grid))
dim(Diebold) # 9x9
# Keep rows OLS..RandomF (1:8) and columns Ridge..GBRT (2:9).
Diebold <- Diebold[1:8, 2:9]
print(xtable(Diebold, type = "latex"),
      file = "/Users/alexanderbech/Dropbox/Project/Diebold.tex")
| /R/Sim_tidying.R | no_license | andreaslillevangbech/M2-project | R | false | false | 7,062 | r | ## Diebold Mariano simulations
library(tidyverse)
library(xtable)
load("/Users/alexanderbech/Dropbox/Project/diebold_sim.RData")
DB.a.50 = sapply(1:50, function(i) MC.a.50[[i]]$`Diebold Mariano test`) %>%
rowMeans() %>% matrix(.,nrow = 10)
DB.a.100 = sapply(1:50, function(i) MC.a.100[[i]]$`Diebold Mariano test`) %>%
rowMeans() %>% matrix(.,nrow = 10)
DB.b.50 = sapply(1:50, function(i) MC.b.50[[i]]$`Diebold Mariano test`) %>%
rowMeans() %>% matrix(.,nrow = 10)
DB.b.100 = sapply(1:50, function(i) MC.b.100[[i]]$`Diebold Mariano test`) %>%
rowMeans() %>% matrix(.,nrow = 10)
DB.grid = c("OLS" = 1, "Ridge" = 2,
"Lasso" = 3, "ENET" = 4, "PCR"=5,
"PLSR"=6, "GLM" = 7, "RandomF" = 8,
"GBRT" = 9,
"Oracle"=10)
colnames(DB.a.50) = names(DB.grid)
rownames(DB.a.50) = names(DB.grid)
colnames(DB.a.100) = names(DB.grid)
rownames(DB.a.100) = names(DB.grid)
colnames(DB.b.50) = names(DB.grid)
rownames(DB.b.50) = names(DB.grid)
colnames(DB.b.100) = names(DB.grid)
rownames(DB.b.100) = names(DB.grid)
print(xtable(DB.a.50, type = "latex"), file = "DB.a.50.tex")
print(xtable(DB.a.100, type = "latex"), file = "DB.a.100.tex")
print(xtable(DB.b.50, type = "latex"), file = "DB.b.50.tex")
print(xtable(DB.b.100, type = "latex"), file = "DB.b.100.tex")
#Simulation manipulatio
load("/Users/alexanderbech/Dropbox/Project/MCfinal.RData")
collect.MC.a.50 = sapply(1:13,
function(j) sapply(1:50, function(i) MC.a.50[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.b.50 = sapply(1:13,
function(j) sapply(1:50, function(i) MC.b.50[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.a.100 = sapply(1:13,
function(j) sapply(1:50, function(i) MC.a.100[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.b.100 = sapply(1:13,
function(j) sapply(1:50, function(i) MC.b.100[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
MC_table = data.frame(
"$R^2(%)$" = names(MC.a.50[[1]][1:13]),
"IS" = collect.MC.a.50[2,],
"OOS" = collect.MC.a.50[1,],
"IS" = collect.MC.a.100[2,],
"OOS" = collect.MC.a.100[1,],
"IS" = collect.MC.b.50[2,],
"OOS" = collect.MC.b.50[1,],
"IS" = collect.MC.b.100[2,],
"OOS" = collect.MC.b.100[1,]
)
print(xtable(MC_table, type = "latex"), file = "simulation_table1.tex")
### For longer horizons #####
load("/Users/alexanderbech/Dropbox/Project/MC_quart2.RData")
load("/Users/alexanderbech/Dropbox/Project/MC_half.RData")
load("/Users/alexanderbech/Dropbox/Project/MC_annual.RData")
collect.MC.a.100.q = sapply(1:11,
function(j) sapply(1:50, function(i) MC.a.100.q[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.b.100.q = sapply(1:11,
function(j) sapply(1:50, function(i) MC.b.100.q[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.a.100.h = sapply(1:11,
function(j) sapply(1:50, function(i) MC.a.100.h[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.b.100.h = sapply(1:11,
function(j) sapply(1:50, function(i) MC.b.100.h[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.a.100.a = sapply(1:11,
function(j) sapply(1:50, function(i) MC.a.100.a[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
collect.MC.b.100.a = sapply(1:11,
function(j) sapply(1:50, function(i) MC.b.100.a[[i]][[j]][1:2]) %>%
t() %>% #ifelse(.>(-30), ., NA) %>%
colMeans(na.rm = TRUE) )
MC_table_horizon = data.frame(
"$R^2(%)$" = names(MC.a.100.q[[1]][1:11]),
"IS" = collect.MC.a.100.q[2,],
"OOS" = collect.MC.a.100.q[1,],
"IS" = collect.MC.a.100.h[2,],
"OOS" = collect.MC.a.100.h[1,],
"IS" = collect.MC.a.100.a[2,],
"OOS" = collect.MC.a.100.a[1,],
"IS" = collect.MC.b.100.q[2,],
"OOS" = collect.MC.b.100.q[1,],
"IS" = collect.MC.b.100.h[2,],
"OOS" = collect.MC.b.100.h[1,],
"IS" = collect.MC.b.100.a[2,],
"OOS" = collect.MC.b.100.a[1,]
)
print(xtable(MC_table_horizon, type = "latex"),
file = "/Users/alexanderbech/Dropbox/Project/simulation_table_hor.tex")
#################################################################
############# Output from data ##########################
#################################################################
#test sample goes from 637 to 849
test.dates = seq(2000,2017)
load("/Users/alexanderbech/Dropbox/Project/data_output_2.RData")
lasso.compl = sapply(1:18, function(i) sum(output$models$lasso[[i]]$model!=0))
enet.compl = sapply(1:18, function(i) sum(output$models$enet[[i]]$model!=0))
pcr.compl = sapply(1:18, function(i) output$models$pcr[[i]]$ncomp)
plsr.compl = sapply(1:18, function(i) output$models$plsr[[i]]$ncomp)
RF.compl = sapply(1:18, function(i) output$models$RF[[i]]$model$min.node.size)
boost.compl = sapply(1:18, function(i) output$models$GBRT[[i]]$depth)
par(mfrow=c(3,2), mar=c(2,4,2,1))
plot(test.dates, lasso.compl,
main="Lasso",
ylab="df", xlab = "",
type="l",
col="blue")
grid()
plot(test.dates, enet.compl,
main="ENet",
ylab="df", xlab = "",
type="l",
col="blue")
grid()
plot(test.dates, pcr.compl,
main="PCR",
ylab="ncomp", xlab = "",
type="l",
col="blue")
grid()
plot(test.dates, plsr.compl,
main="PLSR",
ylab="ncomp", xlab = "",
type="l",
col="blue")
grid()
plot(test.dates, RF.compl,
main="Random Forest",
ylab="node size", xlab = "",
type="l",
col="blue")
grid()
plot(test.dates, boost.compl,
main="GBRT",
ylab="depth", xlab = "",
type="l",
col="blue")
grid()
Diebold = as.data.frame(output$Diebold)
DB.grid = c("OLS" = 1, "Ridge" = 2,
"Lasso" = 3, "ENET" = 4, "PCR"=5,
"PLSR"=6, "GLM" = 7, "RandomF" = 8,
"GBRT" = 9)
colnames(Diebold) = names(DB.grid)
rownames(Diebold) = names(DB.grid)
dim(Diebold) #9x9
Diebold = Diebold[1:8, 2:9]
print(xtable(Diebold, type = "latex"),
file = "/Users/alexanderbech/Dropbox/Project/Diebold.tex")
|
#' Estimate photosynthesis parameters for C4 species using Sharkey's fitting procedure
#'
#' Using the gas exchange measurement (A_Ci curve), C4 photosynthesis model without
#' carbonic anhydrase and Sharkey et al. (2007) fitting processure to do nonlinear
#' curve fitting (using nlminb package) for estimating photosynthesis parameters
#' (Vcmax,J,Rd,gm and Vpmax) for C4 species. The difference
#' between this method with C4EstimateWithoutCA is that temperature response
#' parameters need to be provided by the users. Thus, this method provides the option
#' to alter temperature response parameters. If only planing to alter several
#' parameters, not all of them, one can use the other parameters provided by
#' Table S1 in Zhou et al. (2019) ("Deriving C4 photosynthesis parameters by fitting
#' intensive A/Ci curves"). Make sure to load the "stats" package
#' before installing and using the "C4Estimation" package.
#' @param ACi Gas exchange measurement from Li6400 or other equipment. It is a
#' data frame input. Ci with the unit of ppm. You can prepare the data in Excel file
#' like the given example and save it as "tab delimited text". Then import data by
#' ACi <- read.table(file = "/Users/haoranzhou/Desktop/Learn R/ACi curve.txt",header
#' = TRUE)
#' @param Tleaf Leaf temperature when A_Ci curve is measured.
#' @param Patm Atmosphere pressure when A_Ci curve is measured.
#' @param alpha1 The fraction of O2 evolution occurring in the bundle sheath. Unless
#' you have enough information, input it as the 0.15.
#' @param x the fraction of total electron transport that are confined to be
#' used for the PEP regeneration out of J, which is the total electron transport.
#' @param CaBreakL Higher bound of Ci below which A is thought to be controled by
#' Rubisco Carboxylation (Ac). Start with 10.
#' @param CaBreakH Lower bound of Ci above which A is thought to be controlled by RuBP
#' @param CaBreakH Lower bound of Ci above which A is thought to be controled by RuBP
#' regeneration (Aj). Start with 50. If the estimation results showed "inadmissible
#' fits", change the CaBreakL and CaBreakH until "inadmissible fits" disappear.
#' @param startp A vector that gives the start points for the estimation (c(Vcmax,
#' J,Rd,gm and Vpmax))
#' @param TresponseKc A vector that gives the temperature response parameters for the
#' Kc (c(Kc25,deltaHaKc))
#' @param TresponseKo A vector that gives the temperature response parameters for the
#' Ko (c(Ko25,deltaHaKo))
#' @param Tresponsegammastar A vector that gives the temperature response parameters for the
#' gammastar (c(gammastar25,deltaHagammastar))
#' @param TresponseKp A vector that gives the temperature response parameters for the
#' Kp (c(Kp25,deltaHaKp))
#' @param Tresponsegbs A vector that gives the temperature response parameters for the
#' gbs (c(gbs25,deltaHagbs,deltaHdgbs,deltaSgbs))
#' @param TresponseVcmax A vector that gives the temperature response parameters for the
#' Vcmax (c(deltaHaVcmax))
#' @param TresponseJ A vector that gives the temperature response parameters for the
#' J (c(deltaHaJ,deltaHdJ,deltaSJ))
#' @param TresponseRd A vector that gives the temperature response parameters for the
#' Rd (c(deltaHaRd))
#' @param Tresponsegm A vector that gives the temperature response parameters for the
#' gm (c(deltaHagm,deltaHdgm,deltaSgm))
#' @param TresponseVpmax A vector that gives the temperature response parameters for the
#' Vpmax (c(deltaHaVpmax,deltaHdVpmax,deltaSVpmax))
#' @return This package will return a dataframe that contains the following values
#' (c(Vcmax,J,Rd,gm and Vpmax)). You can try with c(30, 150, 3, 10, 50).
#' @return Parameter at leaf temperature: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters at leaf temperature.
#' @return Parameter at 25°C: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters at leaf temperature.
#' @return Objective: The final objective value based on the
#' estimation results.
#' @return Convergence: An integer code. 0 indicates successful
#' convergence.
#' @return Message: A character string giving any additional
#' information returned by the optimizer, or NULL. For details, see PORT documentation.
#' @return Iterations: Number of iterations performed.
#' @return Evaluations: Number of objective function and gradient
#' function evaluations.
#' @export
# Fit C4 photosynthesis parameters (Vcmax, J, Rd, gm, Vpmax) to an A/Ci curve
# using a Sharkey-style segmented approach: points with Ci <= CaBreakL are
# treated as Rubisco-limited, points with Ci >= CaBreakH as RuBP-regeneration-
# limited, and intermediate points take the minimum of the candidate rates.
# Interface, units, and return values are documented in the roxygen block above.
C4EstimateWithoutCAT<- function(ACi,Tleaf,Patm,alpha1,x,CaBreakL,CaBreakH,startp,
TresponseKc,TresponseKo,Tresponsegammastar,
TresponseKp,Tresponsegbs,TresponseVcmax,TresponseJ,
TresponseRd,Tresponsegm,TresponseVpmax)
{
# Observed assimilation; Ci converted from ppm to partial pressure (Pa) via
# ambient pressure.  O2 partial pressure is taken as 21% of Patm.
A.obs <- ACi$A
Ci.obs<-ACi$Ci*Patm*0.001
O2<-Patm*0.21
# Unpack the temperature-response parameter vectors: value at 25 degrees C,
# activation energy, and (for peaked responses) deactivation energy and
# entropy terms.
Kc25 <- TresponseKc[1]
deltaHaKc <- TresponseKc[2]
Ko25 <- TresponseKo[1]
deltaHaKo <- TresponseKo[2]
gammastar25 <- Tresponsegammastar[1]
deltaHagammastar <- Tresponsegammastar[2]
Kp25 <- TresponseKp[1]
deltaHaKp <- TresponseKp[2]
gbs25 <- Tresponsegbs[1]
deltaHagbs <- Tresponsegbs[2]
deltaHdgbs <- Tresponsegbs[3]
deltaSgbs <- Tresponsegbs[4]
deltaHaVcmax <- TresponseVcmax[1]
deltaHaJ <- TresponseJ[1]
deltaHdJ <- TresponseJ[2]
deltaSJ <- TresponseJ[3]
deltaHaRd <- TresponseRd[1]
deltaHagm <- Tresponsegm[1]
deltaHdgm <- Tresponsegm[2]
deltaSgm <- Tresponsegm[3]
deltaHaVpmax <- TresponseVpmax[1]
deltaHdVpmax <- TresponseVpmax[2]
deltaSVpmax <- TresponseVpmax[3]
#Temperature adjustment for Kc,Ko,gammastar,Kp from 25°C to Tleaf
# Arrhenius scaling; gbs additionally carries a peaked (deactivation) term.
# 0.008314 is the gas constant R in kJ mol^-1 K^-1; 298.15 K = 25 degrees C.
Kc<-Kc25*exp(deltaHaKc*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Ko<-Ko25*exp(deltaHaKo*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
gammastar<-gammastar25*exp(deltaHagammastar*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Kp<-Kp25*exp(deltaHaKp*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
gbs<-gbs25*exp(deltaHagbs*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSgbs-deltaHdgbs)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSgbs-deltaHdgbs)/(273.15+Tleaf)/0.008314))
#Define objective function: sum of squared residuals between observed A and
#the piecewise model prediction.  The limiting process is selected by the
#Ci breakpoints (CaBreakL/CaBreakH) and, in between, by min(Vpc, Vpr) and
#min(Ac, Aj).
fn<-function(Param){
Vcmax <- Param[1]
J <- Param[2]
Rd <- Param[3]
gm <- Param[4]
Vpmax <- Param[5]
# NOTE(review): Rm is fixed at Rd/2 (mesophyll respiration convention in
# this model) — confirm against the source publication.
Rm <- Rd/2
#Useful intermediates
Obs <- alpha1*(A.obs+Rd)/(0.047*gbs)/1000+O2
Vpc <- Vpmax*(gm*Ci.obs-A.obs)/(gm*Ci.obs-A.obs+Kp*gm)
Vpr <- x*J/2
Cbspc <- Vpmax*(gm*Ci.obs-A.obs)/(gm*Ci.obs-A.obs+Kp*gm)/gbs-
Rd/2/gbs-A.obs/gbs+Ci.obs-A.obs/gm
Cbspr <- x*J/2/gbs-Rd/2/gbs-A.obs/gbs+Ci.obs-A.obs/gm
Acpc <- Vcmax*(Cbspc-gammastar*Obs*1000)/(Cbspc+Kc*(1+Obs/Ko))
Acpr <- Vcmax*(Cbspr-gammastar*Obs*1000)/(Cbspr+Kc*(1+Obs/Ko))
Ajpc <- (1-x)*J*(Cbspc-gammastar*Obs*1000)/(4*Cbspc+8*gammastar*Obs*1000)
Ajpr <- (1-x)*J*(Cbspr-gammastar*Obs*1000)/(4*Cbspr+8*gammastar*Obs*1000)
#Objective: indicator products select the applicable limitation per point.
sum(((Ci.obs<=CaBreakL)*(Acpc-Rd)+
(Ci.obs>CaBreakL)*(Ci.obs<CaBreakH)*((Vpc<Vpr)*((Acpc<Ajpc)*Acpc+(Acpc>=Ajpc)*Ajpc)+
(Vpc>=Vpr)*((Acpr<Ajpr)*Acpr+(Acpr>=Ajpr)*Ajpr)-Rd)+
(Ci.obs>=CaBreakH)*(Ajpr-Rd)-A.obs)^2)
}
#Estimate Vcmax,J,Rd,gm and Vpmax by box-constrained minimization with
#stats::nlminb (the original comment said "nloptr", but nlminb is what is
#called).  Lower bounds are 0; upper bounds cap each parameter.
Est.model <- nlminb(c(startp[1],startp[2] , startp[3], startp[4], startp[5]),
fn, lower=c(0,0,0,0,0), upper=c(200, 600, 20, 30, 200))
Parameters<-Est.model$par
Vcmax <- Parameters[1]
J <- Parameters[2]
Rd <- Parameters[3]
gm <- Parameters[4]
Vpmax <- Parameters[5]
Rm <- Rd/2
#Temperature adjustment for Vcmax,J,Rd,gm and Vpmax from Tleaf to 25°C
#(inverse of the Arrhenius / peaked scalings used above).
Vcmax25<-Parameters[1]/(exp(deltaHaVcmax*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
J25<-Parameters[2]/(exp(deltaHaJ*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSJ-deltaHdJ)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSJ-deltaHdJ)/(273.15+Tleaf)/0.008314)))
Rd25<-Parameters[3]/(exp(deltaHaRd*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
gm25<-Parameters[4]/(exp(deltaHagm*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSgm-deltaHdgm)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSgm-deltaHdgm)/(273.15+Tleaf)/0.008314)))
Vpmax25<-Parameters[5]/(exp(deltaHaVpmax*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSVpmax-deltaHdVpmax)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSVpmax-deltaHdVpmax)/(273.15+Tleaf)/
0.008314)))
para25<-c(Vcmax25,J25,Rd25,gm25,Vpmax25)
#Calculate the estimation results
#Useful intermediate
# O2 is re-scaled by 1000 here for the explicit-solution equations below.
O2<-Patm*0.21*1000
x1_ac <- Vcmax
x2_ac <- Kc/Ko/1000
x3_ac <- Kc
deno_ac <- gm+gbs-x2_ac*gm*alpha1/0.047
x1_aj <- (1-x)*J/4
x2_aj <- 2*gammastar
x3_aj <- 0
deno_aj <- gm+gbs-x2_aj*gm*alpha1/0.047
#Explicit calculation of AEE and AET
d <- gm*(Rm-Vpmax-Ci.obs*(gm+2*gbs)-Kp*(gm + gbs))
f <- gm*gm*(Ci.obs*Vpmax+(Ci.obs+Kp)*(gbs*Ci.obs-Rm))
k <- gm*gm*gbs*(Ci.obs+Kp)
# p, q, r are the coefficients of a cubic in the assimilation rate for the
# Rubisco- (Rc) and RuBP- (Rr) limited cases under PEP-carboxylation (Pc).
RcPc_p <- (d-(x3_ac+x2_ac*O2)*gm*gbs+(Rd-x1_ac)*(gm+gbs)-
(x1_ac*gammastar*gm+x2_ac*Rd*gm-x2_ac*k/gbs)*alpha1/0.047)/deno_ac
RrPc_p <- (d-(x3_aj+x2_aj*O2)*gm*gbs+(Rd-x1_aj)*(gm+gbs)-
(x1_aj*gammastar*gm+x2_aj*Rd*gm-x2_aj*k/gbs)*alpha1/0.047)/deno_aj
RcPc_q <- (f+(x3_ac+x2_ac*O2)*k+d*(Rd-x1_ac)-gm*gbs*
(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))+
(x1_ac*gammastar+x2_ac*Rd)*k*alpha1/0.047/gbs)/deno_ac
RrPc_q <- (f+(x3_aj+x2_aj*O2)*k+d*(Rd-x1_aj)-gm*gbs*
(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))+
(x1_aj*gammastar+x2_aj*Rd)*k*alpha1/0.047/gbs)/deno_aj
RcPc_r <- (Rd*(f+(x3_ac+x2_ac*O2)*k)-x1_ac*(f-k*gammastar*O2))/deno_ac
RrPc_r <- (Rd*(f+(x3_aj+x2_aj*O2)*k)-x1_aj*(f-k*gammastar*O2))/deno_aj
# Trigonometric (three-real-roots) solution of the cubic.
RcPc_Q <- (RcPc_p*RcPc_p-3*RcPc_q)/9
RrPc_Q <- (RrPc_p*RrPc_p-3*RrPc_q)/9
RcPc_U <- (2*RcPc_p^3-9*RcPc_p*RcPc_q+27*RcPc_r)/54
RrPc_U <- (2*RrPc_p^3-9*RrPc_p*RrPc_q+27*RrPc_r)/54
RcPc_PHI <- acos(RcPc_U/(RcPc_Q^3)^0.5)
RrPc_PHI <- acos(RrPc_U/(RrPc_Q^3)^0.5)
RcPc <- -2*RcPc_Q^0.5*cos(RcPc_PHI/3)-RcPc_p/3
RrPc <- -2*RrPc_Q^0.5*cos(RrPc_PHI/3)-RrPc_p/3
##Explicit calculation of ATE and ATT
# PEP-regeneration-limited (Pr) cases reduce to a quadratic; the positive
# root is taken.
Vpr <- x*J/2
a_ac <- x2_ac*gm*alpha1/0.047-gm-gbs
a_aj <- x2_aj*gm*alpha1/0.047-gm-gbs
b_ac <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_ac+x2_ac*O2)*gm*gbs+
(x1_ac*gammastar+x2_ac*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_ac-Rd)
b_aj <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_aj+x2_aj*O2)*gm*gbs+
(x1_aj*gammastar+x2_aj*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_aj-Rd)
c_ac <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_ac-Rd)+gm*gbs*
(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))
c_aj <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_aj-Rd)+gm*gbs*
(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))
RcPr <- (-b_ac+(b_ac^2-4*a_ac*c_ac)^0.5)/2/a_ac
RrPr <- (-b_aj+(b_aj^2-4*a_aj*c_aj)^0.5)/2/a_aj
# Choose the PEP-carboxylation vs PEP-regeneration branch per observation.
Vpc_RcPc <- Vpmax*(Ci.obs-RcPc/gm)/((Ci.obs-RcPc/gm)+Kp)
Vpc_RrPc <- Vpmax*(Ci.obs-RrPc/gm)/((Ci.obs-RrPc/gm)+Kp)
Ac <- (Vpc_RcPc<=Vpr)*RcPc+(Vpc_RcPc>Vpr)*RcPr
Aj <- (Vpc_RrPc<=Vpr)*RrPc+(Vpc_RrPc>Vpr)*RrPr
#Calculate the real estimation error term
Error1 <- sum((A.obs-(Ac<=Aj)*Ac-(Ac>Aj)*Aj)^2)
#Write a while loop to compare Ac and Aj
# limitation state per point: 1 when Ac < Aj, 2 when Ac >= Aj.
Count<-length(Ci.obs)
limitation <- rep(0,Count)
i <- 1
while (i<=Count){
limitation[i] <- 2*(Ac[i]>=Aj[i])+1*(Ac[i]<Aj[i])
i=i+1
}
#Print out to see whether there is inadmissible fit
print("Print out to see whether there is inadmissible fit")
Ci_name <- "Ci"
limitation_name <- "limitation_state"
df <- data.frame(Ci.obs,limitation)
colnames(df)<-c(Ci_name,limitation_name)
print(df)
#Plot the estimation and observation
print("Plot the observed A and estimated Ac and Aj")
xrange<-max(Ci.obs)
yrange<-max(RcPc)
plot(Ci.obs,A.obs, type="p",col="blue",xlim=range(0,xrange),ylim=range(0,yrange),
pch=20, xlab="Ci(Pa)",ylab="A")
lines(Ci.obs,RcPc, type="l",col="red4",lwd=2)
lines(Ci.obs,RcPr,type="l",col="red",lwd=2)
lines(Ci.obs,RrPc, type="l",col="green",lwd=2)
lines(Ci.obs,RrPr,type="l",col="green4",lwd=2)
leg.text<-c("Obs A", "Cal RcPc", "Cal RcPr","Cal RrPc","Cal RrPr")
xrange<-max(Ci.obs)
legend(xrange-20,7,leg.text,col=c("blue","red4","red","green","green4"),
pch=c(20,NA,NA,NA,NA),lty=c(0,1,1,1,1),cex=0.5,lwd=c(0,2,2,2,2))
#Return the estimation results (see roxygen @return tags above).
EstF<-list(Est.model$par,para25,Est.model$objective,Error1,Est.model$convergence,
Est.model$iterations,Est.model$evaluations,Est.model$message)
EstFinal<-setNames(EstF,c("Parameter at leaf temperature","Parameter at 25°C",
"Objective","Estimation Error","Convergence","Iterations",
"Evaluations","Message"))
return(EstFinal)
}
| /R/C4EstimateWithoutCAT.R | no_license | zhouhaoran06/C4-Parameter-Estimation | R | false | false | 12,934 | r | #' Estimate photosynthesis parameters for C4 species using Sharkey's fitting procedure
#'
#' Using the gas exchange measurement (A_Ci curve), C4 photosynthesis model without
#' carbonic anhydrase and Sharkey et al. (2007) fitting processure to do nonlinear
#' curve fitting (using nlminb package) for estimating photosynthesis parameters
#' (Vcmax,J,Rd,gm and Vpmax) for C4 species. The difference
#' between this method with C4EstimateWithoutCA is that temperature response
#' parameters need to be provided by the users. Thus, this method provides the option
#' to alter temperature response parameters. If only planing to alter several
#' parameters, not all of them, one can use the other parameters provided by
#' Table S1 in Zhou et al. (2019) ("Deriving C4 photosynthesis parameters by fitting
#' intensive A/Ci curves"). Make sure to load the "stats" package
#' before intstalling and using the "C4Estimation" package.
#' @param ACi Gas exchange measurement from Li6400 or other equipment. It is a
#' dataframe iput. Ci with the unit of ppm. You can prepare the data in Excel file
#' like the given example and save it as "tab delimited text". Then import data by
#' ACi <- read.table(file = "/Users/haoranzhou/Desktop/Learn R/ACi curve.txt",header
#' = TRUE)
#' @param Tleaf Leaf temperature when A_Ci curve is measured.
#' @param Patm Atmosphere pressure when A_Ci curve is measured.
#' @param alpha1 The fraction of O2 evolution occurring in the bundle sheath. Unless
#' you have enough information, input it as the 0.15.
#' @param x the fraction of total electron transport that are confined to be
#' used for the PEP regeneration out of J, which is the total electron transport.
#' @param CaBreakL Higher bound of Ci below which A is thought to be controled by
#' Rubisco Carboxylation (Ac). Start with 10.
#' @param CaBreakH Lower bound of Ci above which A is thought to be controled by RuBP
#' regeneration (Aj). Start with 50. If the estimation results showed "inadmissible
#' fits", change the CaBreakL and CaBreakH until "inadmissible fits" disappear.
#' @param startp A vector that gives the start points for the estimation (c(Vcmax,
#' J,Rd,gm and Vpmax))
#' @param TresponseKc A vector that gives the temperature response parameters for the
#' Kc (c(Kc25,deltaHaKc))
#' @param TresponseKo A vector that gives the temperature response parameters for the
#' Ko (c(Ko25,deltaHaKo))
#' @param Tresponsegammastar A vector that gives the temperature response parameters for the
#' gammastar (c(gammastar25,deltaHagammastar))
#' @param TresponseKp A vector that gives the temperature response parameters for the
#' Kp (c(Kp25,deltaHaKp))
#' @param Tresponsegbs A vector that gives the temperature response parameters for the
#' gbs (c(gbs25,deltaHagbs,deltaHdgbs,deltaSgbs))
#' @param TresponseVcmax A vector that gives the temperature response parameters for the
#' Vcmax (c(deltaHaVcmax))
#' @param TresponseJ A vector that gives the temperature response parameters for the
#' J (c(deltaHaJ,deltaHdJ,deltaSJ))
#' @param TresponseRd A vector that gives the temperature response parameters for the
#' Rd (c(deltaHaRd))
#' @param Tresponsegm A vector that gives the temperature response parameters for the
#' gm (c(deltaHagm,deltaHdgm,deltaSgm))
#' @param TresponseVpmax A vector that gives the temperature response parameters for the
#' Vpmax (c(deltaHaVpmax,deltaHdVpmax,deltaSVpmax))
#' @return This package will return a dataframe that contains the following values
#' (c(Vcmax,J,Rd,gm and Vpmax)). You can try with c(30, 150, 3, 10, 50).
#' @return Parameter at leaf temperature: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters at leaf temperature.
#' @return Parameter at 25°C: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters at leaf temperature.
#' @return Objective: The final objective value based on the
#' estimation results.
#' @return Convergence: An integer code. 0 indicates successful
#' convergence.
#' @return Message: A character string giving any additional
#' information returned by the optimizer, or NULL. For details, see PORT documentation.
#' @return Iterations: Number of iterations performed.
#' @return Evaluations: Number of objective function and gradient
#' function evaluations.
#' @export
C4EstimateWithoutCAT<- function(ACi,Tleaf,Patm,alpha1,x,CaBreakL,CaBreakH,startp,
TresponseKc,TresponseKo,Tresponsegammastar,
TresponseKp,Tresponsegbs,TresponseVcmax,TresponseJ,
TresponseRd,Tresponsegm,TresponseVpmax)
{
# Fits a C4 photosynthesis model to an observed A/Ci curve by nonlinear least
# squares and returns Vcmax, J, Rd, gm and Vpmax both at leaf temperature and
# rescaled to 25 degrees C, plus optimizer diagnostics, a per-point limitation
# state table (printed) and a diagnostic plot. See roxygen header above for
# the argument documentation.
A.obs <- ACi$A # Observed net assimilation rate
Ci.obs<-ACi$Ci*Patm*0.001 # Ci converted from umol/mol to partial pressure (Pa)
O2<-Patm*0.21 # O2 partial pressure: 21% of atmospheric pressure
# Unpack temperature-response vectors: value at 25 C, Arrhenius activation
# energy (deltaHa), and -- for peaked responses -- deactivation energy
# (deltaHd) plus entropy term (deltaS).
Kc25 <- TresponseKc[1]
deltaHaKc <- TresponseKc[2]
Ko25 <- TresponseKo[1]
deltaHaKo <- TresponseKo[2]
gammastar25 <- Tresponsegammastar[1]
deltaHagammastar <- Tresponsegammastar[2]
Kp25 <- TresponseKp[1]
deltaHaKp <- TresponseKp[2]
gbs25 <- Tresponsegbs[1]
deltaHagbs <- Tresponsegbs[2]
deltaHdgbs <- Tresponsegbs[3]
deltaSgbs <- Tresponsegbs[4]
deltaHaVcmax <- TresponseVcmax[1]
deltaHaJ <- TresponseJ[1]
deltaHdJ <- TresponseJ[2]
deltaSJ <- TresponseJ[3]
deltaHaRd <- TresponseRd[1]
deltaHagm <- Tresponsegm[1]
deltaHdgm <- Tresponsegm[2]
deltaSgm <- Tresponsegm[3]
deltaHaVpmax <- TresponseVpmax[1]
deltaHdVpmax <- TresponseVpmax[2]
deltaSVpmax <- TresponseVpmax[3]
#Temperature adjustment for Kc,Ko,gammastar,Kp from 25°C to Tleaf.
#Arrhenius scaling; 0.008314 is the gas constant in kJ mol^-1 K^-1 and
#298.15 K corresponds to 25 C. gbs additionally uses the peaked
#(deactivation) form with deltaHd/deltaS.
Kc<-Kc25*exp(deltaHaKc*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Ko<-Ko25*exp(deltaHaKo*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
gammastar<-gammastar25*exp(deltaHagammastar*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Kp<-Kp25*exp(deltaHaKp*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
gbs<-gbs25*exp(deltaHagbs*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSgbs-deltaHdgbs)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSgbs-deltaHdgbs)/(273.15+Tleaf)/0.008314))
#Define objective function: sum of squared deviations between observed A and
#modeled A. The limiting process is chosen by Ci region: below CaBreakL the
#enzyme-limited branch, above CaBreakH the regeneration-limited branch, and
#in between the minimum of the candidate rates.
fn<-function(Param){
Vcmax <- Param[1]
J <- Param[2]
Rd <- Param[3]
gm <- Param[4]
Vpmax <- Param[5]
Rm <- Rd/2 # Mesophyll respiration taken as half of total Rd
#Useful intermediates
Obs <- alpha1*(A.obs+Rd)/(0.047*gbs)/1000+O2
Vpc <- Vpmax*(gm*Ci.obs-A.obs)/(gm*Ci.obs-A.obs+Kp*gm)
Vpr <- x*J/2
Cbspc <- Vpmax*(gm*Ci.obs-A.obs)/(gm*Ci.obs-A.obs+Kp*gm)/gbs-
Rd/2/gbs-A.obs/gbs+Ci.obs-A.obs/gm
Cbspr <- x*J/2/gbs-Rd/2/gbs-A.obs/gbs+Ci.obs-A.obs/gm
Acpc <- Vcmax*(Cbspc-gammastar*Obs*1000)/(Cbspc+Kc*(1+Obs/Ko))
Acpr <- Vcmax*(Cbspr-gammastar*Obs*1000)/(Cbspr+Kc*(1+Obs/Ko))
Ajpc <- (1-x)*J*(Cbspc-gammastar*Obs*1000)/(4*Cbspc+8*gammastar*Obs*1000)
Ajpr <- (1-x)*J*(Cbspr-gammastar*Obs*1000)/(4*Cbspr+8*gammastar*Obs*1000)
#Objective: piecewise model assembled with 0/1 indicator products
sum(((Ci.obs<=CaBreakL)*(Acpc-Rd)+
(Ci.obs>CaBreakL)*(Ci.obs<CaBreakH)*((Vpc<Vpr)*((Acpc<Ajpc)*Acpc+(Acpc>=Ajpc)*Ajpc)+
(Vpc>=Vpr)*((Acpr<Ajpr)*Acpr+(Acpr>=Ajpr)*Ajpr)-Rd)+
(Ci.obs>=CaBreakH)*(Ajpr-Rd)-A.obs)^2)
}
#Box-constrained optimization via stats::nlminb (PORT routines) to estimate
#Vcmax,J,Rd,gm and Vpmax. NOTE(review): the original comment said the
#"nloptr" package is used, but nlminb (base stats) is what is called.
Est.model <- nlminb(c(startp[1],startp[2] , startp[3], startp[4], startp[5]),
fn, lower=c(0,0,0,0,0), upper=c(200, 600, 20, 30, 200))
Parameters<-Est.model$par
Vcmax <- Parameters[1]
J <- Parameters[2]
Rd <- Parameters[3]
gm <- Parameters[4]
Vpmax <- Parameters[5]
Rm <- Rd/2
#Temperature adjustment for Vcmax,J,Rd,gm and Vpmax from Tleaf to 25°C
#(inverse of the Arrhenius / peaked scalings applied above).
Vcmax25<-Parameters[1]/(exp(deltaHaVcmax*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
J25<-Parameters[2]/(exp(deltaHaJ*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSJ-deltaHdJ)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSJ-deltaHdJ)/(273.15+Tleaf)/0.008314)))
Rd25<-Parameters[3]/(exp(deltaHaRd*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
gm25<-Parameters[4]/(exp(deltaHagm*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSgm-deltaHdgm)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSgm-deltaHdgm)/(273.15+Tleaf)/0.008314)))
Vpmax25<-Parameters[5]/(exp(deltaHaVpmax*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*deltaSVpmax-deltaHdVpmax)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*deltaSVpmax-deltaHdVpmax)/(273.15+Tleaf)/
0.008314)))
para25<-c(Vcmax25,J25,Rd25,gm25,Vpmax25)
#Calculate the estimation results
#Useful intermediate
O2<-Patm*0.21*1000 # NOTE(review): O2 rescaled (x1000) for the closed-form solutions below
x1_ac <- Vcmax
x2_ac <- Kc/Ko/1000
x3_ac <- Kc
deno_ac <- gm+gbs-x2_ac*gm*alpha1/0.047
x1_aj <- (1-x)*J/4
x2_aj <- 2*gammastar
x3_aj <- 0
deno_aj <- gm+gbs-x2_aj*gm*alpha1/0.047
#Explicit calculation of AEE and AET: trigonometric (Cardano-type) solution
#of a cubic in A for the enzyme-limited (RcPc) and light-limited (RrPc)
#branches; p/q/r are the cubic coefficients, Q/U/PHI the trig substitution.
d <- gm*(Rm-Vpmax-Ci.obs*(gm+2*gbs)-Kp*(gm + gbs))
f <- gm*gm*(Ci.obs*Vpmax+(Ci.obs+Kp)*(gbs*Ci.obs-Rm))
k <- gm*gm*gbs*(Ci.obs+Kp)
RcPc_p <- (d-(x3_ac+x2_ac*O2)*gm*gbs+(Rd-x1_ac)*(gm+gbs)-
(x1_ac*gammastar*gm+x2_ac*Rd*gm-x2_ac*k/gbs)*alpha1/0.047)/deno_ac
RrPc_p <- (d-(x3_aj+x2_aj*O2)*gm*gbs+(Rd-x1_aj)*(gm+gbs)-
(x1_aj*gammastar*gm+x2_aj*Rd*gm-x2_aj*k/gbs)*alpha1/0.047)/deno_aj
RcPc_q <- (f+(x3_ac+x2_ac*O2)*k+d*(Rd-x1_ac)-gm*gbs*
(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))+
(x1_ac*gammastar+x2_ac*Rd)*k*alpha1/0.047/gbs)/deno_ac
RrPc_q <- (f+(x3_aj+x2_aj*O2)*k+d*(Rd-x1_aj)-gm*gbs*
(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))+
(x1_aj*gammastar+x2_aj*Rd)*k*alpha1/0.047/gbs)/deno_aj
RcPc_r <- (Rd*(f+(x3_ac+x2_ac*O2)*k)-x1_ac*(f-k*gammastar*O2))/deno_ac
RrPc_r <- (Rd*(f+(x3_aj+x2_aj*O2)*k)-x1_aj*(f-k*gammastar*O2))/deno_aj
RcPc_Q <- (RcPc_p*RcPc_p-3*RcPc_q)/9
RrPc_Q <- (RrPc_p*RrPc_p-3*RrPc_q)/9
RcPc_U <- (2*RcPc_p^3-9*RcPc_p*RcPc_q+27*RcPc_r)/54
RrPc_U <- (2*RrPc_p^3-9*RrPc_p*RrPc_q+27*RrPc_r)/54
RcPc_PHI <- acos(RcPc_U/(RcPc_Q^3)^0.5) # NaN when |U/Q^(3/2)| > 1 (inadmissible fit)
RrPc_PHI <- acos(RrPc_U/(RrPc_Q^3)^0.5)
RcPc <- -2*RcPc_Q^0.5*cos(RcPc_PHI/3)-RcPc_p/3
RrPc <- -2*RrPc_Q^0.5*cos(RrPc_PHI/3)-RrPc_p/3
##Explicit calculation of ATE and ATT: quadratic solution for the case where
##PEP regeneration (Vpr) limits CO2 supply.
Vpr <- x*J/2
a_ac <- x2_ac*gm*alpha1/0.047-gm-gbs
a_aj <- x2_aj*gm*alpha1/0.047-gm-gbs
b_ac <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_ac+x2_ac*O2)*gm*gbs+
(x1_ac*gammastar+x2_ac*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_ac-Rd)
b_aj <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_aj+x2_aj*O2)*gm*gbs+
(x1_aj*gammastar+x2_aj*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_aj-Rd)
c_ac <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_ac-Rd)+gm*gbs*
(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))
c_aj <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_aj-Rd)+gm*gbs*
(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))
RcPr <- (-b_ac+(b_ac^2-4*a_ac*c_ac)^0.5)/2/a_ac
RrPr <- (-b_aj+(b_aj^2-4*a_aj*c_aj)^0.5)/2/a_aj
Vpc_RcPc <- Vpmax*(Ci.obs-RcPc/gm)/((Ci.obs-RcPc/gm)+Kp)
Vpc_RrPc <- Vpmax*(Ci.obs-RrPc/gm)/((Ci.obs-RrPc/gm)+Kp)
Ac <- (Vpc_RcPc<=Vpr)*RcPc+(Vpc_RcPc>Vpr)*RcPr # Enzyme-limited assimilation
Aj <- (Vpc_RrPc<=Vpr)*RrPc+(Vpc_RrPc>Vpr)*RrPr # Light-limited assimilation
#Calculate the real estimation error term: SSE of min(Ac, Aj) vs observed A
Error1 <- sum((A.obs-(Ac<=Aj)*Ac-(Ac>Aj)*Aj)^2)
#Write a while loop to compare Ac and Aj per observation:
#limitation state 2 when Ac >= Aj, 1 when Ac < Aj
Count<-length(Ci.obs)
limitation <- rep(0,Count)
i <- 1
while (i<=Count){
limitation[i] <- 2*(Ac[i]>=Aj[i])+1*(Ac[i]<Aj[i])
i=i+1
}
#Print out to see whether there is inadmissible fit
print("Print out to see whether there is inadmissible fit")
Ci_name <- "Ci"
limitation_name <- "limitation_state"
df <- data.frame(Ci.obs,limitation)
colnames(df)<-c(Ci_name,limitation_name)
print(df)
#Plot the estimation and observation (diagnostic figure; side effect only)
print("Plot the observed A and estimated Ac and Aj")
xrange<-max(Ci.obs)
yrange<-max(RcPc)
plot(Ci.obs,A.obs, type="p",col="blue",xlim=range(0,xrange),ylim=range(0,yrange),
pch=20, xlab="Ci(Pa)",ylab="A")
lines(Ci.obs,RcPc, type="l",col="red4",lwd=2)
lines(Ci.obs,RcPr,type="l",col="red",lwd=2)
lines(Ci.obs,RrPc, type="l",col="green",lwd=2)
lines(Ci.obs,RrPr,type="l",col="green4",lwd=2)
leg.text<-c("Obs A", "Cal RcPc", "Cal RcPr","Cal RrPc","Cal RrPr")
xrange<-max(Ci.obs)
legend(xrange-20,7,leg.text,col=c("blue","red4","red","green","green4"),
pch=c(20,NA,NA,NA,NA),lty=c(0,1,1,1,1),cex=0.5,lwd=c(0,2,2,2,2))
#Return the estimation results as a named list (see roxygen @return tags)
EstF<-list(Est.model$par,para25,Est.model$objective,Error1,Est.model$convergence,
Est.model$iterations,Est.model$evaluations,Est.model$message)
EstFinal<-setNames(EstF,c("Parameter at leaf temperature","Parameter at 25°C",
"Objective","Estimation Error","Convergence","Iterations",
"Evaluations","Message"))
return(EstFinal)
}
|
# Script: nlme analyses for Chapter 17 (prt muscle-fiber data).
# Requires packages nlmeU (data), nlme (models) and lattice (graphics).
date()
packageVersion("nlmeU")
message("Code for first part of Chapter 17 (from Panel R17.1 til R17.13) is executed below.")
message("Code for remaining panels pertaining to pdKronecker class is distributed with nlmeUpdK package (Jun.10, 2013)")
###################################################
### code chunk: Chap17init
###################################################
options(digits = 5, show.signif.stars = FALSE)
sessionInfo()
library(nlme)
library(lattice)
###################################################
### code chunk: R17.1a
###################################################
data(prt, package = "nlmeU")
# Fixed-effects formula shared by the later models (two-way interactions
# of training and occasion, plus sex/age and BMI covariates).
lme.spec.form1 <-
formula(spec.fo ~ (prt.f + occ.f)^2 + sex.f + age.f +
sex.f:age.f + bmi)
prt1 <- subset(prt, fiber.f == "Type 1", select = -fiber.f)
fm17.1 <- # M17.1 (17.1)
lme(lme.spec.form1,
random = ~occ.f - 1|id, # Random effects structure (including D)
data = prt1)
###################################################
### code chunk: R17.1b
###################################################
getGroupsFormula(fm17.1)
str(grpF <- getGroups(fm17.1))
nF1 <- xtabs(~grpF) # Number of type-1 fibers per subject
range(nF1) # Min, max number of type-1 fibers
nF1[which.min(nF1)] # Subject with the minimum number of fibers
str(fm17.1$dims) # Basic dimensions used in the fit
###################################################
### code chunk: R17.2
###################################################
fixed1 <- summary(fm17.1)$tTable # beta, se(beta), t-test
nms <- rownames(fixed1) # beta names
nms[7:8] <- c("fLow:fPos", "fMale:fOld") # Selected names shortened
rownames(fixed1) <- nms # New names assigned
printCoefmat(fixed1, digits = 3, # See also Table 17.1
has.Pvalue = TRUE, P.values = TRUE)
###################################################
### code chunk: R17.3a
###################################################
getVarCov(fm17.1) # D: (17.3)
VarCorr(fm17.1)
###################################################
### code chunk: R17.3b
###################################################
Ri <- # Ri is a list containing R_i ...
getVarCov(fm17.1, c("5", "275"),# ... for subjects "5" and "275".
type = "conditional")
Ri$"275" # R_i for the subject "275" (17.2)
Ri.5 <- Ri$"5" # R_i for the subject "5" ...
dim(Ri.5) # ... with large dimensions ...
(Ri.5d <- diag(Ri.5)[1:6]) # ... its first 6 diagonal elements.
sgma <- summary(fm17.1)$sigma # sigma
sgma^2 # sigma^2
###################################################
### code chunk: R17.4a
###################################################
dt5 <- # Data with 30 observations
subset(prt1,
select = c(id, occ.f), # ... and 2 variables
id == "5") # ... for the subject "5".
auxF1 <- function(elv) {
  # Return at most the first two elements of `elv`.
  # Fix: the original `1:min(length(elv), 2)` evaluates to c(1, 0) when
  # `elv` is empty, selecting a spurious NA element; seq_len() correctly
  # yields an empty index vector for length-0 input.
  idx <- seq_len(min(length(elv), 2))
  elv[idx]
}
# Pick up to two row indices per occasion subgroup for subject "5"
# (uses helper auxF1 defined above).
(i.u5 <- # Selected indices printed
unlist(
tapply(rownames(dt5), # ... for the subject "5"
dt5$occ.f, # ... by occ.f subgroups
FUN = auxF1)))
dt.u5 <- dt5[i.u5, ] # Raw data for selected indices
(nms.u5 <- # Row names constructed
paste(i.u5, dt.u5$occ.f, sep = "."))
###################################################
### code chunk: R17.4b
###################################################
Vi <- # Vi is a list containing ...
getVarCov(fm17.1, "5", # ... matrix V_i for subject "5".
type = "marginal")
Vi.5 <- Vi$"5" # Vi.5 is a V_i matrix: (17.4)
Vi.u5 <- Vi.5[i.u5, i.u5] # A sub-matrix selected, ...
rownames(Vi.u5) <- nms.u5 # ... row/column names changed,
Vi.u5 # ... the sub-matrix printed.
cov2cor(Vi.u5) # Corr(V_i)
###################################################
### code chunk: R17.5
###################################################
rnf <- ranef(fm17.1) # b_i: (13.50)
(vrnf <- var(rnf)) # var(b_i). Compare to D in R17.13a.
plot(rnf) # Side-by-side plot (Fig. 17.1a)
library(ellipse) # for confidence ellipses in myPanel below
myPanel <- function(x,y, ...){
# Lattice panel: scatter of the random effects with two normal-theory
# ellipses overlaid -- solid from the model-based D matrix, dashed from the
# empirical var(b_i). NOTE(review): relies on globals fm17.1 and vrnf
# defined earlier in this script; draw order (grid, points, ellipses)
# controls the plot layering.
panel.grid(h = -1, v = -1)
panel.xyplot(x, y)
ex1 <- # Ellipse based on D: (17.3)
ellipse(getVarCov(fm17.1))
panel.xyplot(ex1[, 1], ex1[, 2], type = "l", lty = 1)
ex2 <- ellipse(vrnf) # Ellipse based on var(b_i).
panel.xyplot(ex2[ ,1], ex2[, 2], type = "l", lty = 2)
}
xyplot(rnf[, 2] ~ rnf[, 1], # Scatterplot b_i1 versus b_i0 (Fig. 17.1b)
xlab = "Pre-intervention",
ylab = "Post-intervention",
xlim = c(-40, 40), ylim = c(-40, 40),
panel = myPanel)
###################################################
### code chunk: R17.6
###################################################
prt1r <- # Auxiliary data
within(prt1,
{ # Pearson residuals
residP1 <- residuals(fm17.1, type = "p")
fitted1 <- fitted(fm17.1)
})
range(prt1r$residP1) # Info for y-axis range
xyplot(residP1 ~ fitted1| occ.f, # Resids vs fitted (Fig. 17.2a)
data = prt1r, ylim = c(-6, 6),
type = c("p", "smooth"),
grid = TRUE)
qqnorm(prt1r$residP1); qqline(prt1r$residP1) # Q-Q plot (Fig. 17.3a)
###################################################
### code chunk: R17.7
###################################################
# M17.2: same model with power-of-the-mean residual variance function
fm17.2 <- # M17.2 <- M17.1
update(fm17.1,
weights = varPower(form = ~fitted(.)),
data = prt1)
intervals(fm17.2)$varStruct # 95% CI for delta, (17.5)
anova(fm17.1, fm17.2) # H0: delta = 0 (M17.1 nested in M17.2)
###################################################
### code chunk: R17.8a
###################################################
# M17.3: both fiber types, fiber effects added to fixed and random parts
lme.spec.form3 <-
update(lme.spec.form1, # M17.3 <- M17.1
. ~ . + fiber.f + prt.f:fiber.f + occ.f:fiber.f)
fm17.3 <-
lme(lme.spec.form3, # (17.6)
random = ~occ.f:fiber.f - 1|id, # D(17.7)
data = prt)
###################################################
### code chunk: R17.8b
###################################################
fixed.D4 <- summary(fm17.3)$tTable # beta, se(beta), t-test
rnms <- rownames(fixed.D4) # beta names (not shown)
rnms[8:11] <- # Selected names shortened
c("Low:Pos", "Low:Type2", "Pos:Type2", "Male:Old")
rownames(fixed.D4) <- rnms # Short names assigned
printCoefmat(fixed.D4, digits = 3, zap.ind = 5)
###################################################
### code chunk: R17.9a
###################################################
fm17.3cov <- # D: (17.7) extracted
getVarCov(fm17.3, type = "random.effect")
rownames(fm17.3cov) # Long names ...
nms. <- c("T1.Pre", "T1.Pos", "T2.Pre", "T2.Pos")# ... abbreviated
dimnames(fm17.3cov) <- list(nms., nms.) # ... and reassigned.
fm17.3cov # D: (17.7) printed
fm17.3cor <- cov2cor(fm17.3cov) # Corr(D) ...
print(fm17.3cor, digits = 2, # ...printed.
corr = TRUE, stdevs = FALSE)
###################################################
### code chunk: R17.9b
###################################################
dim(R.5 <- # Dims of R_i ...
getVarCov(fm17.3,
type = "conditional")[["5"]]) # ... for subject "5".
diag(R.5)[1:6] # First 6 diagonal elements
(sgma <- fm17.3$sigma) # sigma
print(sgma^2) # sigma^2
###################################################
### code chunk: R17.10
###################################################
CI <- intervals(fm17.3, which = "var-cov") # 95% CIs for theta_D
interv <- CI$reStruct$id
# rownames(interv) # Long names (not shown)
# Short, readable labels for the SDs and correlations of D
thDnms <-
c("sd(T1Pre)", "sd(T1Pos)", "sd(T2Pre)", "sd(T2Pos)",
"cor(T1Pre,T1Pos)", "cor(T1Pre,T2Pre)", "cor(T1Pre,T2Pos)",
"cor(T1Pos,T2Pre)", "cor(T1Pos,T2Pos)",
"cor(T2Pre,T2Pos)")
rownames(interv) <- thDnms # Short names assigned
interv # CIs printed
###################################################
### code chunk: R17.11
###################################################
residP3 <- residuals(fm17.3, type = "p") # Pearson residuals
xyplot(residP3 ~ fitted(fm17.3)| # Scatterplots ...
fiber.f:occ.f, # ...per type*occasion (Fig. 17.4)
data = prt,
type = c("p", "smooth"))
qqnorm(residP3); qqline(residP3) # Q-Q plot (Fig. 17.5)
###################################################
### code chunk: R17.12a
###################################################
Vx <- # Vx is a list ...
getVarCov(fm17.3, type = "marginal",
individual = "5") # ... with one component.
Vmtx.5 <- Vx$"5" # Vmtx.5 is V_i matrix (17.8)...
dim(Vmtx.5) # ... with large dimensions.
dt5 <- # Data with 41 rows ...
subset(prt,
select = c(id, fiber.f, occ.f), # ... and 3 variables ...
id == "5") # ... for subject "5".
###################################################
### code chunk: R17.12b
###################################################
# Up to two indices per fiber-type x occasion cell (helper auxF1)
(i.u5 <- unlist( # Selected indices printed.
tapply(rownames(dt5), # Indices for subject "5" ...
list(dt5$fiber.f, dt5$occ.f), # ... by fiber.f and occ.f.
FUN = auxF1)))
dt.u5 <- dt5[i.u5, ] # Raw data for selected indices
nms.u5 <-
paste(format(i.u5, 2, justify = "right"),
abbreviate(dt.u5$fiber.f, 2), # Row names abbreviated
dt.u5$occ.f, sep = ".")
###################################################
### code chunk: R17.12c
###################################################
# Print covariances in the lower triangle and correlations in the upper one
Vmtx.u5 <- Vmtx.5[i.u5, i.u5] # Submatrix of V_i for subject "5"
dimnames(Vmtx.u5) <- list(nms.u5, i.u5) # dimnames assigned
Cmtx.u5 <- cov2cor(Vmtx.u5) # Submatrix of Corr(V_i)
uptri <- upper.tri(Cmtx.u5) # Logical matrix
Vmtx.u5[uptri] <- Cmtx.u5[uptri]
print(Vmtx.u5, digits = 2) # Submatrix printed
###################################################
### code chunk: R17.13a
###################################################
# M17.3a: alternative parameterization of the random-effects structure
fm17.3a <-
lme(lme.spec.form3, # M17.3a
random = ~1 + fiber.f + occ.f + fiber.f:occ.f|id,
data = prt)
print(fm17.3a$sigma, digits = 4) # sigma
fm17.3acov <- # D
getVarCov(fm17.3a,
type = "random.effect", individual = "5")
dimnames(fm17.3acov)[[1]] # Row/col D names ...
nms <- c("(Int)", "T2", "Pos", "T2:Pos") # ... shortened
dimnames(fm17.3acov) <- list(nms,nms) # ... and assigned.
print(fm17.3acov, digits = 4) # D printed
###################################################
### code chunk: R17.13b
###################################################
# Linear transformation T_D mapping M17.3a's D to M17.3's parameterization
td <- # T_D: (17.12) created...
matrix(c(1, 0, 0, 0,
1, 0, 1, 0,
1, 1, 0, 0,
1, 1, 1, 1),
nrow = 4, ncol = 4, byrow = TRUE)
mat.D4 <- td %*% fm17.3acov %*% t(td) # ... and applied.
dimnames(mat.D4) <- list(nms., nms.) # Row/col names shortened.
print(mat.D4, digits = 5) # D:(17.7); see R17.9.
###################################################
### code chunk: fig 17.6 using splom() function
###################################################
# Stack the random effects from both fits into one data frame (dtref),
# tagged by fitName, for the scatterplot matrix below.
D173 <- getVarCov(fm17.3)
D173a <- getVarCov(fm17.3a)
nms <- c("T1:Pre/(Int)","T2:Pre/T2","T1:Pos/Pos","T2:Pos/T2:Pos","fitName")
dtref1 <- within(ranef(fm17.3), fitName <- "fm17.3")
names(dtref1)
names(dtref1) <- nms
dtref2 <- within(ranef(fm17.3a), fitName <- "fm17.3a")
names(dtref2)
names(dtref2) <- nms
dtref <- rbind(dtref1, dtref2)
(lims <- range(dtref[,1:4]))
lims <- c(-40,40) # user-defined limits for every variable
atx <- -1 # grid setting shared by the panel functions below
# Helpers: write a correlation label (rho = ...) into the lower/upper panel
myFunL <- function(corx) ltext(-15, 25, substitute(paste(rho, corx), list(corx = corx)), cex = 0.9)
myFunU <- function(corx) ltext(-15, -32, substitute(paste(rho, corx), list(corx = corx)), cex = 0.9)
my.upperPanel <- ## pairwise.complete.obs
  function(x, y, subscripts, ...){
    # Upper splom triangle: random effects from model fm17.3a only,
    # annotated with their correlation. Relies on globals dtref and atx
    # and on helper myFunU defined earlier in this script.
    # Fix: removed unused local `abs.corx` (dead code in the original).
    vr <- dtref$fitName == "fm17.3a"
    subs <- subscripts[vr]
    x1 <- x[subs]
    y1 <- y[subs]
    panel.grid(h = atx, v = atx, col = "grey", ...)
    panel.xyplot(x1, y1, ...)
    corx <- round(cor(x1, y1, use = "complete.obs"), 2)
    corx <- paste("=", corx, sep = "")  # label text, e.g. "=0.81"
    myFunU(corx)                        # annotate the panel
  }
my.lowerPanel <- ## pairwise.complete.obs
  function(x, y, subscripts, ...){
    # Lower splom triangle: random effects from model fm17.3 only,
    # annotated with their correlation. Relies on globals dtref and atx
    # and on helper myFunL defined earlier in this script.
    # Fix: removed unused locals `abs.corx`, `cex.value` and `rx`
    # (dead code in the original). The print() echo is kept.
    vr <- dtref$fitName == "fm17.3"
    subs <- subscripts[vr]
    x1 <- x[subs]
    y1 <- y[subs]
    panel.grid(h=atx,v=atx, col="grey") ## ...lty="13",...)
    panel.xyplot(x1, y1, ...)
    corx <- round(cor(x1, y1, use = "complete.obs"), 2)
    corx <- paste("=", corx, sep = "")  # label text, e.g. "=0.81"
    print(corx)                         # console echo of the correlation
    myFunL(corx)                        # annotate the panel
  }
mySuperPanel <- function(z, subscripts, panel.subscripts,...){
# Superpanel for splom(): model fm17.3a effects in the upper triangle,
# fm17.3 effects in the lower one, with common axis limits `lims`
# (global defined above).
panel.pairs(z, subscripts = subscripts,
panel.subscripts = panel.subscripts,
as.matrix = TRUE,
upper.panel = "my.upperPanel",
lower.panel = "my.lowerPanel",
## Possible to shorten syntax. See other splom figures
pscales =list(
"T1:Pre/(Int)" = list(limits = lims),
"T2:Pre/T2" = list(limits = lims),
"T1:Pos/Pos" = list(limits = lims),
"T2:Pos/T2:Pos" = list(limits = lims)) )
print(names(z)) # Diagnostic echo of the variable names seen by the panel
}
# NOTE(review): abbrev.names is currently unused -- the varnames argument
# in splom() below is commented out.
abbrev.names <- c("vis0", "vis4", "vis12", "vis24", "vis52")
splom.form <- formula(~as.matrix(dtref[,1:4]))
splom(splom.form, # Fig. 17.6: scatterplot matrix of random effects
data = dtref,
as.matrix = TRUE, #### varnames = abbrev.names,
xlab = "",
superpanel = mySuperPanel
)
sessionInfo()
detach(package:nlme) # Clean up the search path
| /inst/scriptsR2.15.0/Ch17part1.R | no_license | cran/nlmeU | R | false | false | 15,129 | r | date()
packageVersion("nlmeU")
message("Code for first part of Chapter 17 (from Panel R17.1 til R17.13) is executed below.")
message("Code for remaining panels pertaining to pdKronecker class is distributed with nlmeUpdK package (Jun.10, 2013)")
###################################################
### code chunk: Chap17init
###################################################
options(digits = 5, show.signif.stars = FALSE)
sessionInfo()
library(nlme)
library(lattice)
###################################################
### code chunk: R17.1a
###################################################
data(prt, package = "nlmeU")
lme.spec.form1 <-
formula(spec.fo ~ (prt.f + occ.f)^2 + sex.f + age.f +
sex.f:age.f + bmi)
prt1 <- subset(prt, fiber.f == "Type 1", select = -fiber.f)
fm17.1 <- # M17.1 (17.1)
lme(lme.spec.form1,
random = ~occ.f - 1|id, # Random effects structure (including D)
data = prt1)
###################################################
### code chunk: R17.1b
###################################################
getGroupsFormula(fm17.1)
str(grpF <- getGroups(fm17.1))
nF1 <- xtabs(~grpF) # Number of type-1 fibers per subject
range(nF1) # Min, max number of type-1 fibers
nF1[which.min(nF1)] # Subject with the minimum number of fibers
str(fm17.1$dims) # Basic dimensions used in the fit
###################################################
### code chunk: R17.2
###################################################
fixed1 <- summary(fm17.1)$tTable # beta, se(beta), t-test
nms <- rownames(fixed1) # beta names
nms[7:8] <- c("fLow:fPos", "fMale:fOld") # Selected names shortened
rownames(fixed1) <- nms # New names assigned
printCoefmat(fixed1, digits = 3, # See also Table 17.1
has.Pvalue = TRUE, P.values = TRUE)
###################################################
### code chunk: R17.3a
###################################################
getVarCov(fm17.1) # D: (17.3)
VarCorr(fm17.1)
###################################################
### code chunk: R17.3b
###################################################
Ri <- # Ri is a list containing R_i ...
getVarCov(fm17.1, c("5", "275"),# ... for subjects "5" and "275".
type = "conditional")
Ri$"275" # R_i for the subject "275" (17.2)
Ri.5 <- Ri$"5" # R_i for the subject "5" ...
dim(Ri.5) # ... with large dimensions ...
(Ri.5d <- diag(Ri.5)[1:6]) # ... its first 6 diagonal elements.
sgma <- summary(fm17.1)$sigma # sigma
sgma^2 # sigma^2
###################################################
### code chunk: R17.4a
###################################################
dt5 <- # Data with 30 observations
subset(prt1,
select = c(id, occ.f), # ... and 2 variables
id == "5") # ... for the subject "5".
auxF1 <- function(elv) {
  # Return at most the first two elements of `elv`.
  # Fix: the original `1:min(length(elv), 2)` evaluates to c(1, 0) when
  # `elv` is empty, selecting a spurious NA element; seq_len() correctly
  # yields an empty index vector for length-0 input.
  idx <- seq_len(min(length(elv), 2))
  elv[idx]
}
(i.u5 <- # Selected indices printed
unlist(
tapply(rownames(dt5), # ... for the subject "5"
dt5$occ.f, # ... by occ.f subgroups
FUN = auxF1)))
dt.u5 <- dt5[i.u5, ] # Raw data for selected indices
(nms.u5 <- # Row names constructed
paste(i.u5, dt.u5$occ.f, sep = "."))
###################################################
### code chunk: R17.4b
###################################################
Vi <- # Vi is a list containing ...
getVarCov(fm17.1, "5", # ... matrix V_i for subject "5".
type = "marginal")
Vi.5 <- Vi$"5" # Vi.5 is a V_i matrix: (17.4)
Vi.u5 <- Vi.5[i.u5, i.u5] # A sub-matrix selected, ...
rownames(Vi.u5) <- nms.u5 # ... row/column names changed,
Vi.u5 # ... the sub-matrix printed.
cov2cor(Vi.u5) # Corr(V_i)
###################################################
### code chunk: R17.5
###################################################
rnf <- ranef(fm17.1) # b_i: (13.50)
(vrnf <- var(rnf)) # var(b_i). Compare to D in R17.13a.
plot(rnf) # Side-by-side plot (Fig. 17.1a)
library(ellipse)
myPanel <- function(x,y, ...){
# Lattice panel: scatter of the random effects with two normal-theory
# ellipses overlaid -- solid from the model-based D matrix, dashed from the
# empirical var(b_i). NOTE(review): relies on globals fm17.1 and vrnf
# defined earlier in this script; draw order (grid, points, ellipses)
# controls the plot layering.
panel.grid(h = -1, v = -1)
panel.xyplot(x, y)
ex1 <- # Ellipse based on D: (17.3)
ellipse(getVarCov(fm17.1))
panel.xyplot(ex1[, 1], ex1[, 2], type = "l", lty = 1)
ex2 <- ellipse(vrnf) # Ellipse based on var(b_i).
panel.xyplot(ex2[ ,1], ex2[, 2], type = "l", lty = 2)
}
xyplot(rnf[, 2] ~ rnf[, 1], # Scatterplot b_i1 versus b_i0 (Fig. 17.1b)
xlab = "Pre-intervention",
ylab = "Post-intervention",
xlim = c(-40, 40), ylim = c(-40, 40),
panel = myPanel)
###################################################
### code chunk: R17.6
###################################################
prt1r <- # Auxiliary data
within(prt1,
{ # Pearson residuals
residP1 <- residuals(fm17.1, type = "p")
fitted1 <- fitted(fm17.1)
})
range(prt1r$residP1) # Info for y-axis range
xyplot(residP1 ~ fitted1| occ.f, # Resids vs fitted (Fig. 17.2a)
data = prt1r, ylim = c(-6, 6),
type = c("p", "smooth"),
grid = TRUE)
qqnorm(prt1r$residP1); qqline(prt1r$residP1) # Q-Q plot (Fig. 17.3a)
###################################################
### code chunk: R17.7
###################################################
fm17.2 <- # M17.2 <- M17.1
update(fm17.1,
weights = varPower(form = ~fitted(.)),
data = prt1)
intervals(fm17.2)$varStruct # 95% CI for delta, (17.5)
anova(fm17.1, fm17.2) # H0: delta = 0 (M17.1 nested in M17.2)
###################################################
### code chunk: R17.8a
###################################################
lme.spec.form3 <-
update(lme.spec.form1, # M17.3 <- M17.1
. ~ . + fiber.f + prt.f:fiber.f + occ.f:fiber.f)
fm17.3 <-
lme(lme.spec.form3, # (17.6)
random = ~occ.f:fiber.f - 1|id, # D(17.7)
data = prt)
###################################################
### code chunk: R17.8b
###################################################
fixed.D4 <- summary(fm17.3)$tTable # beta, se(beta), t-test
rnms <- rownames(fixed.D4) # beta names (not shown)
rnms[8:11] <- # Selected names shortened
c("Low:Pos", "Low:Type2", "Pos:Type2", "Male:Old")
rownames(fixed.D4) <- rnms # Short names assigned
printCoefmat(fixed.D4, digits = 3, zap.ind = 5)
###################################################
### code chunk: R17.9a
###################################################
fm17.3cov <- # D: (17.7) extracted
getVarCov(fm17.3, type = "random.effect")
rownames(fm17.3cov) # Long names ...
nms. <- c("T1.Pre", "T1.Pos", "T2.Pre", "T2.Pos")# ... abbreviated
dimnames(fm17.3cov) <- list(nms., nms.) # ... and reassigned.
fm17.3cov # D: (17.7) printed
fm17.3cor <- cov2cor(fm17.3cov) # Corr(D) ...
print(fm17.3cor, digits = 2, # ...printed.
corr = TRUE, stdevs = FALSE)
###################################################
### code chunk: R17.9b
###################################################
dim(R.5 <- # Dims of R_i ...
getVarCov(fm17.3,
type = "conditional")[["5"]]) # ... for subject "5".
diag(R.5)[1:6] # First 6 diagonal elements
(sgma <- fm17.3$sigma) # sigma
print(sgma^2) # sigma^2
###################################################
### code chunk: R17.10
###################################################
CI <- intervals(fm17.3, which = "var-cov") # 95% CIs for theta_D
interv <- CI$reStruct$id
# rownames(interv) # Long names (not shown)
thDnms <-
c("sd(T1Pre)", "sd(T1Pos)", "sd(T2Pre)", "sd(T2Pos)",
"cor(T1Pre,T1Pos)", "cor(T1Pre,T2Pre)", "cor(T1Pre,T2Pos)",
"cor(T1Pos,T2Pre)", "cor(T1Pos,T2Pos)",
"cor(T2Pre,T2Pos)")
rownames(interv) <- thDnms # Short names assigned
interv # CIs printed
###################################################
### code chunk: R17.11
###################################################
residP3 <- residuals(fm17.3, type = "p") # Pearson residuals
xyplot(residP3 ~ fitted(fm17.3)| # Scatterplots ...
fiber.f:occ.f, # ...per type*occasion (Fig. 17.4)
data = prt,
type = c("p", "smooth"))
qqnorm(residP3); qqline(residP3) # Q-Q plot (Fig. 17.5)
###################################################
### code chunk: R17.12a
###################################################
Vx <- # Vx is a list ...
getVarCov(fm17.3, type = "marginal",
individual = "5") # ... with one component.
Vmtx.5 <- Vx$"5" # Vmtx.5 is V_i matrix (17.8)...
dim(Vmtx.5) # ... with large dimensions.
dt5 <- # Data with 41 rows ...
subset(prt,
select = c(id, fiber.f, occ.f), # ... and 3 variables ...
id == "5") # ... for subject "5".
###################################################
### code chunk: R17.12b
###################################################
(i.u5 <- unlist( # Selected indices printed.
tapply(rownames(dt5), # Indices for subject "5" ...
list(dt5$fiber.f, dt5$occ.f), # ... by fiber.f and occ.f.
FUN = auxF1)))
dt.u5 <- dt5[i.u5, ] # Raw data for selected indices
nms.u5 <-
paste(format(i.u5, 2, justify = "right"),
abbreviate(dt.u5$fiber.f, 2), # Row names abbreviated
dt.u5$occ.f, sep = ".")
###################################################
### code chunk: R17.12c
###################################################
Vmtx.u5 <- Vmtx.5[i.u5, i.u5] # Submatrix of V_i for subject "5"
dimnames(Vmtx.u5) <- list(nms.u5, i.u5) # dimnames assigned
Cmtx.u5 <- cov2cor(Vmtx.u5) # Submatrix of Corr(V_i)
uptri <- upper.tri(Cmtx.u5) # Logical matrix
Vmtx.u5[uptri] <- Cmtx.u5[uptri]
print(Vmtx.u5, digits = 2) # Submatrix printed
###################################################
### code chunk: R17.13a
###################################################
## Fit model M17.3a: same fixed-effects formula as fm17.3 (lme.spec.form3,
## defined elsewhere) but with a general random-effects structure
## (intercept, fiber type, occasion, and their interaction) per subject.
fm17.3a <-
  lme(lme.spec.form3, # M17.3a
      random = ~1 + fiber.f + occ.f + fiber.f:occ.f|id,
      data = prt)
print(fm17.3a$sigma, digits = 4) # sigma
## Extract the random-effects covariance matrix D and shorten its labels.
fm17.3acov <- # D
  getVarCov(fm17.3a,
            type = "random.effect", individual = "5")
dimnames(fm17.3acov)[[1]] # Row/col D names ...
nms <- c("(Int)", "T2", "Pos", "T2:Pos") # ... shortened
dimnames(fm17.3acov) <- list(nms,nms) # ... and assigned.
print(fm17.3acov, digits = 4) # D printed
###################################################
### code chunk: R17.13b
###################################################
## Transform the random-effects covariance matrix via the linear map T_D
## (eq. 17.12), giving D in the parameterization of eq. 17.7 (cf. R17.9).
td <-                                    # T_D: (17.12) created...
  matrix(c(1, 0, 0, 0,
           1, 0, 1, 0,
           1, 1, 0, 0,
           1, 1, 1, 1),
         nrow = 4, ncol = 4, byrow = TRUE)
mat.D4 <- td %*% fm17.3acov %*% t(td)    # ... and applied.
## BUGFIX: original code used `nms.`, which is never defined; the shortened
## label vector created in chunk R17.13a is `nms`.
dimnames(mat.D4) <- list(nms, nms)       # Row/col names shortened.
print(mat.D4, digits = 5)                # D:(17.7); see R17.9.
###################################################
### code chunk: fig 17.6 using splom() function
###################################################
## Stack the per-subject random effects (BLUPs) from both fits into one data
## frame `dtref`, tagged by fit name, for the scatter-plot matrix below.
D173 <- getVarCov(fm17.3)
D173a <- getVarCov(fm17.3a)
nms <- c("T1:Pre/(Int)","T2:Pre/T2","T1:Pos/Pos","T2:Pos/T2:Pos","fitName")
dtref1 <- within(ranef(fm17.3), fitName <- "fm17.3")
names(dtref1)
names(dtref1) <- nms
dtref2 <- within(ranef(fm17.3a), fitName <- "fm17.3a")
names(dtref2)
names(dtref2) <- nms
dtref <- rbind(dtref1, dtref2)
## The data-driven range below is printed for reference but then deliberately
## replaced by fixed axis limits so all panels share the same scale.
(lims <- range(dtref[,1:4]))
lims <- c(-40,40) # user-defined limits for every variable
atx <- -1
## Annotate the current lower-triangle panel with a "rho=<value>" label.
myFunL <- function(corx) {
  lbl <- substitute(paste(rho, corx), list(corx = corx))
  ltext(-15, 25, lbl, cex = 0.9)
}
## Annotate the current upper-triangle panel with a "rho=<value>" label.
myFunU <- function(corx) {
  lbl <- substitute(paste(rho, corx), list(corx = corx))
  ltext(-15, -32, lbl, cex = 0.9)
}
## Panel function for the upper triangle of the splom (Fig. 17.6): plots the
## random effects belonging to fit "fm17.3a" only (selected from the global
## `dtref` by the `fitName` column) and labels the panel with the pairwise
## correlation of the two displayed variables.
## Fix: removed the unused local `abs.corx` left over from earlier drafts.
my.upperPanel <-   ## pairwise.complete.obs
  function(x, y, subscripts, ...){
    vr <- dtref$fitName == "fm17.3a"   # rows for fit fm17.3a
    subs <- subscripts[vr]
    x1 <- x[subs]
    y1 <- y[subs]
    panel.grid(h = atx, v = atx, col = "grey", ...)
    panel.xyplot(x1, y1, ...)
    corx <- round(cor(x1, y1, use = "complete.obs"), 2)
    corx <- paste("=", corx, sep = "")
    myFunU(corx)                       # place "rho=<value>" in the panel
  }
## Panel function for the lower triangle of the splom (Fig. 17.6): plots the
## random effects belonging to fit "fm17.3" only and labels the panel with
## the pairwise correlation of the two displayed variables.
## Fix: removed the unused locals `abs.corx`, `cex.value`, and `rx` (dead
## code from earlier drafts). The `print(corx)` trace output is kept so the
## console behavior matches the original script.
my.lowerPanel <-  ## pairwise.complete.obs
  function(x, y, subscripts, ...){
    vr <- dtref$fitName == "fm17.3"    # rows for fit fm17.3
    subs <- subscripts[vr]
    x1 <- x[subs]
    y1 <- y[subs]
    panel.grid(h=atx,v=atx, col="grey") ## ...lty="13",...)
    panel.xyplot(x1, y1, ...)
    corx <- round(cor(x1, y1, use = "complete.obs"), 2)
    corx <- paste("=", corx, sep = "")
    print(corx)                        # debug/trace output (kept)
    myFunL(corx)                       # place "rho=<value>" in the panel
  }
## Superpanel for splom(): delegates to lattice's panel.pairs(), wiring in
## the custom upper/lower panel functions above and forcing common axis
## limits (`lims`) on every variable.
mySuperPanel <- function(z, subscripts, panel.subscripts,...){
  panel.pairs(z, subscripts = subscripts,
              panel.subscripts = panel.subscripts,
              as.matrix = TRUE,
              upper.panel = "my.upperPanel",
              lower.panel = "my.lowerPanel",
              ## Possible to shorten syntax. See other splom figures
              pscales =list(
                  "T1:Pre/(Int)" = list(limits = lims),
                  "T2:Pre/T2" = list(limits = lims),
                  "T1:Pos/Pos" = list(limits = lims),
                  "T2:Pos/T2:Pos" = list(limits = lims)) )
  ## Debug/trace output: prints the variable names passed to the superpanel.
  print(names(z))
}
## NOTE(review): `abbrev.names` is currently unused -- its only intended use
## (the `varnames` argument) is commented out in the splom() call below.
abbrev.names <- c("vis0", "vis4", "vis12", "vis24", "vis52")
splom.form <- formula(~as.matrix(dtref[,1:4]))
## Scatter-plot matrix of the stacked random effects (Fig. 17.6), using the
## custom superpanel defined above.
splom(splom.form,
      data = dtref,
      as.matrix = TRUE, #### varnames = abbrev.names,
      xlab = "",
      superpanel = mySuperPanel
)
sessionInfo()
detach(package:nlme)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fasttext_interface.R
\name{inner_elapsed_time}
\alias{inner_elapsed_time}
\title{inner function of 'compute_elapsed_time'}
\usage{
inner_elapsed_time(secs, estimated = FALSE)
}
\arguments{
\item{secs}{a numeric value specifying the seconds}
\item{estimated}{a boolean. If TRUE then the output label becomes the 'Estimated time'}
}
\value{
a character string showing the estimated or elapsed time
}
\description{
inner function of 'compute_elapsed_time'
}
\keyword{internal}
| /man/inner_elapsed_time.Rd | no_license | mlampros/fastText | R | false | true | 553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fasttext_interface.R
\name{inner_elapsed_time}
\alias{inner_elapsed_time}
\title{inner function of 'compute_elapsed_time'}
\usage{
inner_elapsed_time(secs, estimated = FALSE)
}
\arguments{
\item{secs}{a numeric value specifying the seconds}
\item{estimated}{a boolean. If TRUE then the output label becomes the 'Estimated time'}
}
\value{
a character string showing the estimated or elapsed time
}
\description{
inner function of 'compute_elapsed_time'
}
\keyword{internal}
|
#' Run the MSE for a number of runs for any scenarios
#'
#' @details Saves the output of the MSE runs to the files specified in `fns`, in the directories
#' specified in `results_root_dir` and `results_dir`
#'
#' @param n_runs The number of runs to do for each simulation
#' @param n_sim_yrs The number of years to simulate into the future
#' @param fns A vector of file names for the scenarios (.rds files). .rds extension is optional
#' @param plot_names A vector of strings to use for the scenarios later when plotting. Must either be
#' `NULL` or the same length as `fns`
#' @param tacs A vector of TAC values to be passed to the [run_mse_scenario()] function, in the same
#' order as the `fns` file names, or a single value
#' @param attains A vector of 2, in the order Canada, US for proportion of catch to include
#' @param c_increases Increase in max movement. A vector of values to be passed to the [run_mse_scenario()]
#' function, in the same order as the `fns` file names, or a single value which will be used for all scenarios
#' @param m_increases Decrease of spawners returning south. A vector of values to be passed to the
#' [run_mse_scenario()] function, in the same order as the `fns` file names, or a single value which
#' will be used for all scenarios
#' @param sel_changes Selectivity scenario change type. A vector of values to be passed to the
#' [run_mse_scenario()] function, in the same order as the `fns` file names, or a single value which will
#' be used for all scenarios
#' @param n_surveys The number of surveys for each run. This must be a vector of the same length as `fns` or `NULL`.
#' If `NULL`, 2 will be used for every scenario
#' @param b_futures A vector of values to be passed to the [run_mse_scenario()] function for bias adjustment into
#' the future, in the same order as the `fns` file names, or a single value which will be used for all scenarios
#' @param random_seed A seed value to use when calling for all random functions
#' @param results_root_dir The results root directory
#' @param results_dir The results directory
#' @param catch_floor The lowest value to allow catch to drop to when applying the tac rule for the catch floor
#' @param single_seed If NULL, ignore. If a number, use that as a seed to run a single run of the MSE. Used for testing.
#' @param hcr_apply Accepted for interface compatibility but currently unused: [run_mse_scenario()] is
#' always called with `hcr_apply = FALSE` because applying the HCR inside the OM breaks the MSE
#' @param ... Arguments passed to [load_data_om()]
#'
#' @return Nothing
#' @importFrom dplyr transmute group_map mutate_at quo
#' @importFrom here here
#' @importFrom purrr map2 map map_chr map_lgl
#' @importFrom r4ss SS_output
#' @importFrom stringr str_ends
#' @importFrom clisymbols symbol
#' @importFrom tictoc tic toc
#' @importFrom crayon white
#' @export
run_mses <- function(n_runs = 10,
                     n_sim_yrs = NULL,
                     fns = NULL,
                     plot_names = NULL,
                     tacs = c(0, 1),
                     attains = c(1, 1),
                     c_increases = 0,
                     m_increases = 0,
                     sel_changes = 0,
                     n_surveys = 2,
                     b_futures = 0.5,
                     random_seed = 12345,
                     single_seed = NULL,
                     results_root_dir = here("results"),
                     results_dir = here("results", "default"),
                     catch_floor = NULL,
                     hcr_apply = FALSE,
                     ...){
  # Check file names and append .rds if necessary
  fns <- map_chr(fns, ~{
    ifelse(str_ends(.x, pattern = "\\.rds"), .x, paste0(.x, ".rds"))
  })
  # Create the output directories if they do not already exist
  if(!dir.exists(results_root_dir)){
    dir.create(results_root_dir)
  }
  if(!dir.exists(results_dir)){
    dir.create(results_dir)
  }
  # This function expands a single value to a vector of the length of `fns`. If it is already
  # the same length, nothing happens.
  fill_vec <- function(d){
    stopifnot(length(d) == 1 || length(d) == length(fns))
    if(length(d) == 1 && length(fns) > 1){
      d <- rep(d, length(fns))
    }
    d
  }
  c_increases <- fill_vec(c_increases)
  m_increases <- fill_vec(m_increases)
  sel_changes <- fill_vec(sel_changes)
  n_surveys <- fill_vec(n_surveys)
  b_futures <- fill_vec(b_futures)
  tacs <- fill_vec(tacs)
  attains <- fill_vec(attains)
  # A length-1, non-zero tac means "apply the tac rule", which requires a catch floor
  if(any(map_lgl(tacs, ~{length(.x) == 1 && .x != 0})) && is.null(catch_floor)){
    stop("`catch_floor` argument is NULL with at least one of the `tac` argument list ",
         "values having length 1, and being not equal to zero (which signifies no tac application). ",
         "Provide a catch_floor value to use when applying tac value of 1.",
         call. = FALSE)
  }
  if(!all(map_lgl(tacs, ~{if(length(.x) %in% 1:2) TRUE else FALSE}))){
    stop("List elements of `tacs` must be either length 1 or length 2.",
         call. = FALSE)
  }
  tic()
  # Seed for the random recruitment deviations (search rnorm in update_om_data.R) and
  # survey error (search rnorm in run_year_loop.R).
  # This is also the seed used to set up the random seeds for each run:
  set.seed(random_seed)
  random_seeds <- floor(runif(n = n_runs, min = 1, max = 1e6))
  # Load the raw SS model inputs and outputs using the r4ss package and the same
  # methods used in the `hake-assessment` package
  # Create objects from the raw SS model inputs and outputs and
  # only include those in this list. To add new SS model outputs,
  # modify the `load_ss_model_data()` function
  ss_model <- load_ss_model_data(...)
  cat(green(symbol$tick), green(" SS model output successfully loaded\n"))
  # Single-run mode (for testing): run the first scenario once with the given seed
  if(!is.null(single_seed)){
    cat(white("Scenario:", fns[1], "\n"))
    om <- load_data_om(ss_model,
                       n_sim_yrs = n_sim_yrs,
                       n_survey = n_surveys[1],
                       b_future = b_futures[1],
                       selectivity_change = sel_changes[1],
                       ...)
    cat(green("Single run, seed = ", single_seed, "\n"))
    tmp <- run_mse_scenario(om = om,
                            ss_model = ss_model,
                            n_sim_yrs = n_sim_yrs,
                            random_seed = single_seed,
                            sel_change = sel_changes[1],
                            c_increase = c_increases[1],
                            m_increase = m_increases[1],
                            tac = tacs[[1]],
                            attain = attains[[1]],
                            catch_floor = catch_floor,
                            hcr_apply = FALSE, # Tell OM not to apply HCR. If TRUE MSE will break
                            ...)
    cat(green("End single run\n"))
    toc()
    return(invisible())
  }
  # Begin MSEs loop -----------------------------------------------------------
  map2(fns, seq_along(fns), function(fn = .x, fn_ind = .y, ...){
    cat(white("Scenario:", fn, "\n"))
    #lst <- furrr::future_map(1:n_runs, function(run = .x, ...){
    lst <- map(seq_len(n_runs), function(run = .x, ...){
      om <- load_data_om(ss_model,
                         n_sim_yrs = n_sim_yrs,
                         n_survey = n_surveys[fn_ind],
                         b_future = b_futures[fn_ind],
                         selectivity_change = sel_changes[fn_ind],
                         ...)
      cat(green("Run #", run, "\n"))
      tmp <- run_mse_scenario(om = om,
                              ss_model = ss_model,
                              n_sim_yrs = n_sim_yrs,
                              random_seed = random_seeds[run],
                              sel_change = sel_changes[fn_ind],
                              c_increase = c_increases[fn_ind],
                              m_increase = m_increases[fn_ind],
                              tac = tacs[[fn_ind]],
                              attain = attains[[fn_ind]],
                              catch_floor = catch_floor,
                              hcr_apply = FALSE, # Tell OM not to apply HCR. If TRUE MSE will break
                              ...)
      # A failed run returns a non-list; store NA so the scenario file still saves
      if(is.list(tmp)) tmp else NA
    }, ...)
    #}, ..., .options = furrr::furrr_options(seed = T))
    attr(lst, "plotname") <- plot_names[fn_ind]
    saveRDS(lst, file = file.path(results_dir, fn))
  }, ...)
  # End MSEs loop -----------------------------------------------------------
  toc()
}
| /R/run_mses.R | no_license | aaronmberger-nwfsc/pacifichakemse-1 | R | false | false | 8,253 | r | #' Run the MSE for a number of runs for any scenarios
#'
#' @details Saves the output of the MSE runs to the files specified in `fns`, in the directories
#' specified in `results_root_dir` and `results_dir`
#'
#' @param n_runs Then number of runs to do for each simulation
#' @param n_sim_yrs The number of years to simulate into the future
#' @param fns A vector of file names for the scenarios (.rds files). .rds extension is optional
#' @param plot_names A vector of strings to use for the scenarios later when plotting. Must either be
#' `NULL` or the same length as `fns`
#' @param tacs A vector of TAC values to be passed to the [run_mse_scenario()] function, in the same
#' order as the `fns` file names, or a single value
#' @param attains A vector of 2, in the order Canada, US for proportion of catch to include
#' @param c_increases Increase in max movement. A vector of values to be passed to the [run_mse_scenario()]
#' function, in the same order as the `fns` file names, or a single value which will be used for all scenarios
#' @param m_increases Decrease of spawners returning south. A vector of values to be passed to the
#' [run_mse_scenario()] function, in the same order as the `fns` file names, or a single value which
#' will be used for all scenarios
#' @param sel_changes Selectivity scenario change type. A vector of values to be passed to the
#' [run_mse_scenario()] function, in the same order as the `fns` file names, or a single value which will
#' be used for all scenarios
#' @param n_surveys The number of surveys for each run. This must be a vector of the same length as `fns` or `NULL`.
#' If `NULL`, 2 will be used for every scenario
#' @param b_futures A vector of values to be passed to the [run_mse_scenario()] function for bias adjustment into
#' the future, in the same order as the `fns` file names, or a single value which will be used for all scenarios
#' @param random_seed A seed value to use when calling for all random functions
#' @param results_root_dir The results root directory
#' @param results_dir The results directory
#' @param catch_floor The lowest value to allow catch to drop to when applying the tac rule for the catch floor
#' @param single_seed If NULL, ignore. If a number, use that as a seed to run a single run of the MSE. User for testing.
#' @param ... Arguments passed to [load_data_om()]
#'
#' @return Nothing
#' @importFrom dplyr transmute group_map mutate_at quo
#' @importFrom here here
#' @importFrom purrr map2 map map_chr map_lgl
#' @importFrom r4ss SS_output
#' @importFrom stringr str_ends
#' @importFrom clisymbols symbol
#' @importFrom tictoc tic toc
#' @importFrom crayon white
#' @export
run_mses <- function(n_runs = 10,
n_sim_yrs = NULL,
fns = NULL,
plot_names = NULL,
tacs = c(0, 1),
attains = c(1, 1),
c_increases = 0,
m_increases = 0,
sel_changes = 0,
n_surveys = 2,
b_futures = 0.5,
random_seed = 12345,
single_seed = NULL,
results_root_dir = here("results"),
results_dir = here("results", "default"),
catch_floor = NULL,
hcr_apply = FALSE,
...){
# Check file names and append .rds if necessary
fns <- map_chr(fns, ~{
ifelse(str_ends(.x, pattern = "\\.rds"), .x, paste0(.x, ".rds"))
})
if(!dir.exists(results_root_dir)){
dir.create(results_root_dir)
}
if(!dir.exists(results_dir)){
dir.create(results_dir)
}
# This function expands a single value to a vector of the length of `fns`. If it is already
# the same length, nothing happens.
fill_vec <- function(d){
stopifnot(length(d) == 1 | length(d) == length(fns))
if(length(d) == 1 && length(fns) > 1){
d <- rep(d, length(fns))
}
d
}
c_increases <- fill_vec(c_increases)
m_increases <- fill_vec(m_increases)
sel_changes <- fill_vec(sel_changes)
n_surveys <- fill_vec(n_surveys)
b_futures <- fill_vec(b_futures)
tacs <- fill_vec(tacs)
attains <- fill_vec(attains)
if(any(map_lgl(tacs, ~{length(.x) == 1 && .x != 0})) && is.null(catch_floor)){
stop("`catch_floor` argument is NULL with at least one of the `tac` argument list ",
"values having length 1, and being not equal to zero (which signifies no tac application). ",
"Provide a catch_floor value to use when applying tac value of 1.",
call. = FALSE)
}
if(!all(map_lgl(tacs, ~{if(length(.x) %in% 1:2) TRUE else FALSE}))){
stop("List elements of `tacs` must be either length 1 or length 2.",
call. = FALSE)
}
tic()
# Seed for the random recruitment deviations (search rnorm in update_om_data.R) and
# survey error (search rnorm in run_year_loop.R).
# This is also the seed used to set up the random seeds for each run:
set.seed(random_seed)
random_seeds <- floor(runif(n = n_runs, min = 1, max = 1e6))
# Load the raw SS model inputs and outputs using the r4ss package and the same
# methods used in the `hake-assessment` package
# Create objects from the raw SS model inputs and outputs and
# only include those in this list. To add new SS model outputs,
# modify the `load_ss_model_data()` function
ss_model <- load_ss_model_data(...)
cat(green(symbol$tick), green(" SS model output successfully loaded\n"))
if(!is.null(single_seed)){
cat(white("Scenario:", fns[1], "\n"))
om <- load_data_om(ss_model,
n_sim_yrs = n_sim_yrs,
n_survey = n_surveys[1],
b_future = b_futures[1],
selectivity_change = sel_changes[1],
...)
cat(green("Single run, seed = ", single_seed, "\n"))
tmp <- run_mse_scenario(om = om,
ss_model = ss_model,
n_sim_yrs = n_sim_yrs,
random_seed = single_seed,
sel_change = sel_changes[1],
c_increase = c_increases[1],
m_increase = m_increases[1],
tac = tacs[[1]],
attain = attains[[1]],
catch_floor = catch_floor,
hcr_apply = FALSE, # Tell OM not to apply HCR. If TRUE MSE will break
...)
cat(green("End single run\n"))
toc()
return(invisible())
}
# Begin MSEs loop -----------------------------------------------------------
map2(fns, 1:length(fns), function(fn = .x, fn_ind = .y, ...){
cat(white("Scenario:", fn, "\n"))
#lst <- furrr::future_map(1:n_runs, function(run = .x, ...){
lst <- map(1:n_runs, function(run = .x, ...){
om <- load_data_om(ss_model,
n_sim_yrs = n_sim_yrs,
n_survey = n_surveys[fn_ind],
b_future = b_futures[fn_ind],
selectivity_change = sel_changes[fn_ind],
...)
cat(green("Run #", run, "\n"))
tmp <- run_mse_scenario(om = om,
ss_model = ss_model,
n_sim_yrs = n_sim_yrs,
random_seed = random_seeds[run],
sel_change = sel_changes[fn_ind],
c_increase = c_increases[fn_ind],
m_increase = m_increases[fn_ind],
tac = tacs[[fn_ind]],
attain = attains[[fn_ind]],
catch_floor = catch_floor,
hcr_apply = FALSE, # Tell OM not to apply HCR. If TRUE MSE will break
...)
if(is.list(tmp)) tmp else NA
}, ...)
#}, ..., .options = furrr::furrr_options(seed = T))
attr(lst, "plotname") <- plot_names[fn_ind]
saveRDS(lst, file = file.path(results_dir, fn))
}, ...)
# End MSEs loop -----------------------------------------------------------
toc()
}
|
## Setup ####
## Fix: use library() instead of require() for loading (require() returns
## FALSE instead of erroring when a package is missing), and use `<-` for
## all top-level assignments. Values are unchanged.
library(socialmixr)
library(magrittr)
library(stringr)
library(reshape2)
library(dplyr)
library(ggplot2)
library(truncnorm)
## NOTE(review): the sourced file name "asyptomatic_age.R" looks misspelled
## ("asymptomatic"?) but must match the file on disk -- do not change here.
source("asyptomatic_age.R")
## Age-group cut points and the proportion symptomatic per age group.
age.limits <- c(0,5,10,15,20,25,35,45,55,60,65,75,85,90)
prop_symptomatic <- c(0.141, 0.106, 0.074, 0.184, 0.293, 0.387, 0.438,
                      0.535, 0.693, 0.816, 0.765, 0.749, 0.535, 0.535)
## Simulation time grid: daily steps for 300 days starting 2020-03-01.
delta.t <- 1/1
time <- seq(1,300,by = delta.t)
t_March19 <- as.numeric(as.Date("2020-03-19") - as.Date("2020-03-01"))
t_May14 <- as.numeric(as.Date("2020-05-14") - as.Date("2020-03-01"))
nsim <- 500
## NOTE(review): start_index appears unused in this script -- confirm before removing.
start_index <- seq(1, nsim*length(time)+1, by = length(time))
## Model structure, initial conditions and parameters from the sourced helper.
all_prelim_info <- setup_seir_model(stoch = TRUE,
                                    R0 = 2,
                                    c_scale_vec = 1,
                                    prop_symptomatic = prop_symptomatic,
                                    sd.dw = 0.1)
Ncomp <- all_prelim_info$Ncomp
ICs <- all_prelim_info$ICs
params <- list(C = all_prelim_info$C,
               W = all_prelim_info$W,
               beta0 = all_prelim_info$beta0,
               beta1 = all_prelim_info$beta1,
               phase = all_prelim_info$phase,
               mu = all_prelim_info$mu,
               v = all_prelim_info$v,
               N = all_prelim_info$N,
               gamma = all_prelim_info$gamma,
               sigma = all_prelim_info$sigma,
               prop_symptomatic = all_prelim_info$prop_symptomatic,
               sd.dw = all_prelim_info$sd.dw)
## Column names for the combined per-run output matrices.
cnames.allsim <- c('run_index', 'time',
                   paste0("S", 1:Ncomp),
                   paste0("E", 1:Ncomp),
                   paste0("A", 1:Ncomp),
                   paste0("I", 1:Ncomp),
                   paste0("R", 1:Ncomp),
                   paste0("incid_A", 1:Ncomp),
                   paste0("incid_I", 1:Ncomp),
                   "R0")
## Scenario runner ####
## Refactor: the six scenario blocks below were near-identical copies that
## also grew `all_sim` with rbind() inside a loop (quadratic) and used a
## dummy NA first row. One helper now runs `nsim` stochastic simulations
## for a scenario and writes the combined output to CSV. Reads the script
## globals: time, nsim, Ncomp, ICs, params, delta.t, t_March19, t_May14,
## cnames.allsim, and sair_step_variableR0() from the sourced helper file.
##
## @param out_file Path of the CSV file to write.
## @param R0_min,R0_max Bounds of the daily uniform draw for R0.
## @param red1 Optional c(lo, hi) proportional R0 reduction applied
##   March 19 - May 14 (draws from runif(., R0_min*(1-hi), R0_max*(1-lo))).
## @param red2 Optional c(lo, hi) reduction applied May 15 to end of sim.
run_scenario <- function(out_file, R0_min, R0_max, red1 = NULL, red2 = NULL) {
  sims <- vector("list", nsim)                 # preallocate one slot per run
  for (n in seq_len(nsim)) {
    R0vec <- runif(length(time), min = R0_min, max = R0_max)
    if (!is.null(red1)) {
      R0vec[t_March19:t_May14] <- runif(length(t_March19:t_May14),
                                        R0_min * (1 - red1[2]),
                                        R0_max * (1 - red1[1]))
    }
    if (!is.null(red2)) {
      post <- (t_May14 + 1):length(time)
      R0vec[post] <- runif(length(post),
                           R0_min * (1 - red2[2]),
                           R0_max * (1 - red2[1]))
    }
    tmp <- sair_step_variableR0(stoch = TRUE, stoch.init = TRUE,
                                R0vec, Ncomp, ICs, params, time, delta.t)
    sims[[n]] <- cbind(run_index = rep(n, nrow(tmp)), tmp)
  }
  all_sim <- do.call(rbind, sims)
  colnames(all_sim) <- cnames.allsim           # same names as original output
  write.csv(all_sim, file = out_file)
  invisible(all_sim)
}
## Scenario 1A: R0 1.5 - 2.5, uncontrolled ####
run_scenario("output_20200405/scenario1A_lowR0_uncontrolled.csv", 1.5, 2.5)
## Scenario 1B: R0 1.5 - 2.5, 30-40% reduction March 19 - May 14 ####
run_scenario("output_20200405/scenario1B_lowR0_MildDistancing.csv", 1.5, 2.5,
             red1 = c(0.3, 0.4))
## Scenario 1C: R0 1.5 - 2.5, 45-65% reduction March 19 - May 14, 48-76% May 15 - Dec 26 ####
run_scenario("output_20200405/scenario1C_lowR0_ModDistancing.csv", 1.5, 2.5,
             red1 = c(0.45, 0.65), red2 = c(0.48, 0.76))
## Scenario 2A: R0 2.5 - 3.5, uncontrolled ####
run_scenario("output_20200405/scenario2A_highR0_uncontrolled.csv", 2.5, 3.5)
## Scenario 2B: R0 2.5 - 3.5, 30-40% reduction March 19 - May 14 ####
run_scenario("output_20200405/scenario2B_highR0_MildDistancing.csv", 2.5, 3.5,
             red1 = c(0.3, 0.4))
## Scenario 2C: R0 2.5 - 3.5, 45-65% reduction March 19 - May 14, 48-76% May 15 - Dec 26 ####
run_scenario("output_20200405/scenario2C_highR0_ModDistancing.csv", 2.5, 3.5,
             red1 = c(0.45, 0.65), red2 = c(0.48, 0.76))
| /Baltimore_20200405.R | no_license | sberube3/Baltimore_City_Modeling | R | false | false | 6,045 | r |
## Setup ####
require(socialmixr)
require(magrittr)
require(stringr)
require(reshape2)
require(dplyr)
require(ggplot2)
require(truncnorm)
source("asyptomatic_age.R")
age.limits <- c(0,5,10,15,20,25,35,45,55,60,65,75,85,90)
prop_symptomatic <- c(0.141, 0.106, 0.074, 0.184, 0.293, 0.387, 0.438,
0.535, 0.693, 0.816, 0.765, 0.749, 0.535, 0.535)
delta.t <- 1/1
time <- seq(1,300,by = delta.t)
t_March19 <- as.numeric(as.Date("2020-03-19") - as.Date("2020-03-01"))
t_May14 <- as.numeric(as.Date("2020-05-14") - as.Date("2020-03-01"))
nsim <- 500
start_index <- seq(1, nsim*length(time)+1, by = length(time))
all_prelim_info <- setup_seir_model(stoch = TRUE,
R0 = 2,
c_scale_vec = 1,
prop_symptomatic = prop_symptomatic,
sd.dw = 0.1)
Ncomp = all_prelim_info$Ncomp
ICs = all_prelim_info$ICs
params = list(C = all_prelim_info$C,
W = all_prelim_info$W,
beta0 = all_prelim_info$beta0,
beta1 = all_prelim_info$beta1,
phase = all_prelim_info$phase,
mu = all_prelim_info$mu,
v = all_prelim_info$v,
N=all_prelim_info$N,
gamma=all_prelim_info$gamma,
sigma = all_prelim_info$sigma,
prop_symptomatic=all_prelim_info$prop_symptomatic,
sd.dw = all_prelim_info$sd.dw)
cnames.allsim <- c('run_index', 'time',
paste0("S", 1:Ncomp),
paste0("E", 1:Ncomp),
paste0("A", 1:Ncomp),
paste0("I", 1:Ncomp),
paste0("R", 1:Ncomp),
paste0("incid_A", 1:Ncomp),
paste0("incid_I", 1:Ncomp),
"R0")
## Scenario 1A: R0 1.5 - 2.5, uncontrolled ####
all_sim <- matrix(NA,1,(Ncomp*7)+3)
colnames(all_sim) <- cnames.allsim
for(n in 1:nsim){
R0vec <- runif(length(time), min = 1.5, max = 2.5)
tmp <- sair_step_variableR0(stoch = TRUE, stoch.init = TRUE,
R0vec, Ncomp, ICs, params, time, delta.t)
run_index = rep(n, nrow(tmp))
tmp <- cbind(run_index, tmp)
all_sim <- rbind(all_sim, tmp)
}
all_sim <- all_sim[-1,]
write.csv(all_sim, file="output_20200405/scenario1A_lowR0_uncontrolled.csv")
## Scenario 1B: R0 1.5 - 2.5, 30-40% reduction March 19 - May 14 ####
all_sim <- matrix(NA,1,(Ncomp*7)+3)
colnames(all_sim) <- cnames.allsim
for(n in 1:nsim){
R0vec <- runif(length(time), min = 1.5, max = 2.5)
R0vec[t_March19:t_May14] <- runif(length(t_March19:t_May14), 1.5*(1-0.4), 2.5*(1-0.3))
tmp <- sair_step_variableR0(stoch = TRUE, stoch.init = TRUE,
R0vec, Ncomp, ICs, params, time, delta.t)
run_index = rep(n, nrow(tmp))
tmp <- cbind(run_index, tmp)
all_sim <- rbind(all_sim, tmp)
}
all_sim <- all_sim[-1,]
write.csv(all_sim, file="output_20200405/scenario1B_lowR0_MildDistancing.csv")
## Scenario 1C: R0 1.5 - 2.5, 45-65% reduction March 19 - May 14, 48-76% May 15 - Dec 26 ####
all_sim <- matrix(NA,1,(Ncomp*7)+3)
colnames(all_sim) <- cnames.allsim
for(n in 1:nsim){
R0vec <- runif(length(time), min = 1.5, max = 2.5)
R0vec[t_March19:t_May14] <- runif(length(t_March19:t_May14), 1.5*(1-0.65), 2.5*(1-0.45))
R0vec[(t_May14+1):length(time)] <- runif(length((t_May14+1):length(time)), 1.5*(1-0.76), 2.5*(1-0.48))
tmp <- sair_step_variableR0(stoch = TRUE, stoch.init = TRUE,
R0vec, Ncomp, ICs, params, time, delta.t)
run_index = rep(n, nrow(tmp))
tmp <- cbind(run_index, tmp)
all_sim <- rbind(all_sim, tmp)
}
all_sim <- all_sim[-1,]
write.csv(all_sim, file="output_20200405/scenario1C_lowR0_ModDistancing.csv")
## Scenario 2A: R0 2.5 - 3.5, uncontrolled ####
all_sim <- matrix(NA,1,(Ncomp*7)+3)
colnames(all_sim) <- cnames.allsim
for(n in 1:nsim){
R0vec <- runif(length(time), min = 2.5, max = 3.5)
tmp <- sair_step_variableR0(stoch = TRUE, stoch.init = TRUE,
R0vec, Ncomp, ICs, params, time, delta.t)
run_index = rep(n, nrow(tmp))
tmp <- cbind(run_index, tmp)
all_sim <- rbind(all_sim, tmp)
}
all_sim <- all_sim[-1,]
write.csv(all_sim, file="output_20200405/scenario2A_highR0_uncontrolled.csv")
## Scenario 2B: R0 2.5 - 3.5, 30-40% reduction March 19 - May 14 ####
all_sim <- matrix(NA,1,(Ncomp*7)+3)
colnames(all_sim) <- cnames.allsim
for(n in 1:nsim){
R0vec <- runif(length(time), min = 2.5, max = 3.5)
R0vec[t_March19:t_May14] <- runif(length(t_March19:t_May14), 2.5*(1-0.4), 3.5*(1-0.3))
tmp <- sair_step_variableR0(stoch = TRUE, stoch.init = TRUE,
R0vec, Ncomp, ICs, params, time, delta.t)
run_index = rep(n, nrow(tmp))
tmp <- cbind(run_index, tmp)
all_sim <- rbind(all_sim, tmp)
}
all_sim <- all_sim[-1,]
write.csv(all_sim, file="output_20200405/scenario2B_highR0_MildDistancing.csv")
## Scenario 2C: R0 2.5 - 3.5, 45-65% reduction March 19 - May 14, 48-76% May 15 - Dec 26 ####
all_sim <- matrix(NA,1,(Ncomp*7)+3)
colnames(all_sim) <- cnames.allsim
for(n in 1:nsim){
R0vec <- runif(length(time), min = 2.5, max = 3.5)
R0vec[t_March19:t_May14] <- runif(length(t_March19:t_May14), 2.5*(1-0.65), 3.5*(1-0.45))
R0vec[(t_May14+1):length(time)] <- runif(length((t_May14+1):length(time)), 2.5*(1-0.76), 3.5*(1-0.48))
tmp <- sair_step_variableR0(stoch = TRUE, stoch.init = TRUE,
R0vec, Ncomp, ICs, params, time, delta.t)
run_index = rep(n, nrow(tmp))
tmp <- cbind(run_index, tmp)
all_sim <- rbind(all_sim, tmp)
}
all_sim <- all_sim[-1,]
write.csv(all_sim, file="output_20200405/scenario2C_highR0_ModDistancing.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{getClusterSimilarity}
\alias{getClusterSimilarity}
\title{getClusterSimilarity}
\usage{
getClusterSimilarity(gobject, expression_values = c("normalized",
"scaled", "custom"), cluster_column, cor = c("pearson", "spearman"))
}
\arguments{
\item{gobject}{giotto object}
\item{expression_values}{expression values to use}
\item{cluster_column}{name of column to use for clusters}
\item{cor}{correlation score to calculate distance}
}
\value{
data.table
}
\description{
Creates data.table with pairwise correlation scores between each cluster.
}
\details{
Creates data.table with pairwise correlation scores between each cluster and
the group size (# of cells) for each cluster. This information can be used together
with mergeClusters to combine very similar or small clusters into bigger clusters.
}
\examples{
getClusterSimilarity(gobject)
}
| /man/getClusterSimilarity.Rd | permissive | kanliu0205/Giotto | R | false | true | 946 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{getClusterSimilarity}
\alias{getClusterSimilarity}
\title{getClusterSimilarity}
\usage{
getClusterSimilarity(gobject, expression_values = c("normalized",
"scaled", "custom"), cluster_column, cor = c("pearson", "spearman"))
}
\arguments{
\item{gobject}{giotto object}
\item{expression_values}{expression values to use}
\item{cluster_column}{name of column to use for clusters}
\item{cor}{correlation score to calculate distance}
}
\value{
data.table
}
\description{
Creates data.table with pairwise correlation scores between each cluster.
}
\details{
Creates data.table with pairwise correlation scores between each cluster and
the group size (# of cells) for each cluster. This information can be used together
with mergeClusters to combine very similar or small clusters into bigger clusters.
}
\examples{
getClusterSimilarity(gobject)
}
|
\name{yb}
\alias{yb}
\docType{data}
\title{
Youden and Beale's Data on Lesions of Half-Leaves of Tobacco Plant
}
\description{
A simple and innovative design is often priceless. Youden and Beale (1934) sought to find the effect of two preparations of virus on tobacco plants. One half of a tobacco leaf was rubbed with cheesecloth soaked in one preparation of the virus extract and the second half was rubbed with the other virus extract. This experiment was replicated on just eight leaves, and the number of lesions on each half leaf was recorded.
}
\usage{data(yb)}
\format{
A data frame with 8 observations on the following 2 variables.
\describe{
\item{\code{Preparation_1}}{a numeric vector}
\item{\code{Preparation_2}}{a numeric vector}
}
}
\references{
Youden, W. J., and Beale, H. P. (1934). A Statistical Study of the Local Lesion Method for Estimating Tobacco Mosaic Virus. Contrib. Boyce Thompson Inst, 6, 437-454.
}
\examples{
data(yb)
summary(yb)
quantile(yb$Preparation_1,seq(0,1,.1)) # here seq gives 0, .1, .2, ...,1
quantile(yb$Preparation_2,seq(0,1,.1))
fivenum(yb$Preparation_1)
fivenum(yb$Preparation_2)
}
\keyword{exploratory data analysis} | /man/yb.Rd | no_license | cran/ACSWR | R | false | false | 1,177 | rd | \name{yb}
\alias{yb}
\docType{data}
\title{
Youden and Beale's Data on Lesions of Half-Leaves of Tobacco Plant
}
\description{
A simple and innovative design is often priceless. Youden and Beale (1934) sought to find the effect of two preparations of virus on tobacco plants. One half of a tobacco leaf was rubbed with cheesecloth soaked in one preparation of the virus extract and the second half was rubbed with the other virus extract. This experiment was replicated on just eight leaves, and the number of lesions on each half leaf was recorded.
}
\usage{data(yb)}
\format{
A data frame with 8 observations on the following 2 variables.
\describe{
\item{\code{Preparation_1}}{a numeric vector}
\item{\code{Preparation_2}}{a numeric vector}
}
}
\references{
Youden, W. J., and Beale, H. P. (1934). A Statistical Study of the Local Lesion Method for Estimating Tobacco Mosaic Virus. Contrib. Boyce Thompson Inst, 6, 437-454.
}
\examples{
data(yb)
summary(yb)
quantile(yb$Preparation_1,seq(0,1,.1)) # here seq gives 0, .1, .2, ...,1
quantile(yb$Preparation_2,seq(0,1,.1))
fivenum(yb$Preparation_1)
fivenum(yb$Preparation_2)
}
\keyword{exploratory data analysis} |
\name{np.order}
\title{Sample Size Determination for Tolerance Limits Based on Order Statistics}
\alias{np.order}
\usage{
np.order(m, alpha = 0.05, P = 0.99, indices = FALSE)
}
\description{
For given values of \code{m}, \code{alpha}, and \code{P}, this function solves for the necessary sample size such that the
\code{r}-th (or (\code{n-s+1})-th) order statistic is the \code{[100(1-alpha)\%, 100(P)\%]} lower (or upper) tolerance
limit (see the Details section below for further explanation). This function can also report all combinations of order
statistics for 2-sided intervals.
}
\arguments{
\item{m}{See the Details section below for how \code{m} is defined.}
\item{alpha}{1 minus the confidence level attained when it is desired to cover a proportion \code{P}
of the population with the order statistics.}
\item{P}{The proportion of the population to be covered with confidence \code{1-alpha} with the order statistics.}
\item{indices}{An optional argument to report all combinations of order statistics indices for the upper and lower limits
of the 2-sided intervals. Note that this can only be calculated when \code{m>1}.}
} \value{
If \code{indices = FALSE}, then a single number is returned for the necessary sample size such that the
\code{r}-th (or (\code{n-s+1})-th) order statistic is the \code{[100(1-alpha)\%, 100(P)\%]} lower (or upper) tolerance
limit. If \code{indices = TRUE}, then a list is returned with a single number for the necessary sample size and a matrix
with 2 columns where each row gives the pairs of indices for the order statistics for all permissible \code{[100(1-alpha)\%, 100(P)\%]}
2-sided tolerance intervals.
} \seealso{
\code{\link{nptol.int}}
} \details{
For the 1-sided tolerance limits, \code{m=s+r} such that the probability is at least \code{1-alpha} that at least the
proportion \code{P} of the population is below the (\code{n-s+1})-th order statistic for the upper limit or above the \code{r}-th order statistic
for the lower limit. This means for the 1-sided upper limit that \code{r=1}, while for the 1-sided lower limit it means that \code{s=1}.
For the 2-sided tolerance intervals, \code{m=s+r} such that the probability is at least \code{1-alpha} that at least the
proportion \code{P} of the population is between the \code{r}-th and (\code{n-s+1})-th order statistics. Thus, all combinations of r>0 and
s>0 such that \code{m=s+r} are considered.
}
\references{
Hanson, D. L. and Owen, D. B. (1963), Distribution-Free Tolerance Limits Elimination of the Requirement That
Cumulative Distribution Functions Be Continuous, \emph{Technometrics}, \bold{5}, 518--522.
Scheffe, H. and Tukey, J. W. (1945), Non-Parametric Estimation I. Validation of Order Statistics,
\emph{Annals of Mathematical Statistics}, \bold{16}, 187--192.
}
\examples{
## Only requesting the sample size.
np.order(m = 5, alpha = 0.05, P = 0.95)
## Requesting the order statistics indices as well.
np.order(m = 5, alpha = 0.05, P = 0.95, indices = TRUE)
}
\keyword{file}
| /man/nporder.Rd | no_license | cran/tolerance | R | false | false | 3,131 | rd | \name{np.order}
\title{Sample Size Determination for Tolerance Limits Based on Order Statistics}
\alias{np.order}
\usage{
np.order(m, alpha = 0.05, P = 0.99, indices = FALSE)
}
\description{
For given values of \code{m}, \code{alpha}, and \code{P}, this function solves for the necessary sample size such that the
\code{r}-th (or (\code{n-s+1})-th) order statistic is the \code{[100(1-alpha)\%, 100(P)\%]} lower (or upper) tolerance
limit (see the Details section below for further explanation). This function can also report all combinations of order
statistics for 2-sided intervals.
}
\arguments{
\item{m}{See the Details section below for how \code{m} is defined.}
\item{alpha}{1 minus the confidence level attained when it is desired to cover a proportion \code{P}
of the population with the order statistics.}
\item{P}{The proportion of the population to be covered with confidence \code{1-alpha} with the order statistics.}
\item{indices}{An optional argument to report all combinations of order statistics indices for the upper and lower limits
of the 2-sided intervals. Note that this can only be calculated when \code{m>1}.}
} \value{
If \code{indices = FALSE}, then a single number is returned for the necessary sample size such that the
\code{r}-th (or (\code{n-s+1})-th) order statistic is the \code{[100(1-alpha)\%, 100(P)\%]} lower (or upper) tolerance
limit. If \code{indices = TRUE}, then a list is returned with a single number for the necessary sample size and a matrix
with 2 columns where each row gives the pairs of indices for the order statistics for all permissible \code{[100(1-alpha)\%, 100(P)\%]}
2-sided tolerance intervals.
} \seealso{
\code{\link{nptol.int}}
} \details{
For the 1-sided tolerance limits, \code{m=s+r} such that the probability is at least \code{1-alpha} that at least the
proportion \code{P} of the population is below the (\code{n-s+1})-th order statistic for the upper limit or above the \code{r}-th order statistic
for the lower limit. This means for the 1-sided upper limit that \code{r=1}, while for the 1-sided lower limit it means that \code{s=1}.
For the 2-sided tolerance intervals, \code{m=s+r} such that the probability is at least \code{1-alpha} that at least the
proportion \code{P} of the population is between the \code{r}-th and (\code{n-s+1})-th order statistics. Thus, all combinations of r>0 and
s>0 such that \code{m=s+r} are considered.
}
\references{
Hanson, D. L. and Owen, D. B. (1963), Distribution-Free Tolerance Limits Elimination of the Requirement That
Cumulative Distribution Functions Be Continuous, \emph{Technometrics}, \bold{5}, 518--522.
Scheffe, H. and Tukey, J. W. (1945), Non-Parametric Estimation I. Validation of Order Statistics,
\emph{Annals of Mathematical Statistics}, \bold{16}, 187--192.
}
\examples{
## Only requesting the sample size.
np.order(m = 5, alpha = 0.05, P = 0.95)
## Requesting the order statistics indices as well.
np.order(m = 5, alpha = 0.05, P = 0.95, indices = TRUE)
}
\keyword{file}
|
# --- Setup ---------------------------------------------------------------
# NOTE(review): `rm(list = ls())` and `attach()` are discouraged in scripts;
# they are kept unchanged here because code beyond this file section may
# rely on the attached search path.
rm(list = ls())
# Data: `bodPercentfat` (body-fat measurements) is assumed to already exist
# in the workspace -- TODO confirm where it is loaded from.
banco <- bodPercentfat
banco
head(banco)
attach(banco)
names(banco)
typeof(banco)
# The response `Percent` contains a zero observation (inspected below at
# row 182), which prevents fitting families/links that require a strictly
# positive response. A constant (10) is added to the whole column as a
# workaround.
banco[182, ]
banco$Percent <- banco$Percent + 10
banco
# The response is continuous (not a count), so the candidate families are
# gaussian, Gamma and inverse.gaussian; binomial/quasibinomial and
# poisson/quasipoisson do not apply.
# Plan: fit a GLM for every applicable family/link combination, starting
# from the full model and performing backward variable selection on the
# t-test p-values at the 10% significance level, then compare the final
# models by cross-validation.
# Gaussian model, identity link: start from the full model (13 predictors).
m_gi <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi)
# Drop Knee (t-test p-value 0.94970).
m_gi1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi1)
# Drop Chest (p-value 0.80454).
m_gi2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi2)
# Drop Heigth (p-value 0.49280).
m_gi3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi3)
# Drop Ankle (p-value 0.39568).
m_gi4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi4)
# Drop Biceps (p-value 0.28878).
m_gi5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi5)
# Drop Hip (p-value 0.15940).
m_gif <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Thigh + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gif)
# Final model for gaussian/identity: 7 significant predictors plus the
# intercept (Age, Weigth, Neck, Abdomen, Thigh, Forearm, Wrist).
# Gaussian model, log link: start from the full model.
m_gl <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl)
# Drop Chest (t-test p-value 0.641041).
m_gl1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl1)
# Drop Heigth (p-value 0.587286).
m_gl2 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl2)
# Drop Ankle (p-value 0.449845).
m_gl3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl3)
# Drop Biceps (p-value 0.271758).
m_gl4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl4)
# Drop Knee (p-value 0.25300).
m_glf <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_glf)
# Final model for gaussian/log: 8 significant predictors plus the intercept.
# Gaussian model, inverse link: start from the full model.
m_gin <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin)
# Drop Ankle (t-test p-value 0.48380).
m_gin1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin1)
# Drop Chest (p-value 0.41261).
m_gin2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin2)
# Drop Heigth (p-value 0.37424).
m_gin3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin3)
# NOTE(review): m_gin4 refits exactly the same formula as m_gin3 -- nothing
# is removed in this step (the original comment repeated the Heigth step).
m_gin4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin4)
# Drop Biceps (p-value 0.27592).
m_gin5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin5)
# Drop Neck (the original notes repeated p-value 0.27592 here -- TODO
# confirm against the summary output).
m_gin6 <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin6)
# Drop Age (the original notes repeated p-value 0.27592 again -- TODO
# confirm against the summary output).
m_ginf <- glm(formula = Percent ~ Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_ginf)
# Final model for gaussian/inverse: 7 significant predictors plus the
# intercept (Weigth, Abdomen, Hip, Thigh, Knee, Forearm, Wrist).
##
# Gamma model, inverse link: start from the full model.
m_gai <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai)
# Drop Heigth (t-test p-value 0.9354).
m_gai1 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai1)
# Drop Ankle (p-value 0.62681).
m_gai2 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai2)
# NOTE(review): the original comment said Neck (p-value 0.19004) is dropped
# here, but m_gai3 refits exactly the same formula as m_gai2 -- Neck is
# still present.
m_gai3 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai3)
# Drop Chest (p-value 0.61462).
m_gai4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai4)
# NOTE(review): the original comment said Age (p-value 0.122481) is dropped
# here, but m_gai5 refits the same formula as m_gai4 -- Age is still present.
m_gai5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai5)
# Drop Biceps (p-value 0.20824).
# NOTE(review): this assignment overwrites the previous `m_gai5` object.
m_gai5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai5)
# Drop Neck (p-value 0.14161).
m_gaif <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gaif)
# Final model for Gamma/inverse: 8 significant predictors plus the
# intercept.
# Gamma model, identity link: start from the full model.
m_gaid <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid)
# Drop Chest (t-test p-value 0.95040).
m_gaid1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid1)
# NOTE(review): the original comment said Heigth (p-value 0.885134) is
# dropped here, but m_gaid2 actually keeps Heigth and instead removes the
# intercept (`0 +`).
m_gaid2 <- glm(formula = Percent ~ 0 + Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid2)
# Drop Ankle (p-value 0.46439).
m_gaid3 <- glm(formula = Percent ~ 0 + Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid3)
# Drop Knee (p-value 0.51843).
m_gaid4 <- glm(formula = Percent ~ 0 + Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid4)
# NOTE(review): the original comment said only Biceps (p-value 0.36870) is
# dropped, but this step also removes Weigth and Heigth, and Knee reappears.
m_gaid5 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid5)
# Drop Knee (p-value 0.275890).
m_gaidf <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaidf)
# Final model for Gamma/identity (no intercept): 7 significant predictors.
# (The original closing comment said "inverse link"; this section uses the
# identity link.)
# Inverse-gaussian model, 1/mu^2 link: start from the full model.
m_igmu <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu)
# Drop Heigth (t-test p-value 0.93452).
m_igmu1 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu1)
# Drop Ankle (p-value 0.692248).
m_igmu2 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu2)
# Drop Chest (p-value 0.593119).
m_igmu3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu3)
# Drop Neck (p-value 0.200119).
m_igmu4 <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu4)
# Drop Biceps (p-value 0.24827).
m_igmu5 <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu5)
# Drop Age (p-value 0.121001).
m_igmu6 <- glm(formula = Percent ~ Weigth + Abdomen + Hip + Thigh + Knee +
Forearm + Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu6)
# Drop Wrist (p-value 0.286829).
m_igmuf <- glm(formula = Percent ~ Weigth + Abdomen + Hip + Thigh + Knee +
Forearm , family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmuf)
# Final model for inverse.gaussian/1/mu^2: 6 significant predictors
# (Weigth, Abdomen, Hip, Thigh, Knee, Forearm).
# Gamma model, log link: start from the full model.
m_gal <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal)
# Drop Heigth (t-test p-value 0.81011).
m_gal1 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal1)
# Drop Chest (p-value 0.75295).
# NOTE(review): this step also silently removes the intercept (`0+`).
m_gal2 <- glm(formula = Percent ~ 0+ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal2)
# Drop Hip (p-value 0.8846).
m_gal3 <- glm(formula = Percent ~ 0+ Age + Weigth + Neck +
Abdomen + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal3)
# Drop Neck (p-value 0.49755).
m_gal4 <- glm(formula = Percent ~ 0+ Age + Weigth +
Abdomen + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal4)
# Drop Ankle (p-value 0.1861).
# NOTE(review): this assignment overwrites the previous `m_gal4` object.
m_gal4 <- glm(formula = Percent ~ 0+ Age + Weigth +
Abdomen + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal4)
# Drop Biceps (p-value 0.1626).
m_galf <- glm(formula = Percent ~ 0+ Age + Weigth +
Abdomen + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_galf)
# Final model for Gamma/log (no intercept): 7 significant predictors.
# Inverse-gaussian model, inverse link: start from the full model.
m_ig_inverse <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse)
# Drop Chest (t-test p-value 0.718193).
m_ig_inverse1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse1)
# Drop Ankle (p-value 0.58826).
m_ig_inverse2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee +Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse2)
# NOTE(review): m_ig_inverse3 refits exactly the same formula as
# m_ig_inverse2 -- nothing is removed in this step (the Ankle comment was
# repeated in the original).
m_ig_inverse3 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee +Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse3)
# Drop Heigth (p-value 0.56512).
m_ig_inverse4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee +Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse4)
# Drop Knee (the original notes repeated p-value 0.56512 here -- TODO
# confirm against the summary output).
m_ig_inverse5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse5)
# Drop Biceps (p-value 0.20463).
m_ig_inversef <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inversef)
# Final model for inverse.gaussian/inverse: 8 significant predictors plus
# the intercept.
# Inverse-gaussian model, identity link: start from the full model.
m_ig1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig1)
# Drop Heigth (t-test p-value 0.847767).
m_ig2 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig2)
# Drop Chest (p-value 0.712841).
m_ig3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig3)
# Drop the intercept (`0 +`; the original notes repeated p-value 0.712841
# here -- TODO confirm against the summary output).
m_ig4 <- glm(formula = Percent ~ 0 + Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig4)
# Drop Weigth (p-value 0.708071).
m_ig5 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig5)
# Drop Ankle (p-value 0.438746).
m_ig6 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig6)
# Drop Biceps (p-value 0.444659).
m_ig7 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig7)
# Drop Knee (p-value 0.186663).
m_ig8 <- glm(formula = Percent ~ 0 + Age + Neck + Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig8)
# Final model for inverse.gaussian/identity (no intercept): 7 significant
# predictors. (The original closing comment said "log link"; this section
# uses the identity link.)
# Inverse-gaussian model, log link: start from the full model.
m_igl <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl)
# Drop Chest (t-test p-value 0.93640).
m_igl1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl1)
# Drop Knee (p-value 0.70975).
m_igl2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl2)
# Drop Heigth (p-value 0.60186).
m_igl3 <- glm(formula = Percent ~ Age + Weigth +Neck +
Abdomen + Hip + Thigh + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl3)
# Drop Ankle (p-value 0.491255).
m_igl4 <- glm(formula = Percent ~ Age + Weigth +Neck +
Abdomen + Hip + Thigh + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl4)
# Drop Biceps (p-value 0.209791).
m_igl5 <- glm(formula = Percent ~ Age + Weigth +Neck +
Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl5)
# Drop Weigth (p-value 0.139854).
m_iglf <- glm(formula = Percent ~ Age + Neck + Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_iglf)
# Final model for inverse.gaussian/log: 7 significant predictors plus the
# intercept.
| /Script_MLG_validacao_cruzada.R | no_license | conexaomundom/Regressao | R | false | false | 23,838 | r |
# NOTE(review): this section is a verbatim duplicate of the setup earlier in
# this file (dataset-dump artifact); code kept unchanged.
# `rm(list = ls())` and `attach()` are discouraged in scripts.
rm(list = ls())
# Data: `bodPercentfat` is assumed to already exist in the workspace --
# TODO confirm where it is loaded from.
banco <- bodPercentfat
banco
head(banco)
attach(banco)
names(banco)
typeof(banco)
# The response `Percent` contains a zero observation (row 182), which
# prevents fitting families/links that require a strictly positive
# response; a constant (10) is added to the whole column as a workaround.
banco[182, ]
banco$Percent <- banco$Percent + 10
banco
# The response is continuous (not a count), so the candidate families are
# gaussian, Gamma and inverse.gaussian; binomial/quasibinomial and
# poisson/quasipoisson do not apply.
# Plan: fit a GLM for every applicable family/link combination, starting
# from the full model and performing backward variable selection on the
# t-test p-values at the 10% significance level, then compare the final
# models by cross-validation.
# Gaussian model, identity link: start from the full model.
# NOTE(review): duplicate of the gaussian/identity section earlier in this
# file (dataset-dump artifact); code kept unchanged.
m_gi <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi)
# Drop Knee (t-test p-value 0.94970).
m_gi1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi1)
# Drop Chest (p-value 0.80454).
m_gi2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi2)
# Drop Heigth (p-value 0.49280).
m_gi3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi3)
# Drop Ankle (p-value 0.39568).
m_gi4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Biceps + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi4)
# Drop Biceps (p-value 0.28878).
m_gi5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gi5)
# Drop Hip (p-value 0.15940).
m_gif <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Thigh + Forearm +
Wrist, family = gaussian(link = "identity"), data=banco)
summary(m_gif)
# Final model for gaussian/identity: 7 significant predictors plus the
# intercept.
# Gaussian model, log link: start from the full model.
# NOTE(review): duplicate of the gaussian/log section earlier in this file
# (dataset-dump artifact); code kept unchanged.
m_gl <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl)
# Drop Chest (t-test p-value 0.641041).
m_gl1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl1)
# Drop Heigth (p-value 0.587286).
m_gl2 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl2)
# Drop Ankle (p-value 0.449845).
m_gl3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl3)
# Drop Biceps (p-value 0.271758).
m_gl4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_gl4)
# Drop Knee (p-value 0.25300).
m_glf <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Forearm +
Wrist, family = gaussian(link = "log"), data=banco)
summary(m_glf)
# Final model for gaussian/log: 8 significant predictors plus the intercept.
# modelo gaussiano com função de ligação inversa.
m_gin <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin)
# retirando a variável Ankle com p-valor de 0.48380 no teste t.
m_gin1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin1)
# retirando a variável Chest com p-valor de 0.41261 no teste t.
m_gin2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin2)
# Dropping Heigth (t-test p-value 0.37424).
m_gin3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin3)
# NOTE(review): comment and formula repeat the previous step verbatim --
# m_gin4 is an exact duplicate of m_gin3; no variable was actually removed here.
m_gin4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin4)
# retirando a variávelBiceps + com p-valor de 0.27592 no teste t.
m_gin5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin5)
# retirando a variávelNeck + com p-valor de 0.27592 no teste t.
m_gin6 <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_gin6)
# retirando a variável Age + com p-valor de 0.27592 no teste t.
m_ginf <- glm(formula = Percent ~ Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = gaussian(link = "inverse"), data=banco)
summary(m_ginf)
# E ficou sendo o modelo final passando pelo teste t com 7 variáveis explicativas
# mais o intercepto sendo siginificativas para o modelo gaussiano com função de
# ligação inversa.
##
# modelo Gamma com função de ligação inversa.
m_gai <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai)
# retirando a variável Heigth com p-valor de 0.9354 no teste t.
m_gai1 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai1)
# retirando a variável Ankle com p-valor de 0.62681 no teste t.
m_gai2 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai2)
# retirando a variável Neck com p-valor de 0.19004 no teste t.
m_gai3 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai3)
# retirando a variável Chest com p-valor de 0.61462 no teste t.
m_gai4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai4)
# NOTE(review): the original comment claims Age (p = 0.122481) is dropped, but
# Age is still in the formula below -- the fit is unchanged from m_gai4; only
# the comment is wrong.
m_gai5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai5)
# Dropping Biceps (t-test p-value 0.20824).
# NOTE(review): this reassigns m_gai5, overwriting the fit created just above;
# rename one of the two if both fits are needed later.
m_gai5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gai5)
# retirando a variávelNeck + com p-valor de 0.14161 no teste t.
m_gaif <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "inverse"), data=banco)
summary(m_gaif)
# E ficou sendo o modelo final passando pelo teste t com 8 variáveis explicativas
# mais o intercepto sendo siginificativas para o modelo Gamma com função de
# ligação inversa.
# modelo Gamma com função de ligação identidade.
m_gaid <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid)
# retirando a variável Chest com p-valor de 0.95040 no teste t.
m_gaid1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid1)
# retirando a variável Heigth com p-valor de 0.885134 no teste t.
m_gaid2 <- glm(formula = Percent ~ 0 + Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid2)
# retirando a variável Ankle + com p-valor de 0.46439 no teste t.
m_gaid3 <- glm(formula = Percent ~ 0 + Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid3)
# retirando a variável knee + com p-valor de 0.51843 no teste t.
m_gaid4 <- glm(formula = Percent ~ 0 + Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Biceps + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid4)
# Dropping Biceps (t-test p-value 0.36870).
# NOTE(review): relative to m_gaid4 this formula also drops Weigth and Heigth
# and re-adds Knee, so it does NOT correspond to removing Biceps alone --
# verify which specification was intended.
m_gaid5 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaid5)
# retirando a variável Knee com p-valor de 0.275890 no teste t.
m_gaidf <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Forearm +
Wrist, family = Gamma(link = "identity"), data=banco)
summary(m_gaidf)
# E ficou sendo o modelo final passando pelo teste t com 7 variáveis explicativas
# sendo siginificativas para o modelo Gamma com função de
# ligação inversa.
m_igmu <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu)
# Retirando a variável Heigth + com p-valor de 0.93452 no teste t.
m_igmu1 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu1)
# Retirando a variável Ankle + com p-valor de 0.692248 no teste t.
m_igmu2 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu2)
# Retirando a variável Chest + com p-valor de 0.593119 no teste t.
m_igmu3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu3)
# Retirando a variável Neck + com p-valor de 0.200119 no teste t.
m_igmu4 <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Biceps + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu4)
# Retirando a variável Biceps + com p-valor de 0.24827 no teste t.
m_igmu5 <- glm(formula = Percent ~ Age + Weigth +
Abdomen + Hip + Thigh + Knee + Forearm +
Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu5)
# Retirando a variável Age + com p-valor de 0.121001 no teste t.
m_igmu6 <- glm(formula = Percent ~ Weigth + Abdomen + Hip + Thigh + Knee +
Forearm + Wrist, family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmu6)
# Retirando a variável + Wrist com p-valor de 0.286829 no teste t.
m_igmuf <- glm(formula = Percent ~ Weigth + Abdomen + Hip + Thigh + Knee +
Forearm , family = inverse.gaussian(link = "1/mu^2"), data=banco)
summary(m_igmuf)
# E ficou sendo o modelo final passando pelo teste t com 6 variáveis explicativas
# sendo siginificativas para o modelo Inversa Gaussiana com função de
# ligação "1/mu^2".
# modelo Gamma com função de ligação log.
m_gal <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal)
# retirando a variável Heigth com p-valor de 0.81011 no teste t.
m_gal1 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal1)
# retirando a variavel Chest com p-valor de 0.75295 no teste t.
m_gal2 <- glm(formula = Percent ~ 0+ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal2)
# retirando a variável Hip + com p-valor de 0.8846 no teste t.
m_gal3 <- glm(formula = Percent ~ 0+ Age + Weigth + Neck +
Abdomen + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal3)
# retirando a variável Neck + com p-valor de 0.49755 no teste t.
m_gal4 <- glm(formula = Percent ~ 0+ Age + Weigth +
Abdomen + Thigh + Knee + Ankle + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal4)
# retirando a variável Ankle + com p-valor de 0.1861 no teste t.
m_gal4 <- glm(formula = Percent ~ 0+ Age + Weigth +
Abdomen + Thigh + Knee + Biceps + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_gal4)
# retirando a variável Biceps com p-valor de 0.1626 no teste t.
m_galf <- glm(formula = Percent ~ 0+ Age + Weigth +
Abdomen + Thigh + Knee + Forearm +
Wrist, family = Gamma(link = "log"), data=banco)
summary(m_galf)
# E ficou sendo o modelo final passando pelo teste t com 7 variáveis explicativas
# sendo siginificativas para o modelo Gamma com função de ligação log.
# modelo gaussiana inversa com funcao de ligacao inverse.
m_ig_inverse <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse)
# Retirando a variável Chest + com p-valor 0.718193no teste t.
m_ig_inverse1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse1)
# Dropping Ankle (t-test p-value 0.58826).
m_ig_inverse2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee +Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse2)
# NOTE(review): comment and formula repeat the previous step verbatim --
# m_ig_inverse3 is an exact duplicate of m_ig_inverse2; no variable was
# removed here.
m_ig_inverse3 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee +Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse3)
# Retirando a variável Heigth + com p-valor 0.56512 no teste t.
m_ig_inverse4 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee +Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse4)
# Retirando a variável Knee + com p-valor 0.56512 no teste t.
m_ig_inverse5 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inverse5)
# Retirando a variável Biceps + com p-valor 0.20463 no teste t.
m_ig_inversef <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "inverse"), data=banco)
summary(m_ig_inversef)
# E ficou sendo o modelo final passando pelo teste t com 8 variáveis explicativas
# mais o intercepto sendo siginificativas para o modelo Inversa Gaussiana com função de ligação inversa.
# Modelo com inversa gaussiana com função de ligação identidade.
m_ig1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig1)
# Retirando a variável Heigth + com p-valor 0.847767 no teste t.
m_ig2 <- glm(formula = Percent ~ Age + Weigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig2)
# Retirando a variável Chest + com p-valor 0.712841 no teste t.
m_ig3 <- glm(formula = Percent ~ Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig3)
# Retirando o intercepto com p-valor 0.712841 no teste t.
m_ig4 <- glm(formula = Percent ~ 0 + Age + Weigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig4)
# Retirando a variável Weigth + com p-valor0.708071 no teste t.
m_ig5 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig5)
# Retirando a variável Ankle + com p-valor 0.438746 no teste t.
m_ig6 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig6)
# Retirando a variável Biceps + com p-valor 0.444659 no teste t.
m_ig7 <- glm(formula = Percent ~ 0 + Age + Neck +
Abdomen + Hip + Thigh + Knee +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig7)
# Retirando a variável Knee + com p-valor 0.186663 no teste t.
m_ig8 <- glm(formula = Percent ~ 0 + Age + Neck + Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "identity"), data=banco)
summary(m_ig8)
# E ficou sendo o modelo final passando pelo teste t com 7 variáveis explicativas
# sendo siginificativas para o modelo gaussiana inversa com função de ligação log.
# Modelo com inversa gaussiana com função de ligação log.
m_igl <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck + Chest +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl)
# Retirando a variável Chest + com p-valor 0.93640 no teste t.
m_igl1 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Knee + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl1)
# Retirando a variável Knee + com p-valor 0.70975 no teste t.
m_igl2 <- glm(formula = Percent ~ Age + Weigth + Heigth + Neck +
Abdomen + Hip + Thigh + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl2)
# Retirando a variável Heigth + com p-valor 0.60186 no teste t.
m_igl3 <- glm(formula = Percent ~ Age + Weigth +Neck +
Abdomen + Hip + Thigh + Ankle + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl3)
# Retirando a variável Ankle + com p-valor 0.491255 no teste t.
m_igl4 <- glm(formula = Percent ~ Age + Weigth +Neck +
Abdomen + Hip + Thigh + Biceps +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl4)
# Retirando a variável Biceps + com p-valor 0.209791 no teste t.
m_igl5 <- glm(formula = Percent ~ Age + Weigth +Neck +
Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_igl5)
# Retirando a variável Weigth + com p-valor 0.139854 no teste t.
m_iglf <- glm(formula = Percent ~ Age + Neck + Abdomen + Hip + Thigh +
Forearm + Wrist, family = inverse.gaussian(link = "log"), data=banco)
summary(m_iglf)
# E ficou sendo o modelo final passando pelo teste t com 7 variáveis explicativas
# mais o intercepto sendo siginificativas para o modelo gaussiana inversa com função de ligação log.
|
#s=0, simple LR, without regularization
#nTimes runs
#LOO cross-validation.
require(glmnet)
require(caret)
require(ROCR)
require(bioDist)
# Leave-one-out cross-validation for unpenalized glmnet fits (s = 0).
#
# Args:
#   dataFrame:          features with SAMPLES IN COLUMNS; each LOO fold holds
#                       out one column.
#   targetVec:          response vector, one entry per column of dataFrame.
#   logisticRegression: FALSE -> gaussian glmnet fit; returns the Pearson
#                       correlation (rounded to 3 d.p.) between the LOO
#                       predictions and targetVec.
#                       TRUE  -> binomial fit; returns the metrics selected by
#                       the flags below (unselected metrics are NA).
#   useAUC:             compute ROC AUC of predicted probabilities (ROCR).
#   useHamming:         compute fraction of correct class predictions.
#   useProbPearson:     compute Pearson cor of probabilities vs 0/1 labels.
#   useKendallTau:      compute Kendall tau distance (bioDist::tau.dist).
#
# Returns: a single numeric (regression) or a named numeric vector
#          c(AUC, Pearson, Hamming, Tau) (classification).
#
# Requires: glmnet, ROCR, bioDist -- loaded by the calling script.
LOOCV_Glmnet_simple <- function(dataFrame, targetVec, logisticRegression = FALSE,
                                useAUC = TRUE, useHamming = FALSE,
                                useProbPearson = FALSE, useKendallTau = FALSE) {
  n <- ncol(dataFrame)
  if (!logisticRegression) {
    predictionVec <- numeric(n)  # preallocated (original grew it per iteration)
    for (k in seq_len(n)) {
      train <- t(as.matrix(dataFrame[, -k]))
      test <- t(as.matrix(dataFrame[, k]))
      fit <- glmnet(train, targetVec[-k])
      predictionVec[k] <- predict(fit, newx = test, s = 0)  # s = 0: no penalty
    }
    # NOTE: despite the original variable name "finalRsquare", this is the
    # correlation itself, not its square; kept to preserve the contract.
    return(round(cor(predictionVec, targetVec), 3))
  }
  # --- logistic-regression branch -------------------------------------------
  # Recode the target as a 0/1 numeric vector. The factor must go through
  # character before numeric so that the relabelled LEVELS (0/1) are used,
  # not the internal integer codes.
  targetVecFactor <- as.factor(targetVec)
  levels(targetVecFactor) <- c(0, 1)
  targetVecBinary <- as.numeric(as.character(targetVecFactor))
  outputAuc <- NA
  outputPearson <- NA
  outputHamming <- NA
  outputTau <- NA
  testPredictionProbVec <- numeric(n)
  testPredictionClassVec <- numeric(n)
  for (k in seq_len(n)) {
    # Train on all columns but k, predict the held-out column.
    train <- t(as.matrix(dataFrame[, -k]))
    test <- t(as.matrix(dataFrame[, k]))
    fit <- glmnet(train, targetVecBinary[-k], family = "binomial")
    testPredictionProbVec[k] <- predict(fit, type = "response", newx = test, s = 0)
    # type = "class" returns a character label; convert immediately instead of
    # letting the assignment coerce the whole vector to character.
    testPredictionClassVec[k] <- as.numeric(predict(fit, type = "class", newx = test, s = 0))
  }
  if (useAUC) {
    pred <- prediction(testPredictionProbVec, targetVecBinary)
    auc <- performance(pred, "auc")
    outputAuc <- unlist(slot(auc, "y.values"))
  }
  if (useHamming) {
    # Fraction of LOO class predictions matching the true labels (accuracy).
    outputHamming <- sum(testPredictionClassVec == targetVecBinary) / length(targetVecBinary)
  }
  if (useProbPearson) {
    # BUG FIX: the original subscripted both vectors to their first element
    # (tmp[1] + 0.001) before calling cor(), which always yields NA with a
    # zero-sd warning. Correlate the full vectors instead; the +0.001 shift
    # is dropped because adding a constant cannot change Pearson correlation.
    outputPearson <- cor(testPredictionProbVec, targetVecBinary)
  }
  if (useKendallTau) {
    Matrix_tau <- t(as.matrix(data.frame(testPredictionProbVec, targetVecBinary)))
    outputTau <- tau.dist(Matrix_tau)[1]
  }
  vecReturn <- c(outputAuc, outputPearson, outputHamming, outputTau)
  names(vecReturn) <- c("AUC", "Pearson", "Hamming", "Tau")
  vecReturn
}
| /LOOCV_Glmnet_simple.R | no_license | jasonzhao0307/R_lib_jason | R | false | false | 2,500 | r | #s=0, simple LR, without regularization
#nTimes runs
#LOO cross-validation.
require(glmnet)
require(caret)
require(ROCR)
require(bioDist)
LOOCV_Glmnet_simple<- function(dataFrame, targetVec, logisticRegression = FALSE, useAUC = TRUE, useHamming = FALSE, useProbPearson = FALSE, useKendallTau = FALSE){
if (logisticRegression == FALSE){
predictionVec = c()
for (k in 1:ncol(dataFrame)){
train = t(as.matrix(dataFrame[,-k]))
test = t(as.matrix(dataFrame[,k]))
fit <- glmnet(train, targetVec[-k])
predictionVec[k] <- predict(fit, newx = test, s=0)
}
finalRsquare <- round(cor(predictionVec,targetVec),3)
return(finalRsquare)
} else{
#change target vec into zero-one binary vector
targetVecFactor <- as.factor(targetVec)
levels(targetVecFactor) <- c(0,1)
#MUST into character before into numeric for "factor to numeric"
targetVecBinary <- as.numeric(as.character(targetVecFactor))
outputAuc <- NA
outputPearson <- NA
outputHamming <- NA
outputTau <- NA
vecReturn <- c()
testPredictionClassVec <- c()
testPredictionProbVec <- c()
for (k in 1:ncol(dataFrame)){
#results for each run of LOOCV
#train and test data
train = t(as.matrix(dataFrame[,-k]))
test = t(as.matrix(dataFrame[,k]))
#fit the model and predict. Give both probability or class prediction results.
fit <- glmnet(train, targetVecBinary[-k],family = "binomial")
testPredictionProbVec[k] <- predict(fit,type="response", newx = test, s = 0)
testPredictionClassVec[k] <- predict(fit,type="class", newx = test, s = 0)
}
testPredictionProbVec <- as.numeric(testPredictionProbVec)
testPredictionClassVec <- as.numeric(testPredictionClassVec)
if (useAUC == TRUE){
pred <- prediction(testPredictionProbVec, targetVecBinary)
auc <- performance(pred,"auc")
outputAuc <- unlist(slot(auc, "y.values"))
}
if (useHamming == TRUE){
outputHamming <- sum(testPredictionClassVec == targetVecBinary) / length(targetVecBinary)
}
if (useProbPearson == TRUE){
tmp1 <- testPredictionProbVec
tmp2 <- targetVecBinary
tmp1 <- tmp1[1] + 0.001
tmp2 <- tmp2[1] + 0.001
outputPearson <- cor(tmp1, tmp2)
}
if (useKendallTau == TRUE){
Matrix_tau <- t(as.matrix(data.frame(testPredictionProbVec, targetVecBinary)))
outputTau <- tau.dist(Matrix_tau)[1]
}
vecReturn <- c(outputAuc, outputPearson, outputHamming, outputTau)
names(vecReturn) <- c("AUC", "Pearson","Hamming","Tau")
return(vecReturn)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_Theta.R
\name{compute_Theta}
\alias{compute_Theta}
\title{compute_Theta}
\usage{
compute_Theta(Phi, B_0)
}
\arguments{
\item{Phi}{Reduced-form moving-average coefficient matrices of the VAR.}
\item{B_0}{Contemporaneous structural impact matrix.}
}
\value{
The structural (Theta) coefficient matrices implied by \code{Phi} and \code{B_0}.
}
\description{
Helper function used to compute Theta coefficients as in Kilian and
Luetkepohl (2017), \emph{Structural Vector Autoregressive Analysis}.
}
| /man/compute_Theta.Rd | permissive | pat-alt/deepvars | R | false | true | 323 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_Theta.R
\name{compute_Theta}
\alias{compute_Theta}
\title{compute_Theta}
\usage{
compute_Theta(Phi, B_0)
}
\arguments{
\item{B_0}{}
}
\value{
}
\description{
Helper function used to compute Theta coefficients as in Kilian, Luetkepohl
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/status.R
\name{status}
\alias{status}
\title{Status}
\usage{
status(project = NULL)
}
\arguments{
\item{project}{The project directory. If \code{NULL}, then the active
project will be used. If no project has been specifically activated,
the current working directory is used.}
}
\description{
Report differences between the project's lockfile and the current state of
the private library (if any).
}
| /man/status.Rd | no_license | shrektan/renv | R | false | true | 478 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/status.R
\name{status}
\alias{status}
\title{Status}
\usage{
status(project = NULL)
}
\arguments{
\item{project}{The project directory. If \code{NULL}, then the active
project will be used. If no project has been specifically activated,
the current working directory is used.}
}
\description{
Report differences between the project's lockfile and the current state of
the private library (if any).
}
|
# Table 1 assembly: load saved timing results and build a
# methods-by-dimension summary table for stargazer.
rm(list = ls())  # NOTE(review): wipes the whole workspace; avoid in shared sessions
set.seed(1985)
dig=3  # decimal places used by ff() below
setwd('~/Dropbox/Share/Cheng&Yan/matinv/Simulation/')  # machine-specific path
library('stargazer')
# TT[method, replication, file]: 8 methods x 500 replications x 3 result files.
TT<-array(0,dim=c(8,500,3));
for (id in 1:3)
{
# Drop the first 5 rows of each stored timing matrix -- presumably methods
# excluded from the table; TODO confirm against the simulation script.
TT[,,id]=readRDS(paste('time',id,'-tab1','.rds',sep =''))[-(1:5),]
}
Re<-matrix(0,24,5)  # 24 rows = 8 methods x 3 result files; 5 columns = p values
# Format per-row "mean(sd)" summary strings for a numeric matrix.
#
# Args:
#   obj: numeric matrix; mean and sd are computed across each ROW
#        (NA values are removed).
#   dig: number of decimal places. Defaults to 3, matching the global `dig`
#        this function previously read implicitly -- making it an explicit,
#        backward-compatible parameter removes the hidden global dependency.
#
# Returns: character vector with one "mean(sd)" string per row of obj.
ff <- function(obj, dig = 3) {
  m <- format(round(apply(obj, 1, mean, na.rm = TRUE), dig), nsmall = dig)
  s <- format(round(apply(obj, 1, sd, na.rm = TRUE), dig), nsmall = dig)
  # paste0() is vectorized, so the rep('(')/rep(')') helper vectors and the
  # intermediate R1 variable in the original are unnecessary.
  paste0(m, "(", s, ")")
}
# Fill Re column-by-column: column k holds the k-th block of 100 replications.
# Note `1:100+(k-1)*100` relies on `:` binding tighter than `+`, i.e. it is
# (1:100) + (k-1)*100 -- columns 1-100, 101-200, ..., 401-500.
for (k in 1:5)
{Re[1:8,k]=ff(TT[,1:100+(k-1)*100,1]);
Re[9:16,k]=ff(TT[,1:100+(k-1)*100,2]);
Re[17:24,k]=ff(TT[,1:100+(k-1)*100,3]);
}
colnames(Re)<-c('p=100','p=200','p=400','p=800','p=1600');
# Prepend the method-name column (8 methods, repeated for the 3 result files).
# NOTE(review): ' EQUALs' carries a leading space -- confirm it is intentional
# (e.g. for display indentation) rather than a typo.
Re<-cbind(rep(c('CLIME','glasso','BigQuic','glasso-ADMM','SCIO','EQUAL','D-trace',' EQUALs'),3),Re)
stargazer(Re)  # emit the LaTeX table
| /Simulation/tab1.R | no_license | cescwang85/EQUAL | R | false | false | 797 | r | rm(list = ls())
set.seed(1985)
dig=3
setwd('~/Dropbox/Share/Cheng&Yan/matinv/Simulation/')
library('stargazer')
TT<-array(0,dim=c(8,500,3));
for (id in 1:3)
{
TT[,,id]=readRDS(paste('time',id,'-tab1','.rds',sep =''))[-(1:5),]
}
Re<-matrix(0,24,5)
ff<-function(obj){
m<-format(round(apply(obj,1,mean,na.rm=TRUE),dig),nsmall =dig)
s<-format(round(apply(obj,1,sd,na.rm=TRUE),dig),nsmall =dig)
l<-rep('(',length(m));
r<-rep(')',length(m));
R1=paste(m,l,s,r,sep='')
return(R1)
}
for (k in 1:5)
{Re[1:8,k]=ff(TT[,1:100+(k-1)*100,1]);
Re[9:16,k]=ff(TT[,1:100+(k-1)*100,2]);
Re[17:24,k]=ff(TT[,1:100+(k-1)*100,3]);
}
colnames(Re)<-c('p=100','p=200','p=400','p=800','p=1600');
Re<-cbind(rep(c('CLIME','glasso','BigQuic','glasso-ADMM','SCIO','EQUAL','D-trace',' EQUALs'),3),Re)
stargazer(Re)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cognitoidentityprovider_operations.R
\name{admin_create_user}
\alias{admin_create_user}
\title{Creates a new user in the specified user pool}
\usage{
admin_create_user(UserPoolId, Username, UserAttributes = NULL,
ValidationData = NULL, TemporaryPassword = NULL,
ForceAliasCreation = NULL, MessageAction = NULL,
DesiredDeliveryMediums = NULL)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID for the user pool where the user will be created.}
\item{Username}{[required] The username for the user. Must be unique within the user pool. Must be a UTF-8 string between 1 and 128 characters. After the user is created, the username cannot be changed.}
\item{UserAttributes}{An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than \code{Username}. However, any attributes that you specify as required (in or in the \strong{Attributes} tab of the console) must be supplied either by you (in your call to \code{AdminCreateUser}) or by the user (when he or she signs up in response to your welcome message).
For custom attributes, you must prepend the \code{custom:} prefix to the attribute name.
To send a message inviting the user to sign up, you must specify the user's email address or phone number. This can be done in your call to AdminCreateUser or in the \strong{Users} tab of the Amazon Cognito console for managing your user pools.
In your call to \code{AdminCreateUser}, you can set the \code{email_verified} attribute to \code{True}, and you can set the \code{phone_number_verified} attribute to \code{True}. (You can also do this by calling .)
\itemize{
\item \strong{email}: The email address of the user to whom the message that contains the code and username will be sent. Required if the \code{email_verified} attribute is set to \code{True}, or if \code{"EMAIL"} is specified in the \code{DesiredDeliveryMediums} parameter.
\item \strong{phone\_number}: The phone number of the user to whom the message that contains the code and username will be sent. Required if the \code{phone_number_verified} attribute is set to \code{True}, or if \code{"SMS"} is specified in the \code{DesiredDeliveryMediums} parameter.
}}
\item{ValidationData}{The user's validation data. This is an array of name-value pairs that contain user attributes and attribute values that you can use for custom validation, such as restricting the types of user accounts that can be registered. For example, you might choose to allow or disallow user sign-up based on the user's domain.
To configure custom validation, you must create a Pre Sign-up Lambda trigger for the user pool as described in the Amazon Cognito Developer Guide. The Lambda trigger receives the validation data and uses it in the validation process.
The user's validation data is not persisted.}
\item{TemporaryPassword}{The user's temporary password. This password must conform to the password policy that you specified when you created the user pool.
The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page along with a new password to be used in all future sign-ins.
This parameter is not required. If you do not specify a value, Amazon Cognito generates one for you.
The temporary password can only be used until the user account expiration limit that you specified when you created the user pool. To reset the account after that time limit, you must call \code{AdminCreateUser} again, specifying \code{"RESEND"} for the \code{MessageAction} parameter.}
\item{ForceAliasCreation}{This parameter is only used if the \code{phone_number_verified} or \code{email_verified} attribute is set to \code{True}. Otherwise, it is ignored.
If this parameter is set to \code{True} and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.
If this parameter is set to \code{False}, the API throws an \code{AliasExistsException} error if the alias already exists. The default value is \code{False}.}
\item{MessageAction}{Set to \code{"RESEND"} to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to \code{"SUPPRESS"} to suppress sending the message. Only one value can be specified.}
\item{DesiredDeliveryMediums}{Specify \code{"EMAIL"} if email will be used to send the welcome message. Specify \code{"SMS"} if the phone number will be used. The default value is \code{"SMS"}. More than one value can be specified.}
}
\description{
Creates a new user in the specified user pool.
}
\details{
If \code{MessageAction} is not set, the default is to send a welcome message via email or phone (SMS).
This message is based on a template that you configured in your call to or . This template includes your custom sign-up instructions and placeholders for user name and temporary password.
Alternatively, you can call AdminCreateUser with "SUPPRESS" for the \code{MessageAction} parameter, and Amazon Cognito will not send any email.
In either case, the user will be in the \code{FORCE_CHANGE_PASSWORD} state until they sign in and change their password.
AdminCreateUser requires developer credentials.
}
\section{Accepted Parameters}{
\preformatted{admin_create_user(
UserPoolId = "string",
Username = "string",
UserAttributes = list(
list(
Name = "string",
Value = "string"
)
),
ValidationData = list(
list(
Name = "string",
Value = "string"
)
),
TemporaryPassword = "string",
ForceAliasCreation = TRUE|FALSE,
MessageAction = "RESEND"|"SUPPRESS",
DesiredDeliveryMediums = list(
"SMS"|"EMAIL"
)
)
}
}
| /service/paws.cognitoidentityprovider/man/admin_create_user.Rd | permissive | CR-Mercado/paws | R | false | true | 6,032 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cognitoidentityprovider_operations.R
\name{admin_create_user}
\alias{admin_create_user}
\title{Creates a new user in the specified user pool}
\usage{
admin_create_user(UserPoolId, Username, UserAttributes = NULL,
ValidationData = NULL, TemporaryPassword = NULL,
ForceAliasCreation = NULL, MessageAction = NULL,
DesiredDeliveryMediums = NULL)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID for the user pool where the user will be created.}
\item{Username}{[required] The username for the user. Must be unique within the user pool. Must be a UTF-8 string between 1 and 128 characters. After the user is created, the username cannot be changed.}
\item{UserAttributes}{An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than \code{Username}. However, any attributes that you specify as required (in or in the \strong{Attributes} tab of the console) must be supplied either by you (in your call to \code{AdminCreateUser}) or by the user (when he or she signs up in response to your welcome message).
For custom attributes, you must prepend the \code{custom:} prefix to the attribute name.
To send a message inviting the user to sign up, you must specify the user's email address or phone number. This can be done in your call to AdminCreateUser or in the \strong{Users} tab of the Amazon Cognito console for managing your user pools.
In your call to \code{AdminCreateUser}, you can set the \code{email_verified} attribute to \code{True}, and you can set the \code{phone_number_verified} attribute to \code{True}. (You can also do this by calling \code{AdminUpdateUserAttributes}.)
\itemize{
\item \strong{email}: The email address of the user to whom the message that contains the code and username will be sent. Required if the \code{email_verified} attribute is set to \code{True}, or if \code{"EMAIL"} is specified in the \code{DesiredDeliveryMediums} parameter.
\item \strong{phone\_number}: The phone number of the user to whom the message that contains the code and username will be sent. Required if the \code{phone_number_verified} attribute is set to \code{True}, or if \code{"SMS"} is specified in the \code{DesiredDeliveryMediums} parameter.
}}
\item{ValidationData}{The user's validation data. This is an array of name-value pairs that contain user attributes and attribute values that you can use for custom validation, such as restricting the types of user accounts that can be registered. For example, you might choose to allow or disallow user sign-up based on the user's domain.
To configure custom validation, you must create a Pre Sign-up Lambda trigger for the user pool as described in the Amazon Cognito Developer Guide. The Lambda trigger receives the validation data and uses it in the validation process.
The user's validation data is not persisted.}
\item{TemporaryPassword}{The user's temporary password. This password must conform to the password policy that you specified when you created the user pool.
The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page along with a new password to be used in all future sign-ins.
This parameter is not required. If you do not specify a value, Amazon Cognito generates one for you.
The temporary password can only be used until the user account expiration limit that you specified when you created the user pool. To reset the account after that time limit, you must call \code{AdminCreateUser} again, specifying \code{"RESEND"} for the \code{MessageAction} parameter.}
\item{ForceAliasCreation}{This parameter is only used if the \code{phone_number_verified} or \code{email_verified} attribute is set to \code{True}. Otherwise, it is ignored.
If this parameter is set to \code{True} and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.
If this parameter is set to \code{False}, the API throws an \code{AliasExistsException} error if the alias already exists. The default value is \code{False}.}
\item{MessageAction}{Set to \code{"RESEND"} to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to \code{"SUPPRESS"} to suppress sending the message. Only one value can be specified.}
\item{DesiredDeliveryMediums}{Specify \code{"EMAIL"} if email will be used to send the welcome message. Specify \code{"SMS"} if the phone number will be used. The default value is \code{"SMS"}. More than one value can be specified.}
}
\description{
Creates a new user in the specified user pool.
}
\details{
If \code{MessageAction} is not set, the default is to send a welcome message via email or phone (SMS).
This message is based on a template that you configured in your call to \code{CreateUserPool} or \code{UpdateUserPool}. This template includes your custom sign-up instructions and placeholders for user name and temporary password.
Alternatively, you can call AdminCreateUser with "SUPPRESS" for the \code{MessageAction} parameter, and Amazon Cognito will not send any email.
In either case, the user will be in the \code{FORCE_CHANGE_PASSWORD} state until they sign in and change their password.
AdminCreateUser requires developer credentials.
}
\section{Accepted Parameters}{
\preformatted{admin_create_user(
UserPoolId = "string",
Username = "string",
UserAttributes = list(
list(
Name = "string",
Value = "string"
)
),
ValidationData = list(
list(
Name = "string",
Value = "string"
)
),
TemporaryPassword = "string",
ForceAliasCreation = TRUE|FALSE,
MessageAction = "RESEND"|"SUPPRESS",
DesiredDeliveryMediums = list(
"SMS"|"EMAIL"
)
)
}
}
|
library(hhh4contacts)
### Name: adaptP
### Title: Adapt a Transition Matrix to a Specific Stationary Distribution
### Aliases: adaptP
### ** Examples
## a row-normalized contact matrix (each row sums to 1, so 'C' is a valid
## transition matrix between the three groups)
C <- matrix(c(0.8, 0.1, 0.1,
              0.2, 0.6, 0.2,
              0.1, 0.2, 0.7), byrow=TRUE, ncol=3, nrow=3)
stationary(C)
## population fractions define the target distribution
popfracs <- c(0.4, 0.3, 0.3)
## adapt 'C' to the given population fractions
Cpop <- adaptP(C, popfracs, niter = 50000)
stationary(Cpop)  # should now agree with 'popfracs'
## this method increases the diagonal values of 'C'
round(C, 3)
round(Cpop, 3)
round(Cpop/C, 3)  # ratio > 1 on the diagonal illustrates the increase
| /data/genthat_extracted_code/hhh4contacts/examples/adaptP.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 609 | r | library(hhh4contacts)
### Name: adaptP
### Title: Adapt a Transition Matrix to a Specific Stationary Distribution
### Aliases: adaptP
### ** Examples
## a row-normalized contact matrix (each row sums to 1, so 'C' is a valid
## transition matrix between the three groups)
C <- matrix(c(0.8, 0.1, 0.1,
              0.2, 0.6, 0.2,
              0.1, 0.2, 0.7), byrow=TRUE, ncol=3, nrow=3)
stationary(C)
## population fractions define the target distribution
popfracs <- c(0.4, 0.3, 0.3)
## adapt 'C' to the given population fractions
Cpop <- adaptP(C, popfracs, niter = 50000)
stationary(Cpop)  # should now agree with 'popfracs'
## this method increases the diagonal values of 'C'
round(C, 3)
round(Cpop, 3)
round(Cpop/C, 3)  # ratio > 1 on the diagonal illustrates the increase
|
#' Apply a binary operator to two numeric values and describe the result.
#'
#' @param x,y Values to combine; both must be numeric for the operation
#'   to be performed.
#' @param operator A binary function (e.g. `+`, `*`) applied as operator(x, y).
#' @return A character string such as "2 + 3 is 5", or a refusal message
#'   when either operand is non-numeric.
calc <- function(x, y, operator) {
  # && short-circuits and is the idiomatic scalar test (the original used
  # vectorized `&` plus ifelse/switch on a fabricated string key).
  if (is.numeric(x) && is.numeric(y)) {
    res <- operator(x, y)
    # substitute() recovers the symbol the caller passed (e.g. `+`);
    # paste() coerces it to its printable name.
    paste(x, substitute(operator), y, "is", res)
  } else {
    # Refusal message preserved verbatim from the original implementation.
    paste("Nah dude. Wrong operandos")
  }
}
| /calc.R | no_license | AlexeiSleptcov/R_examples | R | false | false | 296 | r | calc = function(x, y, operator){
# TRUE only when both operands are numeric (scalar `&` works here since
# is.numeric() always returns a single TRUE/FALSE).
num = is.numeric(x) & is.numeric(y)
# Map the logical flag onto a string key so switch() can dispatch on it.
num = ifelse(num, "numeric", "none")
ans = switch(num, "numeric" = {
  res = operator(x, y)
  # substitute(operator) recovers the symbol the caller passed (e.g. `+`);
  # paste() coerces it to its printable name.
  paste(x, substitute(operator), y, "is", res)
},
"none" = {
  # Refusal message for non-numeric operands.
  paste("Nah dude. Wrong operandos")
})
return(ans)
}
|
#' Lambertian (diffuse) Material
#'
#' @param color Default `white`. The color of the surface. Can be either
#' a hexadecimal code, R color string, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param checkercolor Default `NA`. If not `NA`, determines the secondary color of the checkered surface.
#' Can be either a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param checkerperiod Default `3`. The period of the checker pattern. Increasing this value makes the checker
#' pattern bigger, and decreasing it makes it smaller
#' @param noise Default `0`. If not `0`, covers the surface in a turbulent marble pattern. This value will determine
#' the amount of turbulence in the texture.
#' @param noisephase Default `0`. The phase of the noise. The noise will repeat at `360`.
#' @param noiseintensity Default `10`. Intensity of the noise.
#' @param noisecolor Default `#000000`. The secondary color of the noise pattern.
#' Can be either a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param image_array A 3-layer RGB array to be used as the texture on the surface of the object.
#' @param lightintensity Default `NA`. If a positive value, this will turn this object into a light emitting the value specified
#' in `color` (ignoring other properties). Higher values will produce a brighter light.
#' @param fog Default `FALSE`. If `TRUE`, the object will be a volumetric scatterer.
#' @param fogdensity Default `0.01`. The density of the fog. Higher values will produce more opaque objects.
#' @param implicit_sample Default `FALSE`, unless the object is a light. If `TRUE`, the object will
#' be sampled as part of the scattering probability density function.
#'
#' @return Single row of a tibble describing the sphere in the scene.
#' @export
#' @importFrom grDevices col2rgb
#'
#' @examples
#' #Generate the cornell box and add a single white sphere to the center
#' scene = generate_cornell() %>%
#' add_object(sphere(x=555/2,y=555/2,z=555/2,radius=555/8,material=lambertian()))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#'
#' #Add a checkered rectangular cube below
#' scene = scene %>%
#' add_object(cube(x=555/2,y=555/8,z=555/2,xwidth=555/2,ywidth=555/4,zwidth=555/2,
#' material = lambertian(checkercolor="purple",checkerperiod=20)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#'
#' #Add a marbled sphere
#' scene = scene %>%
#' add_object(sphere(x=555/2+555/4,y=555/2,z=555/2,radius=555/8,
#' material = lambertian(noise=1/20)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#'
#' #Add an orange volumetric (fog) cube
#' scene = scene %>%
#' add_object(cube(x=555/2-555/4,y=555/2,z=555/2,xwidth=555/4,ywidth=555/4,zwidth=555/4,
#' material = lambertian(fog=TRUE, fogdensity=0.05,color="orange")))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
lambertian = function(color = "#ffffff", checkercolor = NA, checkerperiod = 3,
                      noise = 0, noisephase = 0, noiseintensity = 10, noisecolor = "#000000",
                      image_array = NA,
                      lightintensity = NA, fog = FALSE, fogdensity = 0.01, implicit_sample = FALSE) {
  # Resolve the secondary checker color only when one was supplied.
  if(all(!is.na(checkercolor))) {
    checkercolor = convert_color(checkercolor)
  } else {
    checkercolor = NA
  }
  color = convert_color(color)
  noisecolor = convert_color(noisecolor)
  # A texture must be an array (a matrix is a 2D array). Anything else is
  # discarded so the renderer never receives an unusable texture. all() keeps
  # the scalar && condition safe for multi-element non-array inputs.
  if(!is.array(image_array) && !all(is.na(image_array))) {
    # Fix: the original assigned to an unused local `image`, so an invalid
    # texture was warned about as "ignoring" but still passed into the tibble.
    image_array = NA
    warning("Image not in recognized format (array or matrix), ignoring")
  }
  # A zero checker period would make the pattern degenerate (division by zero).
  assertthat::assert_that(checkerperiod != 0)
  # Single scene row; list-columns carry per-material data of varying length.
  tibble::tibble(type = "lambertian",
                 properties = list(color), checkercolor=list(c(checkercolor,checkerperiod)),
                 noise=noise, noisephase = noisephase, noiseintensity = noiseintensity, noisecolor = list(noisecolor),
                 image = list(image_array), lightintensity = lightintensity,
                 fog=fog, fogdensity=fogdensity,implicit_sample = implicit_sample)
}
#' Metallic Material
#'
#' @param color Default `white`. The color of the sphere. Can be either
#' a hexadecimal code, R color string, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param fuzz Default `0`. The roughness of the metallic surface. Maximum `1`.
#' @param implicit_sample Default `FALSE`. If `TRUE`, the object will
#' be sampled as part of the scattering probability density function.
#' @importFrom grDevices col2rgb
#'
#' @return Single row of a tibble describing the sphere in the scene.
#' @export
#'
#' @examples
#' #Generate the cornell box with a single metal sphere in the center
#' scene = generate_cornell() %>%
#' add_object(sphere(x=555/2,y=555/2,z=555/2,radius=555/8,material=metal()))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#' #Add a rotated shiny metal cube
#' scene = scene %>%
#' add_object(cube(x=380,y=150/2,z=200,xwidth=150,ywidth=150,zwidth=150,
#' material = metal(color="#8B4513"),angle=c(0,45,0)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#' #Add a brushed metal cube (setting the fuzz variable)
#' scene = scene %>%
#' add_object(cube(x=150,y=150/2,z=300,xwidth=150,ywidth=150,zwidth=150,
#' material = metal(color="#FAFAD2",fuzz=0.1),angle=c(0,-30,0)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
metal = function(color = "#ffffff", fuzz = 0, implicit_sample = FALSE) {
  # Normalize the color spec (name / hex / rgb vector) to a numeric rgb triple.
  rgb_triple = convert_color(color)
  # The renderer reads a metal's rgb and roughness from one packed vector.
  surface = c(rgb_triple, fuzz)
  # Emit the single scene row; placeholder columns (checker/noise/image/light)
  # keep the column schema identical across material types.
  tibble::tibble(
    type = "metal",
    properties = list(surface),
    checkercolor = list(NA),
    noise = 0,
    noisephase = 0,
    noiseintensity = 0,
    noisecolor = list(c(0, 0, 0)),
    islight = FALSE,
    lightinfo = list(NA),
    image = list(NA),
    lightintensity = NA,
    fog = FALSE,
    fogdensity = 0.01,
    implicit_sample = implicit_sample
  )
}
#' Dielectric (glass) Material
#'
#' @param color Default `white`. The color of the surface. Can be either
#' a hexadecimal code, R color string, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param refraction Default `1.5`. The index of refraction.
#' @param implicit_sample Default `FALSE`. If `TRUE`, the object will
#' be sampled as part of the scattering probability density function.
#'
#' @return Single row of a tibble describing the sphere in the scene.
#' @export
#'
#' @examples
#' #Generate a checkered ground
#' scene = generate_ground(depth=-0.5,
#' material=lambertian(color="white", checkercolor="grey30",checkerperiod=2))
#' \dontrun{
#' render_scene(scene,parallel=TRUE)
#' }
#'
#' #Add a glass sphere
#' \dontrun{
#' scene %>%
#' add_object(sphere(x=-0.5,radius=0.5,material=dielectric())) %>%
#' render_scene(parallel=TRUE,samples=400)
#' }
#'
#' #Add a rotated colored glass cube
#' \dontrun{
#' scene %>%
#' add_object(sphere(x=-0.5,radius=0.5,material=dielectric())) %>%
#' add_object(cube(x=0.5,xwidth=0.5,material=dielectric(color="darkgreen"),angle=c(0,-45,0))) %>%
#' render_scene(parallel=TRUE,samples=40)
#' }
#'
#' #Add an area light behind and at an angle and turn off the ambient lighting
#' \dontrun{
#' scene %>%
#' add_object(sphere(x=-0.5,radius=0.5,material=dielectric())) %>%
#' add_object(cube(x=0.5,xwidth=0.5,material=dielectric(color="darkgreen"),angle=c(0,-45,0))) %>%
#' add_object(yz_rect(z=-3,y=1,x=0,zwidth=3,ywidth=1.5,
#' material=lambertian(lightintensity=15),
#' angle=c(0,-90,45), order_rotation = c(3,2,1))) %>%
#' render_scene(parallel=TRUE,aperture=0, ambient_light=FALSE,samples=1000)
#' }
# Build the single scene-tibble row describing a dielectric (glass) material.
# The refraction index is packed alongside the rgb triple in `properties`;
# the unused texture/noise/light columns are NA/zero placeholders so every
# material type shares one column schema.
dielectric = function(color="white", refraction = 1.5, implicit_sample = FALSE) {
  # Normalize the color spec (name / hex / rgb vector) to a numeric rgb triple.
  color = convert_color(color)
  tibble::tibble(type = "dielectric",
                 properties = list(c(color,refraction)),
                 checkercolor=list(NA), noise=0, noisephase = 0, noiseintensity = 0, noisecolor = list(c(0,0,0)),
                 image = list(NA), lightintensity = NA,
                 fog=FALSE, fogdensity=NA, implicit_sample = implicit_sample)
} | /R/materials.R | no_license | javierluraschi/rayrender | R | false | false | 8,978 | r | #' Lambertian (diffuse) Material
#'
#' @param color Default `white`. The color of the surface. Can be either
#' a hexadecimal code, R color string, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param checkercolor Default `NA`. If not `NA`, determines the secondary color of the checkered surface.
#' Can be either a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param checkerperiod Default `3`. The period of the checker pattern. Increasing this value makes the checker
#' pattern bigger, and decreasing it makes it smaller
#' @param noise Default `0`. If not `0`, covers the surface in a turbulent marble pattern. This value will determine
#' the amount of turbulence in the texture.
#' @param noisephase Default `0`. The phase of the noise. The noise will repeat at `360`.
#' @param noiseintensity Default `10`. Intensity of the noise.
#' @param noisecolor Default `#000000`. The secondary color of the noise pattern.
#' Can be either a hexadecimal code, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param image_array A 3-layer RGB array to be used as the texture on the surface of the object.
#' @param lightintensity Default `NA`. If a positive value, this will turn this object into a light emitting the value specified
#' in `color` (ignoring other properties). Higher values will produce a brighter light.
#' @param fog Default `FALSE`. If `TRUE`, the object will be a volumetric scatterer.
#' @param fogdensity Default `0.01`. The density of the fog. Higher values will produce more opaque objects.
#' @param implicit_sample Default `FALSE`, unless the object is a light. If `TRUE`, the object will
#' be sampled as part of the scattering probability density function.
#'
#' @return Single row of a tibble describing the sphere in the scene.
#' @export
#' @importFrom grDevices col2rgb
#'
#' @examples
#' #Generate the cornell box and add a single white sphere to the center
#' scene = generate_cornell() %>%
#' add_object(sphere(x=555/2,y=555/2,z=555/2,radius=555/8,material=lambertian()))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#'
#' #Add a checkered rectangular cube below
#' scene = scene %>%
#' add_object(cube(x=555/2,y=555/8,z=555/2,xwidth=555/2,ywidth=555/4,zwidth=555/2,
#' material = lambertian(checkercolor="purple",checkerperiod=20)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#'
#' #Add a marbled sphere
#' scene = scene %>%
#' add_object(sphere(x=555/2+555/4,y=555/2,z=555/2,radius=555/8,
#' material = lambertian(noise=1/20)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#'
#' #Add an orange volumetric (fog) cube
#' scene = scene %>%
#' add_object(cube(x=555/2-555/4,y=555/2,z=555/2,xwidth=555/4,ywidth=555/4,zwidth=555/4,
#' material = lambertian(fog=TRUE, fogdensity=0.05,color="orange")))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
lambertian = function(color = "#ffffff", checkercolor = NA, checkerperiod = 3,
                      noise = 0, noisephase = 0, noiseintensity = 10, noisecolor = "#000000",
                      image_array = NA,
                      lightintensity = NA, fog = FALSE, fogdensity = 0.01, implicit_sample = FALSE) {
  # Resolve the secondary checker color only when one was supplied.
  if(all(!is.na(checkercolor))) {
    checkercolor = convert_color(checkercolor)
  } else {
    checkercolor = NA
  }
  color = convert_color(color)
  noisecolor = convert_color(noisecolor)
  # A texture must be an array (a matrix is a 2D array). Anything else is
  # discarded so the renderer never receives an unusable texture. all() keeps
  # the scalar && condition safe for multi-element non-array inputs.
  if(!is.array(image_array) && !all(is.na(image_array))) {
    # Fix: the original assigned to an unused local `image`, so an invalid
    # texture was warned about as "ignoring" but still passed into the tibble.
    image_array = NA
    warning("Image not in recognized format (array or matrix), ignoring")
  }
  # A zero checker period would make the pattern degenerate (division by zero).
  assertthat::assert_that(checkerperiod != 0)
  # Single scene row; list-columns carry per-material data of varying length.
  tibble::tibble(type = "lambertian",
                 properties = list(color), checkercolor=list(c(checkercolor,checkerperiod)),
                 noise=noise, noisephase = noisephase, noiseintensity = noiseintensity, noisecolor = list(noisecolor),
                 image = list(image_array), lightintensity = lightintensity,
                 fog=fog, fogdensity=fogdensity,implicit_sample = implicit_sample)
}
#' Metallic Material
#'
#' @param color Default `white`. The color of the sphere. Can be either
#' a hexadecimal code, R color string, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param fuzz Default `0`. The roughness of the metallic surface. Maximum `1`.
#' @param implicit_sample Default `FALSE`. If `TRUE`, the object will
#' be sampled as part of the scattering probability density function.
#' @importFrom grDevices col2rgb
#'
#' @return Single row of a tibble describing the sphere in the scene.
#' @export
#'
#' @examples
#' #Generate the cornell box with a single metal sphere in the center
#' scene = generate_cornell() %>%
#' add_object(sphere(x=555/2,y=555/2,z=555/2,radius=555/8,material=metal()))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#' #Add a rotated shiny metal cube
#' scene = scene %>%
#' add_object(cube(x=380,y=150/2,z=200,xwidth=150,ywidth=150,zwidth=150,
#' material = metal(color="#8B4513"),angle=c(0,45,0)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
#' #Add a brushed metal cube (setting the fuzz variable)
#' scene = scene %>%
#' add_object(cube(x=150,y=150/2,z=300,xwidth=150,ywidth=150,zwidth=150,
#' material = metal(color="#FAFAD2",fuzz=0.1),angle=c(0,-30,0)))
#' \dontrun{
#' render_scene(scene, lookfrom=c(278,278,-800),lookat = c(278,278,0), samples=500,
#' aperture=0, fov=40, ambient_light=FALSE, parallel=TRUE)
#' }
metal = function(color = "#ffffff", fuzz = 0, implicit_sample = FALSE) {
  # Normalize the color spec (name / hex / rgb vector) to a numeric rgb triple.
  rgb_triple = convert_color(color)
  # The renderer reads a metal's rgb and roughness from one packed vector.
  surface = c(rgb_triple, fuzz)
  # Emit the single scene row; placeholder columns (checker/noise/image/light)
  # keep the column schema identical across material types.
  tibble::tibble(
    type = "metal",
    properties = list(surface),
    checkercolor = list(NA),
    noise = 0,
    noisephase = 0,
    noiseintensity = 0,
    noisecolor = list(c(0, 0, 0)),
    islight = FALSE,
    lightinfo = list(NA),
    image = list(NA),
    lightintensity = NA,
    fog = FALSE,
    fogdensity = 0.01,
    implicit_sample = implicit_sample
  )
}
#' Dielectric (glass) Material
#'
#' @param color Default `white`. The color of the surface. Can be either
#' a hexadecimal code, R color string, or a numeric rgb vector listing three intensities between `0` and `1`.
#' @param refraction Default `1.5`. The index of refraction.
#' @param implicit_sample Default `FALSE`. If `TRUE`, the object will
#' be sampled as part of the scattering probability density function.
#'
#' @return Single row of a tibble describing the sphere in the scene.
#' @export
#'
#' @examples
#' #Generate a checkered ground
#' scene = generate_ground(depth=-0.5,
#' material=lambertian(color="white", checkercolor="grey30",checkerperiod=2))
#' \dontrun{
#' render_scene(scene,parallel=TRUE)
#' }
#'
#' #Add a glass sphere
#' \dontrun{
#' scene %>%
#' add_object(sphere(x=-0.5,radius=0.5,material=dielectric())) %>%
#' render_scene(parallel=TRUE,samples=400)
#' }
#'
#' #Add a rotated colored glass cube
#' \dontrun{
#' scene %>%
#' add_object(sphere(x=-0.5,radius=0.5,material=dielectric())) %>%
#' add_object(cube(x=0.5,xwidth=0.5,material=dielectric(color="darkgreen"),angle=c(0,-45,0))) %>%
#' render_scene(parallel=TRUE,samples=40)
#' }
#'
#' #Add an area light behind and at an angle and turn off the ambient lighting
#' \dontrun{
#' scene %>%
#' add_object(sphere(x=-0.5,radius=0.5,material=dielectric())) %>%
#' add_object(cube(x=0.5,xwidth=0.5,material=dielectric(color="darkgreen"),angle=c(0,-45,0))) %>%
#' add_object(yz_rect(z=-3,y=1,x=0,zwidth=3,ywidth=1.5,
#' material=lambertian(lightintensity=15),
#' angle=c(0,-90,45), order_rotation = c(3,2,1))) %>%
#' render_scene(parallel=TRUE,aperture=0, ambient_light=FALSE,samples=1000)
#' }
# Build the single scene-tibble row describing a dielectric (glass) material.
# The refraction index is packed alongside the rgb triple in `properties`;
# the unused texture/noise/light columns are NA/zero placeholders so every
# material type shares one column schema.
dielectric = function(color="white", refraction = 1.5, implicit_sample = FALSE) {
  # Normalize the color spec (name / hex / rgb vector) to a numeric rgb triple.
  color = convert_color(color)
  tibble::tibble(type = "dielectric",
                 properties = list(c(color,refraction)),
                 checkercolor=list(NA), noise=0, noisephase = 0, noiseintensity = 0, noisecolor = list(c(0,0,0)),
                 image = list(NA), lightintensity = NA,
                 fog=FALSE, fogdensity=NA, implicit_sample = implicit_sample)
} |
\name{boot.yhat}
\alias{boot.yhat}
\title{Bootstrap metrics produced from \code{calc.yhat}}
\description{
This function is input to \code{boot} to bootstrap metrics
computed from \code{calc.yhat}.
}
\usage{
boot.yhat(data, indices, lmOut,regrout0)
}
\arguments{
\item{data}{
Original dataset
}
\item{indices}{
Vector of indices which define the bootstrap sample
}
\item{lmOut}{
Output of \code{lm}
}
\item{regrout0}{
Output of \code{calc.yhat}
}
} % end arguments
\details{
This function is input to \code{boot} to bootstrap metrics
computed from \code{calc.yhat}.
}
\value{
The output of \code{boot.yhat} when used in conjunction with \code{boot} is of class \code{boot} and is not further described
here. The output is designed to be useful as input for \code{booteval.yhat}
}
\references{
Nimon, K., & Oswald, F. L. (2013). Understanding the results of multiple linear regression: Beyond standardized regression coefficients. \emph{Organizational Research Methods}, \emph{16},
650-674.
}
\author{ Kim Nimon <kim.nimon@gmail.com>}
\seealso{
\code{\link{lm}}
\code{\link{calc.yhat}}
\code{\link{boot}}
\code{\link{booteval.yhat}}
}
\examples{
## Bootstrap regression results predicting paragraph
## comprehension based on three verbal tests: general info,
## sentence comprehension, & word classification
## Use HS dataset in MBESS
require ("MBESS")
data(HS.data)
## Regression
lm.out<-lm(paragrap~general+sentence+wordc,data=HS.data)
## Calculate regression metrics
regrOut<-calc.yhat(lm.out)
## Bootstrap results
require ("boot")
boot.out<-boot(HS.data,boot.yhat,100,lmOut=lm.out,regrout0=regrOut)
}
\keyword{models}
\keyword{regression} % end keywords
| /man/boot.yhat.Rd | no_license | NLangenfeldMcCoy/yhat | R | false | false | 1,847 | rd | \name{boot.yhat}
\alias{boot.yhat}
\title{Bootstrap metrics produced from /code{calc.yhat}}
\description{
This function is input to \code{boot} to bootstrap metrics
computed from \code{calc.yhat}.
}
\usage{
boot.yhat(data, indices, lmOut,regrout0)
}
\arguments{
\item{data}{
Original dataset
}
\item{indices}{
Vector of indices which define the bootstrap sample
}
\item{lmOut}{
Output of \code{lm}
}
\item{regrout0}{
Output of \code{calc.yhat}
}
} % end arguments
\details{
This function is input to \code{boot} to bootstrap metrics
computed from \code{calc.yhat}.
}
\value{
The output of \code{boot.yhat} when used in conjunction with \code{boot} is of class \code{boot} and is not further described
here. The output is designed to be useful as input for \code{booteval.yhat}
}
\references{
Nimon, K., & Oswald, F. L. (2013). Understanding the results of multiple linear regression: Beyond standardized regression coefficients. \emph{Organizational Research Methods}, \emph{16},
650-674.
}
\author{ Kim Nimon <kim.nimon@gmail.com>}
\seealso{
\code{\link{lm}}
\code{\link{calc.yhat}}
\code{\link{boot}}
\code{\link{booteval.yhat}}
}
\examples{
## Bootstrap regression results predicting paragraph
## comprehension based on three verbal tests: general info,
## sentence comprehension, & word classification
## Use HS dataset in MBESS
require ("MBESS")
data(HS.data)
## Regression
lm.out<-lm(paragrap~general+sentence+wordc,data=HS.data)
## Calculate regression metrics
regrOut<-calc.yhat(lm.out)
## Bootstrap results
require ("boot")
boot.out<-boot(HS.data,boot.yhat,100,lmOut=lm.out,regrout0=regrOut)
}
\keyword{models}
\keyword{regression} % end keywords
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% equals.default.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{equals}
\alias{equals.default}
\alias{equals}
\title{Compares an object with another}
\description{
Compares an object with another and returns \code{\link[base:logical]{TRUE}} if they are equal.
The equal property must be
1) \emph{reflexive}, i.e. \code{equals(o1,o1)} should be \code{\link[base:logical]{TRUE}}.
2) \emph{symmetric}, i.e. \code{equals(o1,o2)} is \code{\link[base:logical]{TRUE}} if and only
if \code{equals(o2,o1)} is \code{\link[base:logical]{TRUE}}.
3) \emph{transitive}, i.e. \code{equals(o1,o2)} is \code{\link[base:logical]{TRUE}} and
\code{equals(o2,o3)} is \code{\link[base:logical]{TRUE}}, then \code{equals(o1,o3)} should
be \code{\link[base:logical]{TRUE}}.
 4) \emph{consistent}, i.e. \code{equals(o1,o2)} should return the same
 result on multiple invocations as long as nothing has changed.
 5) \code{equals(o1,}\code{\link[base]{NULL}}\code{)} should return \code{\link[base:logical]{FALSE}}, unless
\code{o1} is also \code{\link[base]{NULL}}.
By default \code{\link[base]{identical}}() is used.
}
\usage{
\method{equals}{default}(object, other, ...)
}
\arguments{
\item{object, other}{Objects to be compared.}
\item{...}{Not used.}
}
\value{
Returns \code{\link[base:logical]{TRUE}} if the objects are equal, otherwise \code{\link[base:logical]{FALSE}}.
}
\author{Henrik Bengtsson}
\seealso{
\code{\link[base]{identical}}().
}
\keyword{attribute}
\keyword{utilities}
\keyword{internal}
| /man/equals.Rd | no_license | HenrikBengtsson/R.oo | R | false | false | 1,755 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% equals.default.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{equals}
\alias{equals.default}
\alias{equals}
\title{Compares an object with another}
\description{
Compares an object with another and returns \code{\link[base:logical]{TRUE}} if they are equal.
The equal property must be
1) \emph{reflexive}, i.e. \code{equals(o1,o1)} should be \code{\link[base:logical]{TRUE}}.
2) \emph{symmetric}, i.e. \code{equals(o1,o2)} is \code{\link[base:logical]{TRUE}} if and only
if \code{equals(o2,o1)} is \code{\link[base:logical]{TRUE}}.
3) \emph{transitive}, i.e. \code{equals(o1,o2)} is \code{\link[base:logical]{TRUE}} and
\code{equals(o2,o3)} is \code{\link[base:logical]{TRUE}}, then \code{equals(o1,o3)} should
be \code{\link[base:logical]{TRUE}}.
 4) \emph{consistent}, i.e. \code{equals(o1,o2)} should return the same
 result on multiple invocations as long as nothing has changed.
 5) \code{equals(o1,}\code{\link[base]{NULL}}\code{)} should return \code{\link[base:logical]{FALSE}}, unless
\code{o1} is also \code{\link[base]{NULL}}.
By default \code{\link[base]{identical}}() is used.
}
\usage{
\method{equals}{default}(object, other, ...)
}
\arguments{
\item{object, other}{Objects to be compared.}
\item{...}{Not used.}
}
\value{
Returns \code{\link[base:logical]{TRUE}} if the objects are equal, otherwise \code{\link[base:logical]{FALSE}}.
}
\author{Henrik Bengtsson}
\seealso{
\code{\link[base]{identical}}().
}
\keyword{attribute}
\keyword{utilities}
\keyword{internal}
|
library(tolBasis)
### Name: as.xts
### Title: eXtensible Time-Series (xts) Compatibility
### Aliases: as.xts.Serie
### Keywords: xts
### ** Examples
## Not run:
##D library(xts)
##D # monthly Serie of 12 random values starting January 2001
##D s1 <- Serie(rnorm(12), Monthly, as.Date(ymd("2001-01-01")))
##D # convert it to an xts object via the Serie method of as.xts()
##D xts1 <- as.xts(s1)
## End(Not run)
| /data/genthat_extracted_code/tolBasis/examples/as.xts.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 289 | r | library(tolBasis)
### Name: as.xts
### Title: eXtensible Time-Series (xts) Compatibility
### Aliases: as.xts.Serie
### Keywords: xts
### ** Examples
## Not run:
##D library(xts)
##D # monthly Serie of 12 random values starting January 2001
##D s1 <- Serie(rnorm(12), Monthly, as.Date(ymd("2001-01-01")))
##D # convert it to an xts object via the Serie method of as.xts()
##D xts1 <- as.xts(s1)
## End(Not run)
source('helper.R')
# Check for get.FillValueForTimeSeries() (defined in helper.R): gaps in the
# middle of an hourly series should be filled with the value from the same
# hour on the previous day. Errors are reported via print() rather than
# stop(), so all three checks always run.
testMiddleNA <- function() {
  # Fixture: three full days (1-3 Jan 2009) of hourly readings with values
  # 1..72, where readings 32 (2/1 7:00), 35 (2/1 10:00) and 40 (2/1 15:00)
  # are missing.
  data <- data.frame(
    "ReadingDateTime"=c(
      '1/1/2009 0:00', '1/1/2009 1:00', '1/1/2009 2:00', '1/1/2009 3:00', '1/1/2009 4:00', '1/1/2009 5:00', '1/1/2009 6:00', '1/1/2009 7:00', '1/1/2009 8:00', '1/1/2009 9:00', '1/1/2009 10:00', '1/1/2009 11:00', '1/1/2009 12:00', '1/1/2009 13:00', '1/1/2009 14:00', '1/1/2009 15:00', '1/1/2009 16:00', '1/1/2009 17:00', '1/1/2009 18:00', '1/1/2009 19:00', '1/1/2009 20:00', '1/1/2009 21:00', '1/1/2009 22:00', '1/1/2009 23:00',
      '2/1/2009 0:00', '2/1/2009 1:00', '2/1/2009 2:00', '2/1/2009 3:00', '2/1/2009 4:00', '2/1/2009 5:00', '2/1/2009 6:00', '2/1/2009 7:00', '2/1/2009 8:00', '2/1/2009 9:00', '2/1/2009 10:00', '2/1/2009 11:00', '2/1/2009 12:00', '2/1/2009 13:00', '2/1/2009 14:00', '2/1/2009 15:00', '2/1/2009 16:00', '2/1/2009 17:00', '2/1/2009 18:00', '2/1/2009 19:00', '2/1/2009 20:00', '2/1/2009 21:00', '2/1/2009 22:00', '2/1/2009 23:00',
      '3/1/2009 0:00', '3/1/2009 1:00', '3/1/2009 2:00', '3/1/2009 3:00', '3/1/2009 4:00', '3/1/2009 5:00', '3/1/2009 6:00', '3/1/2009 7:00', '3/1/2009 8:00', '3/1/2009 9:00', '3/1/2009 10:00', '3/1/2009 11:00', '3/1/2009 12:00', '3/1/2009 13:00', '3/1/2009 14:00', '3/1/2009 15:00', '3/1/2009 16:00', '3/1/2009 17:00', '3/1/2009 18:00', '3/1/2009 19:00', '3/1/2009 20:00', '3/1/2009 21:00', '3/1/2009 22:00', '3/1/2009 23:00'
    ),
    "Value" = c(
      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
      25, 26, 27, 28, 29, 30, 31, NA, 33, 34, NA, 36, 37, 38, 39, NA, 41, 42, 43, 44, 45, 46, 47, 48,
      49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72
    )
  )
  # Sanity-check the fixture: all three gaps really are NA before filling.
  if(!is.na(data$Value[32])) {
    print("Error: value[32] is avaliable")
  }
  if(!is.na(data$Value[35])) {
    print("Error: value[35] is avaliable")
  }
  if(!is.na(data$Value[40])) {
    print("Error: value[40] is avaliable")
  }
  result <- get.FillValueForTimeSeries(data)
  # Each gap should be filled from the previous day's same hour:
  # index 32 -> index 8 (value 8), 35 -> 11, 40 -> 16.
  if(result$Value[32] != 8) {
    print("Error: value[32] must be 8")
  }
  if(result$Value[35] != 11) {
    print("Error: value[35] must be 11")
  }
  if(result$Value[40] != 16) {
    print("Error: value[40] must be 16")
  }
}
testBeginNA <- function() {
  # Day one is entirely missing; per the original assertions every hour of
  # day one should be back-filled from the corresponding hour of day two,
  # i.e. result$Value[i] == i + 24 for i in 1..24.
  stamps <- unlist(lapply(1:3, function(day) sprintf("%d/1/2009 %d:00", day, 0:23)))
  values <- as.numeric(1:72)
  values[1:24] <- NA
  data <- data.frame(ReadingDateTime = stamps, Value = values)
  result <- get.FillValueForTimeSeries(data)
  for (i in seq_len(24)) {
    if (result$Value[i] != (i + 24)) {
      print(sprintf("Error: value[%d] must be %d", i, i + 24))
    }
  }
}
testEndNA <- function() {
  # Day three is entirely missing; per the original assertions every hour of
  # day three should be forward-filled from the corresponding hour of day
  # two, i.e. result$Value[i] == i - 24 for i in 49..72.
  stamps <- unlist(lapply(1:3, function(day) sprintf("%d/1/2009 %d:00", day, 0:23)))
  values <- as.numeric(1:72)
  values[49:72] <- NA
  data <- data.frame(ReadingDateTime = stamps, Value = values)
  result <- get.FillValueForTimeSeries(data)
  for (i in 49:72) {
    if (result$Value[i] != (i - 24)) {
      print(sprintf("Error: value[%d] must be %d", i, i - 24))
    }
  }
}
testFirstTwoNA <- function() {
  # The first two days are entirely missing; per the original assertions both
  # are filled from day three: day one expects value i + 48 and day two
  # expects value i + 24.
  stamps <- unlist(lapply(1:3, function(day) sprintf("%d/1/2009 %d:00", day, 0:23)))
  values <- as.numeric(1:72)
  values[1:48] <- NA
  data <- data.frame(ReadingDateTime = stamps, Value = values)
  result <- get.FillValueForTimeSeries(data)
  for (i in seq_len(24)) {
    if (result$Value[i] != (i + 48)) {
      print(sprintf("Error: value[%d] must be %d", i, i + 48))
    }
  }
  for (i in 25:48) {
    if (result$Value[i] != (i + 24)) {
      print(sprintf("Error: value[%d] must be %d", i, i + 24))
    }
  }
}
testLastTwoNA <- function() {
  # The last two days are entirely missing; per the original assertions both
  # are filled from day one: day two expects value i - 24 and day three
  # expects value i - 48.
  stamps <- unlist(lapply(1:3, function(day) sprintf("%d/1/2009 %d:00", day, 0:23)))
  values <- as.numeric(1:72)
  values[25:72] <- NA
  data <- data.frame(ReadingDateTime = stamps, Value = values)
  result <- get.FillValueForTimeSeries(data)
  for (i in 25:48) {
    if (result$Value[i] != (i - 24)) {
      print(sprintf("Error: value[%d] must be %d", i, i - 24))
    }
  }
  for (i in 49:72) {
    if (result$Value[i] != (i - 48)) {
      print(sprintf("Error: value[%d] must be %d", i, i - 48))
    }
  }
}
# Run the fill-value test suite; each test print()s a message on failure
# rather than stopping, so all tests always execute.
testMiddleNA()
testBeginNA()
testEndNA()
testFirstTwoNA()
testLastTwoNA() | /test.R | no_license | daoleen/RAir | R | false | false | 10,014 | r | source('helper.R')
testMiddleNA <- function() {
data <- data.frame(
"ReadingDateTime"=c(
'1/1/2009 0:00', '1/1/2009 1:00', '1/1/2009 2:00', '1/1/2009 3:00', '1/1/2009 4:00', '1/1/2009 5:00', '1/1/2009 6:00', '1/1/2009 7:00', '1/1/2009 8:00', '1/1/2009 9:00', '1/1/2009 10:00', '1/1/2009 11:00', '1/1/2009 12:00', '1/1/2009 13:00', '1/1/2009 14:00', '1/1/2009 15:00', '1/1/2009 16:00', '1/1/2009 17:00', '1/1/2009 18:00', '1/1/2009 19:00', '1/1/2009 20:00', '1/1/2009 21:00', '1/1/2009 22:00', '1/1/2009 23:00',
'2/1/2009 0:00', '2/1/2009 1:00', '2/1/2009 2:00', '2/1/2009 3:00', '2/1/2009 4:00', '2/1/2009 5:00', '2/1/2009 6:00', '2/1/2009 7:00', '2/1/2009 8:00', '2/1/2009 9:00', '2/1/2009 10:00', '2/1/2009 11:00', '2/1/2009 12:00', '2/1/2009 13:00', '2/1/2009 14:00', '2/1/2009 15:00', '2/1/2009 16:00', '2/1/2009 17:00', '2/1/2009 18:00', '2/1/2009 19:00', '2/1/2009 20:00', '2/1/2009 21:00', '2/1/2009 22:00', '2/1/2009 23:00',
'3/1/2009 0:00', '3/1/2009 1:00', '3/1/2009 2:00', '3/1/2009 3:00', '3/1/2009 4:00', '3/1/2009 5:00', '3/1/2009 6:00', '3/1/2009 7:00', '3/1/2009 8:00', '3/1/2009 9:00', '3/1/2009 10:00', '3/1/2009 11:00', '3/1/2009 12:00', '3/1/2009 13:00', '3/1/2009 14:00', '3/1/2009 15:00', '3/1/2009 16:00', '3/1/2009 17:00', '3/1/2009 18:00', '3/1/2009 19:00', '3/1/2009 20:00', '3/1/2009 21:00', '3/1/2009 22:00', '3/1/2009 23:00'
),
"Value" = c(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, NA, 33, 34, NA, 36, 37, 38, 39, NA, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72
)
)
if(!is.na(data$Value[32])) {
print("Error: value[32] is avaliable")
}
if(!is.na(data$Value[35])) {
print("Error: value[35] is avaliable")
}
if(!is.na(data$Value[40])) {
print("Error: value[40] is avaliable")
}
result <- get.FillValueForTimeSeries(data)
if(result$Value[32] != 8) {
print("Error: value[32] must be 8")
}
if(result$Value[35] != 11) {
print("Error: value[35] must be 11")
}
if(result$Value[40] != 16) {
print("Error: value[40] must be 16")
}
}
testBeginNA <- function() {
data <- data.frame(
"ReadingDateTime"=c(
'1/1/2009 0:00', '1/1/2009 1:00', '1/1/2009 2:00', '1/1/2009 3:00', '1/1/2009 4:00', '1/1/2009 5:00', '1/1/2009 6:00', '1/1/2009 7:00', '1/1/2009 8:00', '1/1/2009 9:00', '1/1/2009 10:00', '1/1/2009 11:00', '1/1/2009 12:00', '1/1/2009 13:00', '1/1/2009 14:00', '1/1/2009 15:00', '1/1/2009 16:00', '1/1/2009 17:00', '1/1/2009 18:00', '1/1/2009 19:00', '1/1/2009 20:00', '1/1/2009 21:00', '1/1/2009 22:00', '1/1/2009 23:00',
'2/1/2009 0:00', '2/1/2009 1:00', '2/1/2009 2:00', '2/1/2009 3:00', '2/1/2009 4:00', '2/1/2009 5:00', '2/1/2009 6:00', '2/1/2009 7:00', '2/1/2009 8:00', '2/1/2009 9:00', '2/1/2009 10:00', '2/1/2009 11:00', '2/1/2009 12:00', '2/1/2009 13:00', '2/1/2009 14:00', '2/1/2009 15:00', '2/1/2009 16:00', '2/1/2009 17:00', '2/1/2009 18:00', '2/1/2009 19:00', '2/1/2009 20:00', '2/1/2009 21:00', '2/1/2009 22:00', '2/1/2009 23:00',
'3/1/2009 0:00', '3/1/2009 1:00', '3/1/2009 2:00', '3/1/2009 3:00', '3/1/2009 4:00', '3/1/2009 5:00', '3/1/2009 6:00', '3/1/2009 7:00', '3/1/2009 8:00', '3/1/2009 9:00', '3/1/2009 10:00', '3/1/2009 11:00', '3/1/2009 12:00', '3/1/2009 13:00', '3/1/2009 14:00', '3/1/2009 15:00', '3/1/2009 16:00', '3/1/2009 17:00', '3/1/2009 18:00', '3/1/2009 19:00', '3/1/2009 20:00', '3/1/2009 21:00', '3/1/2009 22:00', '3/1/2009 23:00'
),
"Value" = c(
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72
)
)
result <- get.FillValueForTimeSeries(data)
for(i in 1:24) {
if(result$Value[i] != (i+24)) {
print("Error: value[i] must be i+24")
}
}
}
testEndNA <- function() {
data <- data.frame(
"ReadingDateTime"=c(
'1/1/2009 0:00', '1/1/2009 1:00', '1/1/2009 2:00', '1/1/2009 3:00', '1/1/2009 4:00', '1/1/2009 5:00', '1/1/2009 6:00', '1/1/2009 7:00', '1/1/2009 8:00', '1/1/2009 9:00', '1/1/2009 10:00', '1/1/2009 11:00', '1/1/2009 12:00', '1/1/2009 13:00', '1/1/2009 14:00', '1/1/2009 15:00', '1/1/2009 16:00', '1/1/2009 17:00', '1/1/2009 18:00', '1/1/2009 19:00', '1/1/2009 20:00', '1/1/2009 21:00', '1/1/2009 22:00', '1/1/2009 23:00',
'2/1/2009 0:00', '2/1/2009 1:00', '2/1/2009 2:00', '2/1/2009 3:00', '2/1/2009 4:00', '2/1/2009 5:00', '2/1/2009 6:00', '2/1/2009 7:00', '2/1/2009 8:00', '2/1/2009 9:00', '2/1/2009 10:00', '2/1/2009 11:00', '2/1/2009 12:00', '2/1/2009 13:00', '2/1/2009 14:00', '2/1/2009 15:00', '2/1/2009 16:00', '2/1/2009 17:00', '2/1/2009 18:00', '2/1/2009 19:00', '2/1/2009 20:00', '2/1/2009 21:00', '2/1/2009 22:00', '2/1/2009 23:00',
'3/1/2009 0:00', '3/1/2009 1:00', '3/1/2009 2:00', '3/1/2009 3:00', '3/1/2009 4:00', '3/1/2009 5:00', '3/1/2009 6:00', '3/1/2009 7:00', '3/1/2009 8:00', '3/1/2009 9:00', '3/1/2009 10:00', '3/1/2009 11:00', '3/1/2009 12:00', '3/1/2009 13:00', '3/1/2009 14:00', '3/1/2009 15:00', '3/1/2009 16:00', '3/1/2009 17:00', '3/1/2009 18:00', '3/1/2009 19:00', '3/1/2009 20:00', '3/1/2009 21:00', '3/1/2009 22:00', '3/1/2009 23:00'
),
"Value" = c(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA
)
)
result <- get.FillValueForTimeSeries(data)
for(i in 49:72) {
if(result$Value[i] != (i-24)) {
print("Error: value[i] must be i-24")
}
}
}
testFirstTwoNA <- function() {
data <- data.frame(
"ReadingDateTime"=c(
'1/1/2009 0:00', '1/1/2009 1:00', '1/1/2009 2:00', '1/1/2009 3:00', '1/1/2009 4:00', '1/1/2009 5:00', '1/1/2009 6:00', '1/1/2009 7:00', '1/1/2009 8:00', '1/1/2009 9:00', '1/1/2009 10:00', '1/1/2009 11:00', '1/1/2009 12:00', '1/1/2009 13:00', '1/1/2009 14:00', '1/1/2009 15:00', '1/1/2009 16:00', '1/1/2009 17:00', '1/1/2009 18:00', '1/1/2009 19:00', '1/1/2009 20:00', '1/1/2009 21:00', '1/1/2009 22:00', '1/1/2009 23:00',
'2/1/2009 0:00', '2/1/2009 1:00', '2/1/2009 2:00', '2/1/2009 3:00', '2/1/2009 4:00', '2/1/2009 5:00', '2/1/2009 6:00', '2/1/2009 7:00', '2/1/2009 8:00', '2/1/2009 9:00', '2/1/2009 10:00', '2/1/2009 11:00', '2/1/2009 12:00', '2/1/2009 13:00', '2/1/2009 14:00', '2/1/2009 15:00', '2/1/2009 16:00', '2/1/2009 17:00', '2/1/2009 18:00', '2/1/2009 19:00', '2/1/2009 20:00', '2/1/2009 21:00', '2/1/2009 22:00', '2/1/2009 23:00',
'3/1/2009 0:00', '3/1/2009 1:00', '3/1/2009 2:00', '3/1/2009 3:00', '3/1/2009 4:00', '3/1/2009 5:00', '3/1/2009 6:00', '3/1/2009 7:00', '3/1/2009 8:00', '3/1/2009 9:00', '3/1/2009 10:00', '3/1/2009 11:00', '3/1/2009 12:00', '3/1/2009 13:00', '3/1/2009 14:00', '3/1/2009 15:00', '3/1/2009 16:00', '3/1/2009 17:00', '3/1/2009 18:00', '3/1/2009 19:00', '3/1/2009 20:00', '3/1/2009 21:00', '3/1/2009 22:00', '3/1/2009 23:00'
),
"Value" = c(
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72
)
)
result <- get.FillValueForTimeSeries(data)
for(i in 1:24) {
if(result$Value[i] != (i+48)) {
print("Error: value[i] must be i+48")
}
}
for(i in 25:48) {
if(result$Value[i] != (i+24)) {
print("Error: value[i] must be i+24")
}
}
}
testLastTwoNA <- function() {
data <- data.frame(
"ReadingDateTime"=c(
'1/1/2009 0:00', '1/1/2009 1:00', '1/1/2009 2:00', '1/1/2009 3:00', '1/1/2009 4:00', '1/1/2009 5:00', '1/1/2009 6:00', '1/1/2009 7:00', '1/1/2009 8:00', '1/1/2009 9:00', '1/1/2009 10:00', '1/1/2009 11:00', '1/1/2009 12:00', '1/1/2009 13:00', '1/1/2009 14:00', '1/1/2009 15:00', '1/1/2009 16:00', '1/1/2009 17:00', '1/1/2009 18:00', '1/1/2009 19:00', '1/1/2009 20:00', '1/1/2009 21:00', '1/1/2009 22:00', '1/1/2009 23:00',
'2/1/2009 0:00', '2/1/2009 1:00', '2/1/2009 2:00', '2/1/2009 3:00', '2/1/2009 4:00', '2/1/2009 5:00', '2/1/2009 6:00', '2/1/2009 7:00', '2/1/2009 8:00', '2/1/2009 9:00', '2/1/2009 10:00', '2/1/2009 11:00', '2/1/2009 12:00', '2/1/2009 13:00', '2/1/2009 14:00', '2/1/2009 15:00', '2/1/2009 16:00', '2/1/2009 17:00', '2/1/2009 18:00', '2/1/2009 19:00', '2/1/2009 20:00', '2/1/2009 21:00', '2/1/2009 22:00', '2/1/2009 23:00',
'3/1/2009 0:00', '3/1/2009 1:00', '3/1/2009 2:00', '3/1/2009 3:00', '3/1/2009 4:00', '3/1/2009 5:00', '3/1/2009 6:00', '3/1/2009 7:00', '3/1/2009 8:00', '3/1/2009 9:00', '3/1/2009 10:00', '3/1/2009 11:00', '3/1/2009 12:00', '3/1/2009 13:00', '3/1/2009 14:00', '3/1/2009 15:00', '3/1/2009 16:00', '3/1/2009 17:00', '3/1/2009 18:00', '3/1/2009 19:00', '3/1/2009 20:00', '3/1/2009 21:00', '3/1/2009 22:00', '3/1/2009 23:00'
),
"Value" = c(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA
)
)
result <- get.FillValueForTimeSeries(data)
for(i in 25:48) {
if(result$Value[i] != (i-24)) {
print("Error: value[i] must be i-24")
}
}
for(i in 49:72) {
if(result$Value[i] != (i-48)) {
print("Error: value[i] must be i-48")
}
}
}
testMiddleNA()
testBeginNA()
testEndNA()
testFirstTwoNA()
testLastTwoNA() |
# Auto-extracted example script for the quanteda 'ntoken'/'ntype' Rd page.
# ntoken() counts tokens per document, ntype() counts unique types; both are
# exercised on a toy two-document vector and on the inaugural-speech corpus.
library(quanteda)
### Name: ntoken
### Title: Count the number of tokens or types
### Aliases: ntoken ntype
### ** Examples
# simple example
txt <- c(text1 = "This is a sentence, this.", text2 = "A word. Repeated repeated.")
ntoken(txt)
ntype(txt)
ntoken(char_tolower(txt)) # same
ntype(char_tolower(txt)) # fewer types
ntoken(char_tolower(txt), remove_punct = TRUE)
ntype(char_tolower(txt), remove_punct = TRUE)
# with some real texts
ntoken(corpus_subset(data_corpus_inaugural, Year<1806), remove_punct = TRUE)
ntype(corpus_subset(data_corpus_inaugural, Year<1806), remove_punct = TRUE)
ntoken(dfm(corpus_subset(data_corpus_inaugural, Year<1800)))
ntype(dfm(corpus_subset(data_corpus_inaugural, Year<1800)))
| /data/genthat_extracted_code/quanteda/examples/ntoken.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 721 | r | library(quanteda)
### Name: ntoken
### Title: Count the number of tokens or types
### Aliases: ntoken ntype
### ** Examples
# simple example
txt <- c(text1 = "This is a sentence, this.", text2 = "A word. Repeated repeated.")
ntoken(txt)
ntype(txt)
ntoken(char_tolower(txt)) # same
ntype(char_tolower(txt)) # fewer types
ntoken(char_tolower(txt), remove_punct = TRUE)
ntype(char_tolower(txt), remove_punct = TRUE)
# with some real texts
ntoken(corpus_subset(data_corpus_inaugural, Year<1806), remove_punct = TRUE)
ntype(corpus_subset(data_corpus_inaugural, Year<1806), remove_punct = TRUE)
ntoken(dfm(corpus_subset(data_corpus_inaugural, Year<1800)))
ntype(dfm(corpus_subset(data_corpus_inaugural, Year<1800)))
|
# Build per-record comorbidity flags from simulated diagnosis data, using the
# ICD-9 sub-chapter map in source/icd9_codes_2018_07_21.csv.
# library() (not require()) so a missing package fails loudly here.
library(data.table)
library(icd)
# Loads the dt.sim object into the workspace.
load("source/dt.sim.RData")
dt.dx <- dt.sim
dt.dx
length(unique(dt.dx$Patient_ID))
length(unique(dt.dx$Record))
# 995
# Codes are read as character so leading zeros are preserved.
map <- fread("source/icd9_codes_2018_07_21.csv",
             colClasses = c("character"))
# One map entry per sub-chapter, each holding its vector of ICD-9 codes.
l1 <- as.comorbidity_map(split(x = map$code,
                               f = map$sub_chapter))
l1
# idIn <- "Patient_ID"
idIn <- "Record"
icdIn <- c("DX1",
           "DX2")
# One logical comorbidity matrix per diagnosis column; preallocate the list
# instead of growing it inside the loop.
dtt <- vector("list", length(icdIn))
for (i in seq_along(icdIn)) {
  dtt[[i]] <- comorbid(x = dt.dx,
                       map = l1,
                       visit_name = idIn,
                       icd_name = icdIn[i])
}
# A record is flagged for a comorbidity if ANY diagnosis column matched:
# sum the per-column logical matrices and test for > 0.
dt.comorb <- data.table(unique(dt.dx[,
                                     colnames(dt.dx) == idIn,
                                     with = FALSE]),
                        apply(Reduce("+",
                                     dtt),
                              MARGIN = 2,
                              function(a) {
                                a > 0
                              }))
head(dt.comorb)
summary(dt.comorb)
| /source/icd9_convert_v1.R | no_license | CVIRU/shiny.icd | R | false | false | 1,052 | r | require(data.table)
require(icd)
load("source/dt.sim.RData")
dt.dx <- dt.sim
dt.dx
length(unique(dt.dx$Patient_ID))
length(unique(dt.dx$Record))
# 995
map <- fread("source/icd9_codes_2018_07_21.csv",
colClasses = c("character"))
l1 <- as.comorbidity_map(split(x = map$code,
f = map$sub_chapter))
l1
# idIn <- "Patient_ID"
idIn <- "Record"
icdIn <- c("DX1",
"DX2")
dtt <- list()
for(i in 1:length(icdIn)){
dtt[[i]] <- comorbid(x = dt.dx,
map = l1,
visit_name = idIn,
icd_name = icdIn[i])
}
dt.comorb <- data.table(unique(dt.dx[,
colnames(dt.dx) == idIn,
with = FALSE]),
apply(Reduce("+",
dtt),
MARGIN = 2,
function(a){
a > 0
}))
head(dt.comorb)
summary(dt.comorb)
|
# plot3.R -- plots the three energy sub-metering series for 2007-02-01 and
# 2007-02-02 of the household power consumption data set, saved to plot3.png.
library(gdata)
library(chron)
library(sqldf)
library(RMySQL)
library(RSQLite)
library(ggplot2)
library(lubridate)
library(pryr)
library(tcltk2)

## Reading the data with SQL so only the two target dates are ever loaded.
alldata <- "C:/Users/Andres/Desktop/Online_courses/Exploratory Analysis/Project_1/household_power_consumption.txt"
data <- read.csv.sql(alldata,
                     sql = 'select * from file where Date = "1/2/2007" OR Date = "2/2/2007"',
                     header = TRUE, sep = ";")

# Open the PNG device; closed by dev.off() at the end.
png(file = "plot3.png")

# Weekday labels used for the custom x axis below.
time <- wday(data$Date, label = TRUE, abbr = TRUE)

sub_metering_1 <- data$Sub_metering_1
sub_metering_2 <- data$Sub_metering_2
sub_metering_3 <- data$Sub_metering_3

# Overlay the three series on one plot; suppress the default x axis (xaxt="n")
# so the weekday axis can be drawn manually.
plot(sub_metering_1, type = "l", ylab = "Energy Sub metering", xlab = "",
     col = "black", xaxt = "n")
lines(sub_metering_2, col = "red1")
lines(sub_metering_3, col = "blue")

# Custom x axis: first, middle and last observation, labelled by weekday.
# The last label is now derived from the data like the other two, instead of
# the previously hard-coded "Sat".
axis(1, at = c(1, length(time) / 2 + 1, length(time)),
     labels = c(toString(time[1]), toString(time[length(time) / 2 + 1]),
                toString(time[length(time)])),
     tck = -0.01)

# Legend identifying the three series.
legend("topright", inset = .001,
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lwd = 1, lty = 1, col = c("black", "red1", "blue"), horiz = FALSE)
dev.off()
| /plot3.R | no_license | aazqueta/Exploratory-Data | R | false | false | 1,295 | r | library(gdata)
library(chron)
library(sqldf)
library(RMySQL)
library(RSQLite)
library(ggplot2)
library(lubridate)
library(pryr)
library(tcltk2)
## Reading the data with SQL program
alldata<- c("C:/Users/Andres/Desktop/Online_courses/Exploratory Analysis/Project_1/household_power_consumption.txt")
data <- read.csv.sql(alldata, sql = 'select * from file where Date = "1/2/2007" OR Date = "2/2/2007"',
header = TRUE, sep = ";")
# Opening the plot
png(file="plot3.png")
# set the time as X-legend
time <- wday(data$Date, label = TRUE, abbr = TRUE)
# set the Y-legend
sub_metering_1<-data$Sub_metering_1
sub_metering_2<-data$Sub_metering_2
sub_metering_3<-data$Sub_metering_3
# Fitting the different plots in the same graph
plot(sub_metering_1 , type="l", ylab = "Energy Sub metering", xlab="", col="black", xaxt="n" )
lines(sub_metering_2, col="red1")
lines(sub_metering_3, col="blue")
#Costumizing the x axis
axis(1, at=c(1,length(time)/2+1,length(time) ),
labels=c( toString(time[1]), toString(time[length(time)/2+1]), "Sat"),
tck=-0.01)
#Adding the leggend to the plots
legend("topright", inset=.001,
c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1, lty=1, col=c("black", "red1", "blue"), horiz=FALSE)
dev.off()
|
# Auto-extracted example script for the BBmisc 'clipString' Rd page:
# clipString() shortens a string to the requested length, replacing the tail
# with an ellipsis when it does not fit.
library(BBmisc)
### Name: clipString
### Title: Shortens strings to a given length.
### Aliases: clipString
### ** Examples
print(clipString("abcdef", 10))
print(clipString("abcdef", 5))
| /data/genthat_extracted_code/BBmisc/examples/clipString.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 194 | r | library(BBmisc)
### Name: clipString
### Title: Shortens strings to a given length.
### Aliases: clipString
### ** Examples
print(clipString("abcdef", 10))
print(clipString("abcdef", 5))
|
# Works with snapshots data to get information on page "likes" and "PTAT"
# ("People Talking About This") for five Facebook pages over time.
library(ggplot2)
library(scales)
library(dplyr)
library(reshape2)  # melt() is used throughout but was never loaded -- bug fix

# Read one snapshot CSV: derive a Date column ('created') from the first ten
# characters of created_at and prefix the original columns with a
# page-specific letter, exactly as the original per-page blocks did.
read_snapshot <- function(path, prefix) {
  df <- read.csv(path, sep = ",", header = TRUE)
  df$created <- as.Date(substr(df$created_at, 0, 10))
  names(df) <- c(paste0(prefix, c("likes", "talking", "created_at")), "created")
  df
}

# Reading Data
m <- read_snapshot("data/output-microsoft.csv", "m")
w <- read_snapshot("data/snapshots-walmart.csv", "w")
g <- read_snapshot("data/snapshots-ge-appliances.csv", "g")
j <- read_snapshot("data/snapshots-johnson-s-baby.csv", "j")
x <- read_snapshot("data/snapshots-xbox.csv", "x")

# Putting it all together: outer merge on the shared 'created' date column,
# then keep only snapshots taken after 2013-01-31.
data <- Reduce(function(x, y) merge(x, y, all = TRUE), list(m, w, g, j, x))
data <- data[data$created > as.Date("2013-01-31"), ]

# Graphing 'Likes' over time (columns picked by position as in the original;
# they are then reordered before being renamed).
dataLikes <- data[, c(1, 2, 5, 8, 11, 14)]
dataLikes <- dataLikes[, c(1, 3, 6, 2, 5, 4)]
names(dataLikes) <- c("created", "Walmart", "Xbox", "Microsoft",
                      "Johnson's Baby", "GE Appliances")
meltdata <- melt(dataLikes, id = "created")
names(meltdata)[2] <- "Page"
theme_new <- theme_set(theme_bw())
p <- ggplot(data = meltdata, aes(x = created, y = value, color = Page,
                                 format(scientific = FALSE))) +
  geom_line(size = 1) +
  scale_y_continuous(labels = comma) +
  xlab("") +
  ylab("Likes") +
  labs(title = "'Likes' for Facebook Pages")
p

# Annual Percentage Increase in Likes (first snapshot vs 2014-02-01).
pchg <- (dataLikes[dataLikes$created == as.Date("2014-02-01"), 2:6] /
           dataLikes[1, 2:6] - 1) * 100
pchg

# Graphing 'Talking About' (PTAT) over time
dataTalk <- data[, c(1, 3, 6, 9, 12, 15)]
dataTalk <- dataTalk[, c(1, 3, 6, 2, 5, 4)]
names(dataTalk) <- c("created", "Walmart", "Xbox", "Microsoft",
                     "Johnson's Baby", "GE Appliances")
meltdata <- melt(dataTalk, id = "created")
names(meltdata)[2] <- "Page"
theme_new <- theme_set(theme_bw())
q <- ggplot(data = meltdata, aes(x = created, y = value, color = Page,
                                 format(scientific = FALSE))) +
  geom_line(size = 1) +
  scale_y_continuous(labels = comma) +
  xlab("") +
  ylab("PTAT") +
  labs(title = "'PTAT' for Facebook Pages")
q
qplot(created, wtalking, data = data)

# PTAT to Likes ratio, in percent.
ratio <- (dataTalk[, 2:6] / dataLikes[, 2:6]) * 100
ratio <- cbind(data$created, ratio)
# cbind() names the date column "data$created", hence the id below.
meltdata <- melt(ratio, id = "data$created")
names(meltdata) <- c("created", "Page", "value")
r <- ggplot(data = meltdata, aes(x = created, y = value, color = Page,
                                 format(scientific = FALSE))) +
  geom_line(size = 1) +
  scale_y_continuous(labels = comma) +
  xlab("") +
  ylab("Ratio") +
  labs(title = "'PTAT' to 'Likes' Ratio")
r
summary(ratio)
| /snapshots.r | no_license | smc-dta/Facebook-Pages-Analysis-Corporate | R | false | false | 3,143 | r | #works with snapshots data to get information on page ¨likes¨ and ¨PTAT¨ over time
library(ggplot2)
library(scales)
library(dplyr)
#Reading Data
m <- read.csv("data/output-microsoft.csv", sep=",", header = TRUE)
m$created <- substr(m$created_at, 0, 10)
m$created <- as.Date(m$created)
names(m) <- c("mlikes","mtalking", "mcreated_at", "created")
w <- read.csv("data/snapshots-walmart.csv", sep=",", header = TRUE)
w$created <- substr(w$created_at, 0, 10)
w$created <- as.Date(w$created)
names(w) <- c("wlikes","wtalking", "wcreated_at", "created")
g <- read.csv("data/snapshots-ge-appliances.csv", sep=",", header = TRUE)
g$created <- substr(g$created_at, 0, 10)
g$created <- as.Date(g$created)
names(g) <- c("glikes","gtalking", "gcreated_at", "created")
j <- read.csv("data/snapshots-johnson-s-baby.csv", sep=",", header = TRUE)
j$created <- substr(j$created_at, 0, 10)
j$created <- as.Date(j$created)
names(j) <- c("jlikes","jtalking", "jcreated_at", "created")
x <- read.csv("data/snapshots-xbox.csv", sep=",", header = TRUE)
x$created <- substr(x$created_at, 0, 10)
x$created <- as.Date(x$created)
names(x) <- c("xlikes","xtalking", "xcreated_at", "created")
#Putting it all together
data <- Reduce(function(x, y) merge(x, y, all=TRUE), list(m, w, g,j,x))
data <- data[data$created > as.Date("2013-01-31"),]
#Graphing 'Likes' over time
dataLikes <- data[,c(1,2,5,8,11,14)]
dataLikes <- dataLikes[,c(1,3,6,2,5,4)]
names(dataLikes) <- c("created", "Walmart","Xbox","Microsoft", "Johnson's Baby","GE Appliances")
meltdata <- melt(dataLikes, id="created")
names(meltdata)[2] <- 'Page'
theme_new <- theme_set(theme_bw())
p <- ggplot(data = meltdata, aes(x = created, y = value, color = Page,
format(scientific=FALSE))) +
geom_line(size = 1) +
scale_y_continuous(labels = comma) +
xlab('') +
ylab('Likes')+
labs(title = "'Likes' for Facebook Pages")
p
#Annual Percentage Increase in Likes
pchg <- (dataLikes[dataLikes$created == as.Date("2014-02-01"),2:6]/dataLikes[1,2:6]-1)*100
pchg
#Graphing 'Talking About' over time
dataTalk <- data[,c(1,3,6,9,12,15)]
dataTalk <- dataTalk[,c(1,3,6,2,5,4)]
names(dataTalk) <- c("created", "Walmart","Xbox","Microsoft", "Johnson's Baby","GE Appliances")
meltdata <- melt(dataTalk, id="created")
names(meltdata)[2] <- 'Page'
theme_new <- theme_set(theme_bw())
q <- ggplot(data = meltdata, aes(x = created, y = value, color = Page,
format(scientific=FALSE))) +
geom_line(size = 1) +
scale_y_continuous(labels = comma )+
xlab('') +
ylab('PTAT')+
labs(title = "'PTAT' for Facebook Pages")
q
qplot(created,wtalking,data=data)
#PTAT to Likes ratio
ratio <- (dataTalk[,2:6]/dataLikes[,2:6])*100
ratio <- cbind(data$created,ratio)
meltdata <- melt(ratio, id="data$created")
names(meltdata) <- c("created","Page","value")
r <- ggplot(data = meltdata, aes(x = created, y = value, color = Page,
format(scientific=FALSE))) +
geom_line(size = 1) +
scale_y_continuous(labels = comma )+
xlab('') +
ylab('Ratio')+
labs(title = "'PTAT' to 'Likes' Ratio")
r
summary(ratio)
|
# Pedram Jahangiry
# This is a quick review of R programming basics (tutorial script: every
# bare expression is meant to be run interactively so its value prints).
## data types in R
# Prefix any function with ? to open its help page, e.g. ?typeof.
#---------------------------------------------------------------------------------------
# 1- integer (if you don't put "L" then it will be double by default)
x <- 2L
typeof(x)
is.integer(x)
#---------------------------------------------------------------------------------------
# 2- double
y<- 2.5
is.double(y)
#---------------------------------------------------------------------------------------
# 3- character
a <- "hello"
typeof(a)
#---------------------------------------------------------------------------------------
# 4- logical
x1 <- TRUE # prefer the full TRUE over the reassignable shorthand T
x2 <- F # F or False (again, FALSE is the safer spelling)
## logicals
# comparison/logical operators: > < == != !(not) |(or) &(and); isTRUE(x) tests for a single TRUE
x<- 4!=5
isTRUE(x)
y<- !(4<5)
y
isTRUE(y)
#---------------------------------------------------------------------------------------
# 5 vector: note that vector has 1 type, not a combination
c(1,2,3) # combine function
seq(1,5) # sequence function (like 1:5)
seq(1,10,2)
seq(1,10,length.out = 100) # 100 evenly spaced values between 1 and 10
x<- c("a",2,3)
seq(1,9, along.with =x ) # a sequence from 1 with as many elements as x has
#replicating
rep(1,3)
rep("pedram",2)
rep(c(1,0),times=5)
rep(c(1,0),each=5)
# brackets[]
x<- c(10,20,30,40,50)
x[2]
x[-3] # negative sign means do not include
x[1:3]
x[c(1,3)]
x[c(-2,-4,-5)]
x[-1:-2]
x[-2:-1]
x[20] # out-of-range index: returns NA
# vector arithmetic (element by element)
a<- 1:3
b<- c(2,1,5)
a+b
a>b
a/b
a*b
#---------------------------------------------------------------------------------------
## 6 Matrix
mydata<- 1:20
A<- matrix(mydata,nrow = 4,ncol = 5)
A
B<- matrix(mydata,nrow = 4,ncol = 5, byrow = TRUE)
B
A[,1]
A[1:2,]
colnames(A)<- c("a","b","c","d","e")
A
A[,"a"]
colnames(A)<- NULL # clearing names
# some R-specific functions: rbind() , cbind(), rownames(), colnames()
# operations in matrix:
# / * + - are element by element
# %*% is matrix multiplication
A/B
A*B
A %*% t(B)
Z<- matrix(1:4,2,2)
solve(Z) # inverse matrix
Z %*% solve(Z) # a matrix times its inverse gives the identity matrix
# how to create identity matrix? see diag(n)
#---------------------------------------------------------------------------------------
# 7 Factors
# Create a vector.
apple_colors <- c('green','green','yellow','red','red','red','green')
# Create an ordered factor object with an explicit level order.
factor_apple <- factor(apple_colors, levels = c("red",'yellow','green') , ordered=TRUE)
# Print the factor.
factor_apple
nlevels(factor_apple)
as.numeric(factor_apple) # underlying integer codes, per the level order above
#---------------------------------------------------------------------------------------
# 8- Data frames
my_dataframe <- data.frame( student_name=c("PJ", "TJ", "MJ"),
                            gender= c("Male", "Male", "female"),
                            GPA=c(3.9,4,3.8))
my_dataframe
nrow(my_dataframe)
ncol(my_dataframe)
head(my_dataframe, n=2)
tail(my_dataframe)
str(my_dataframe) # for job interview str() and runif(), the latter is random uniform :)
summary(my_dataframe)
# extracting info from data frames
my_dataframe$student_name
my_dataframe["student_name"] # single brackets keep a data frame
my_dataframe[c("student_name", "gender")]
my_dataframe[,-3] # drop the third column
my_dataframe[-2, ] # drop the second row
#---------------------------------------------------------------------------------------
## while
counter <- 1
while(counter < 11){
print(counter)
counter<- counter+1
}
## repeat
v <- c("Hello","loop")
cnt <- 1
repeat {
print(v)
cnt <- cnt+1
if(cnt > 5) {
break
}
}
## for
for(i in seq(1,10,3)){
print(paste("we are in number",i))
}
# if
x <- rnorm(n = 1, mean = 0,sd = 2)
x
if(x>1){
answer<- "greater than 1"
} else if(x>= -1) {
answer <- "btw -1 and 1"
} else{
answer <- "less than -1"
}
answer
# creating functions
my_add_function_plus3 <- function(x1,x2,x3){ x1+x2 + x3 +3}
my_add_function_plus3(1,1,1)
#built-in data in R studio
data()
#example
df <- mtcars
str(df)
table(df$cyl)
# instaling packages
# install.packages(c("wooldridge", "dplyr")) # run this line only once
library(wooldridge)
data()
# working with dplyr package (explore the cheatsheet from the help menu)
library(dplyr)
new_df <- data.frame(names=c("PJ", "TJ", "CJ", "MJ"), GPA=c(2,3.8,3.5, 4))
new_df
# useful functions in dplyr
?mutate()
?filter()
?select()
?arrange()
# pipe operator %>%
# mutate()
mutate(new_df, height = c(180,170,175, 172)) # adding a new column
new_df
new_df <- mutate(new_df, height = c(180,170,175,190))
new_df
new_df <- mutate(new_df, is_pass= ifelse(GPA>3.6,"pass", "fail"))
new_df
# filter()
new_df
filter(new_df, is_pass=="fail")
filter(new_df, GPA>3)
filter(new_df, GPA>3 & height>=175)
# arrange()
arrange(new_df, GPA)
arrange(new_df, desc(GPA))
# select()
new_df$height
new_df["height"]
select(new_df, height)
select(new_df, c(names,is_pass))
# So what is the power of select?
names(wage2)
select(wage2, contains("educ"))
head(select(wage2, starts_with("E"))) # what if I put a negative sign before start_with ?
head(select(wage2, ends_with("c")))
# you want even more power in terms of selecting columns? google grep() function Rdocumentation!
# combining functions
filter(new_df, GPA>3 & height>=170)
arrange(filter(new_df, GPA>3 & height>=170),desc(height))
select(arrange(filter(new_df, GPA>3 & height>=170),desc(height)), names)
# using pipe operator
new_df %>% filter(GPA>3 & height>=170) %>% arrange(desc(height)) %>% select(names)
# working with wooldridge data
# example : wage2
df<- wage2
head(df)
str(df)
summary(df)
# Handling missing data
# install.packages("visdat")
library(visdat)
vis_dat(df)
vis_miss(df)
# cleaning the data set
df_clean <-na.omit(df)
# Data tables
df <- select(df_clean, c("wage", "hours", "IQ", "educ", "age", "married", "black"))
head(df)
table(df$married)
table(df$black)
my_table<- table(df$married, df$black)
my_table
# changing the names in rows and columns:
colnames(my_table) <- c("non_black", "black")
rownames(my_table)<- c("non_married", "married")
my_table
# proportion tables
prop.table(my_table,margin= 1)
prop.table(my_table,margin= 2)
# factorizing some variables (we will use this for making dummy variables later in the course)
df$married <- factor(df$married)
df$black <- factor(df$black)
str(df)
# some basic plots
df<- wage2
# histogram
hist(df$wage)
hist(df$wage, xlab = "wage", col = "blue", breaks = 5)
# scatter plot
plot(df$educ, df$wage, xlab = "education", ylab = "Wage")
#---------------------------------------------------------------------------------------
# 9- List: list is a generic collection of objects
my_list <- list()
my_list[[1]] <- "Hello"
my_list[[2]] <- c(1,2,3)
my_list[[3]] <- data.frame(name=c("A", "B"), value=c(4,3.8))
my_list[[3]]
my_list[[3]][1,2]
| /Classes/3-4 Introduction to R and markdown/R basics/R_basics.R | no_license | PJalgotrader/Econometrics-USU-SP21 | R | false | false | 6,711 | r | # Pedram Jahangiry
# this is a quick review for the R programming
## data types in R
# print ? before any function you want to know more about.
#---------------------------------------------------------------------------------------
# 1- integer (if you don't put "L" then it will be double by default)
x <- 2L
typeof(x)
is.integer(x)
#---------------------------------------------------------------------------------------
# 2- double
y<- 2.5
is.double(y)
#---------------------------------------------------------------------------------------
# 3- character
a <- "hello"
typeof(a)
#---------------------------------------------------------------------------------------
# 4- logical
x1 <- TRUE # T or True
x2 <- F # F or False
## logicals
# > < == != !(not) /(or) & isTrue(x)
x<- 4!=5
isTRUE(x)
y<- !(4<5)
y
isTRUE(y)
#---------------------------------------------------------------------------------------
# 5 vector: note that vector has 1 type, not a combination
c(1,2,3) # combine function
seq(1,5) # sequence function (like 1:5)
seq(1,10,2)
seq(1,10,length.out = 100) #100 pieces btw 1:10
x<- c("a",2,3)
seq(1,9, along.with =x ) # make 1:10 into number of x pieces
#replicating
rep(1,3)
rep("pedram",2)
rep(c(1,0),times=5)
rep(c(1,0),each=5)
# brackets[]
x<- c(10,20,30,40,50)
x[2]
x[-3] # negative sign means do not include
x[1:3]
x[c(1,3)]
x[c(-2,-4,-5)]
x[-1:-2]
x[-2:-1]
x[20]
# vector arithmetic
a<- 1:3
b<- c(2,1,5)
a+b
a>b
a/b
a*b
#---------------------------------------------------------------------------------------
## 6 Matrix
mydata<- 1:20
A<- matrix(mydata,nrow = 4,ncol = 5)
A
B<- matrix(mydata,nrow = 4,ncol = 5, byrow = TRUE)
B
A[,1]
A[1:2,]
colnames(A)<- c("a","b","c","d","e")
A
A[,"a"]
colnames(A)<- NULL # clearing names
# some R-specific functions: rbind() , cbind(), rownames(), colnames()
# operations in matrix:
# / * + - are element by element
# %*% is matrix multiplication
A/B
A*B
A %*% t(B)
Z<- matrix(1:4,2,2)
solve(Z) # inverse matrix
Z %*% solve(Z)
# how to create identity matrix? google it
#---------------------------------------------------------------------------------------
# 7 Factors
# Create a vector.
apple_colors <- c('green','green','yellow','red','red','red','green')
# Create a factor object.
factor_apple <- factor(apple_colors, levels = c("red",'yellow','green') , ordered=TRUE)
# Print the factor.
factor_apple
nlevels(factor_apple)
as.numeric(factor_apple)
#---------------------------------------------------------------------------------------
# 8- Data frames
my_dataframe <- data.frame( student_name=c("PJ", "TJ", "MJ"),
gender= c("Male", "Male", "female"),
GPA=c(3.9,4,3.8))
my_dataframe
nrow(my_dataframe)
ncol(my_dataframe)
head(my_dataframe, n=2)
tail(my_dataframe)
str(my_dataframe) # for job interview str() and runif(), the latter is random uniform :)
summary(my_dataframe)
# extracting info from data frames
my_dataframe$student_name
my_dataframe["student_name"]
my_dataframe[c("student_name", "gender")]
my_dataframe[,-3]
my_dataframe[-2, ]
#---------------------------------------------------------------------------------------
## while
counter <- 1
while(counter < 11){
print(counter)
counter<- counter+1
}
## repeat
v <- c("Hello","loop")
cnt <- 1
repeat {
print(v)
cnt <- cnt+1
if(cnt > 5) {
break
}
}
## for
for(i in seq(1,10,3)){
print(paste("we are in number",i))
}
# if
x <- rnorm(n = 1, mean = 0,sd = 2)
x
if(x>1){
answer<- "greater than 1"
} else if(x>= -1) {
answer <- "btw -1 and 1"
} else{
answer <- "less than -1"
}
answer
# creating functions
my_add_function_plus3 <- function(x1,x2,x3){ x1+x2 + x3 +3}
my_add_function_plus3(1,1,1)
#built-in data in R studio
data()
#example
df <- mtcars
str(df)
table(df$cyl)
# instaling packages
# install.packages(c("wooldridge", "dplyr")) # run this line only once
library(wooldridge)
data()
# working with dplyr package (explore the cheatsheet from the help menu)
library(dplyr)
new_df <- data.frame(names=c("PJ", "TJ", "CJ", "MJ"), GPA=c(2,3.8,3.5, 4))
new_df
# useful functions in dplyr
?mutate()
?filter()
?select()
?arrange()
# pipe operator %>%
# mutate()
mutate(new_df, height = c(180,170,175, 172)) # adding a new column
new_df
new_df <- mutate(new_df, height = c(180,170,175,190))
new_df
new_df <- mutate(new_df, is_pass= ifelse(GPA>3.6,"pass", "fail"))
new_df
# filter()
new_df
filter(new_df, is_pass=="fail")
filter(new_df, GPA>3)
filter(new_df, GPA>3 & height>=175)
# arrange()
arrange(new_df, GPA)
arrange(new_df, desc(GPA))
# select()
new_df$height
new_df["height"]
select(new_df, height)
select(new_df, c(names,is_pass))
# So what is the power of select?
names(wage2)
select(wage2, contains("educ"))
head(select(wage2, starts_with("E"))) # what if I put a negative sign before start_with ?
head(select(wage2, ends_with("c")))
# you want even more power in terms of selecting columns? google grep() function Rdocumentation!
# combining functions
filter(new_df, GPA>3 & height>=170)
arrange(filter(new_df, GPA>3 & height>=170),desc(height))
select(arrange(filter(new_df, GPA>3 & height>=170),desc(height)), names)
# using pipe operator
new_df %>% filter(GPA>3 & height>=170) %>% arrange(desc(height)) %>% select(names)
# working with wooldridge data
# example : wage2
df<- wage2
head(df)
str(df)
summary(df)
# Handling missing data
# install.packages("visdat")
library(visdat)
vis_dat(df)
vis_miss(df)
# cleaning the data set
df_clean <-na.omit(df)
# Data tables
df <- select(df_clean, c("wage", "hours", "IQ", "educ", "age", "married", "black"))
head(df)
table(df$married)
table(df$black)
my_table<- table(df$married, df$black)
my_table
# changing the names in rows and columns:
colnames(my_table) <- c("non_black", "black")
rownames(my_table)<- c("non_married", "married")
my_table
# proportion tables
prop.table(my_table,margin= 1)
prop.table(my_table,margin= 2)
# factorizing some variables (we will use this for making dummy variables later in the course)
df$married <- factor(df$married)
df$black <- factor(df$black)
str(df)
# some basic plots
df<- wage2
# histogram
hist(df$wage)
hist(df$wage, xlab = "wage", col = "blue", breaks = 5)
# scatter plot
plot(df$educ, df$wage, xlab = "education", ylab = "Wage")
#---------------------------------------------------------------------------------------
# 9- List: list is a generic collection of objects
my_list <- list()
my_list[[1]] <- "Hello"
my_list[[2]] <- c(1,2,3)
my_list[[3]] <- data.frame(name=c("A", "B"), value=c(4,3.8))
my_list[[3]]
my_list[[3]][1,2]
|
# Auto-generated fuzz-test case (RcppDeepState/AFL-style harness): feeds a
# fixed, fuzzer-generated argument list into dynutils::project_to_segments
# and prints the structure of the result; used to reproduce crashes or
# valgrind findings deterministically.
# NOTE(review): 'end' and 'start' are NULL and the matrices contain
# denormal/extreme doubles -- this deliberately exercises degenerate inputs.
testlist <- list(end = NULL, start = NULL, x = structure(c(4.65661288170191e-10, 6.95356800386775e-310, 2.32903286132618e+96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L)))
result <- do.call(dynutils::project_to_segments,testlist)
str(result)
result <- do.call(dynutils::project_to_segments,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_functions.R
\name{directory.asps.list}
\alias{directory.asps.list}
\title{List the ASPs issued by a user.}
\usage{
directory.asps.list(userKey)
}
\arguments{
\item{userKey}{Identifies the user in the API request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/admin.directory.user.security
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/admin.directory.user.security"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/admin-sdk/directory/}{Google Documentation}
}
| /googleadmindirectoryv1.auto/man/directory.asps.list.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 845 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_functions.R
\name{directory.asps.list}
\alias{directory.asps.list}
\title{List the ASPs issued by a user.}
\usage{
directory.asps.list(userKey)
}
\arguments{
\item{userKey}{Identifies the user in the API request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/admin.directory.user.security
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/admin.directory.user.security)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/admin-sdk/directory/}{Google Documentation}
}
|
# Venn-style diagram illustrating the relationship between a population and
# a sample drawn from it: the sample circle sits nested inside (and
# overlapping the edge of) the population circle. Labels are in Spanish
# ("Población" = population, "Muestra" = sample) and must stay as-is.
# NOTE(review): draw.circle() comes from the 'plotrix' package, which is not
# loaded in this snippet -- assumes the caller has attached it beforehand.
# Empty canvas with all axes/annotations suppressed, then the two circles.
plot(0, xlim = c(-10,10), ylim = c(-10,10), type="n", xlab = "", ylab = "", bty = "n", xaxt = "n",yaxt = "n", main = "Población vs. Muestra")
draw.circle(0,0,6.4, border = "black", col = "yellow")
draw.circle(3,0,3, border = "black", col = "red")
text(-3.5,0, "Población", font = 2)
text(3,0, "Muestra", font = 2)
| /Tendencia_Central/mod1_pop_vs_samp.R | no_license | alcideschaux/Analisis_de_Datos | R | false | false | 316 | r | plot(0, xlim = c(-10,10), ylim = c(-10,10), type="n", xlab = "", ylab = "", bty = "n", xaxt = "n",yaxt = "n", main = "Población vs. Muestra")
draw.circle(0,0,6.4, border = "black", col = "yellow")
draw.circle(3,0,3, border = "black", col = "red")
text(-3.5,0, "Población", font = 2)
text(3,0, "Muestra", font = 2)
|
#' Create the ss3sim logo
#'
#' Generate and save, if \code{outfile} is provided,
#' the ss3sim logo using the built-in data: a ribbon plot of the
#' quantiles of relative error in spawning biomass over time.
#'
#' @template outfile
#' @examples
#' ss3sim:::create_logo()
#' dev.off()
#' @return A \code{png} file or a graphics device with the
#' logo used for the \code{ss3sim} project.
#' @author Kelli Faye Johnson
#'
create_logo <- function(outfile = NULL) {
  # Open a png device only when a path is supplied; otherwise draw on the
  # currently active device. on.exit() guarantees the file device is
  # closed even if plotting fails part-way through.
  if (!is.null(outfile)) {
    grDevices::png(outfile,
      width = 4, height = 4, units = "in", res = 600)
    on.exit(dev.off())
  }
  # Built-in simulation results shipped with the package.
  # NOTE(review): utils::data() loads into the global environment by
  # default -- presumably relies on the package's lazy-loaded data when
  # called as ss3sim:::create_logo(); confirm against package setup.
  utils::data(ts_dat, package = "ss3sim")
  # cols <- RColorBrewer::brewer.pal(8, "Blues")
  # Hard-coded "Blues" palette (see commented call above) so that
  # RColorBrewer is not a runtime dependency.
  cols <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1",
    "#6BAED6", "#4292C6", "#2171B5", "#084594")
  # Relative error of spawning biomass, restricted to one scenario
  # (E == "E0" and D == "D1").
  ts_dat <- calculate_re(ts_dat, add = FALSE)
  ts_dat <- ts_dat[ts_dat$E == "E0" & ts_dat$D == "D1", ]
  # Per-year quantiles (5/25/50/75/95%) of the relative error across runs.
  quant_dat <- data.frame(do.call("rbind",
    tapply(ts_dat$SpawnBio_re, ts_dat$year, stats::quantile,
    probs = c(0.05,0.25, 0.50, 0.75, 0.95), na.rm = TRUE)))
  colnames(quant_dat) <- c("q05", "q25", "q50", "q75", "q95")
  quant_dat <- na.omit(quant_dat)
  # Years come back as row names (character); kept as a column for plotting.
  quant_dat$year <- row.names(quant_dat)
  # Blank, annotation-free canvas; the x-range spans from near the first
  # year to the median year so the logo shows only the early trajectory.
  plot(1, 1,
    xlim = ceiling(stats::quantile(type.convert(quant_dat$year, as.is = TRUE), probs = c(0.03, 0.50))),
    ylim = c(-0.3, 0.3),
    type = "n", axes = FALSE, ann = FALSE, xaxs = "i")
  # 90% interval (light blue), 50% interval (darker), median line (darkest).
  graphics::polygon(c(quant_dat$year, rev(quant_dat$year)), c(quant_dat$q05, rev(quant_dat$q95)),
    col = cols[4], border = NA)
  graphics::polygon(c(quant_dat$year, rev(quant_dat$year)), c(quant_dat$q25, rev(quant_dat$q75)),
    col = cols[6], border = NA)
  graphics::lines(quant_dat$year, quant_dat$q50, col = cols[8], lwd = 3)
}
| /R/create_logo.R | no_license | realsmak88/ss3sim | R | false | false | 1,659 | r | #' Create the ss3sim logo
#'
#' Generate and save, if \code{outfile} is provided,
#' the ss3sim logo using the built-in data.
#'
#' @template outfile
#' @examples
#' ss3sim:::create_logo()
#' dev.off()
#' @return A \code{png} file or a graphics device with the
#' logo used for the \code{ss3sim} project.
#' @author Kelli Faye Johnson
#'
create_logo <- function(outfile = NULL) {
if (!is.null(outfile)) {
grDevices::png(outfile,
width = 4, height = 4, units = "in", res = 600)
on.exit(dev.off())
}
utils::data(ts_dat, package = "ss3sim")
# cols <- RColorBrewer::brewer.pal(8, "Blues")
cols <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1",
"#6BAED6", "#4292C6", "#2171B5", "#084594")
ts_dat <- calculate_re(ts_dat, add = FALSE)
ts_dat <- ts_dat[ts_dat$E == "E0" & ts_dat$D == "D1", ]
quant_dat <- data.frame(do.call("rbind",
tapply(ts_dat$SpawnBio_re, ts_dat$year, stats::quantile,
probs = c(0.05,0.25, 0.50, 0.75, 0.95), na.rm = TRUE)))
colnames(quant_dat) <- c("q05", "q25", "q50", "q75", "q95")
quant_dat <- na.omit(quant_dat)
quant_dat$year <- row.names(quant_dat)
plot(1, 1,
xlim = ceiling(stats::quantile(type.convert(quant_dat$year, as.is = TRUE), probs = c(0.03, 0.50))),
ylim = c(-0.3, 0.3),
type = "n", axes = FALSE, ann = FALSE, xaxs = "i")
graphics::polygon(c(quant_dat$year, rev(quant_dat$year)), c(quant_dat$q05, rev(quant_dat$q95)),
col = cols[4], border = NA)
graphics::polygon(c(quant_dat$year, rev(quant_dat$year)), c(quant_dat$q25, rev(quant_dat$q75)),
col = cols[6], border = NA)
graphics::lines(quant_dat$year, quant_dat$q50, col = cols[8], lwd = 3)
}
|
#' Flow between locations under the specified movement model
#'
#' Given the populations at pairs of places and the distances between
#' them, dispatches to one of the implemented movement models and
#' returns the predicted flow for each origin/destination pair.
#'
#' @param N_from population at the origin location(s)
#' @param N_to population at the destination location(s)
#' @param distance distance between origin and destination
#' @param model character string naming the model; one of
#'   "gravity", "poisson_gravity" or "gravity_alt"
#' @param params named list of model-specific parameters:
#'   \code{K}, \code{pow_N_from}, \code{pow_N_to} and \code{pow_dist}
#'   for the two gravity models; \code{tau}, \code{rho} and
#'   \code{alpha} for "gravity_alt"
#' @return vector of predicted flows
#' @author Sangeeta Bhatia
flow_vector <- function(N_from,
                        N_to,
                        distance,
                        model,
                        params) {
  # switch() exact-matches the model name; the trailing unnamed
  # expression is the default branch for unimplemented models.
  switch(
    model,
    gravity = gravity_model_flow(
      N_from, N_to, distance,
      params$K, params$pow_N_from, params$pow_N_to, params$pow_dist
    ),
    poisson_gravity = poisson_gravity(
      N_from, N_to, distance,
      params$K, params$pow_N_from, params$pow_N_to, params$pow_dist
    ),
    gravity_alt = gravity_alt(
      N_to, distance, params$tau, params$rho, params$alpha
    ),
    stop("Model not yet implemented")
  )
}
#' Alternative gravity kernel: destination-population attraction damped
#' by a power-law function of distance,
#' N_to^tau * (1 + distance / rho)^(-alpha).
gravity_alt <- function(N_to, distance, tau, rho, alpha) {
  attraction <- N_to^tau
  distance_decay <- (1 + distance / rho)^(-alpha)
  attraction * distance_decay
}
#' Expected flow under a Poisson (log-linear) gravity model
#'
#' @details The flow between locations follows a Poisson process with
#' mean lambda_ij = exp(K + pow_N_from * ln(N_i) + pow_N_to * ln(N_j) +
#' pow_dist * ln(d_ij)).
#'
#' @param N_from population at the origin
#' @param N_to population at the destination
#' @param distance distance between origin and destination
#' @param K intercept on the log scale
#' @param pow_N_from coefficient on the log origin population
#' @param pow_N_to coefficient on the log destination population
#' @param pow_dist coefficient on log distance; note that this must be
#'   entered as a negative number for flow to decay with distance
#' @return expected flow (lambda) for each pair
#' @author Sangeeta Bhatia
poisson_gravity <- function(N_from,
                            N_to,
                            distance,
                            K,
                            pow_N_from,
                            pow_N_to,
                            pow_dist) {
  # Build the linear predictor on the log scale, then exponentiate.
  log_lambda <- K +
    pow_N_from * log(N_from) +
    pow_N_to * log(N_to) +
    pow_dist * log(distance)
  exp(log_lambda)
}
#' Computes the flow from A to B under the gravity model
#'
#' Given the populations of A and B and the distance between them,
#' returns the estimated population flow modeled as
#' \eqn{\phi(A,B) = K N_A^{\alpha} N_B^{\beta} / r_{AB}^{\gamma}}.
#'
#' @param N_from population of the source
#' @param N_to population of the destination
#' @param distance distance between the two places
#' @param K proportionality constant
#' @param pow_N_from power on the population of the source
#' @param pow_N_to power on the population of the destination
#' @param pow_dist power on the distance between the source and the
#' destination
#' @return estimated flow between source and destination
#' @author Pierre Nouvellet, Anne Cori, Sangeeta Bhatia
#' @export
gravity_model_flow <- function(N_from, N_to, distance, K,
                               pow_N_from, pow_N_to, pow_dist) {
  scaled_mass <- K * (N_from^pow_N_from) * (N_to^pow_N_to)
  scaled_mass / (distance^pow_dist)
}
#' Pairwise flow matrix between a set of locations
#'
#' Builds the full n x n matrix of flows between locations from the
#' vector of pairwise distances and the population vectors, using the
#' requested movement model. Flows are directional: the (i, j) entry
#' is the flow from i to j and need not equal the (j, i) entry.
#'
#' @param distances vector of pairwise distances; assumed to be ordered
#'   as the lower triangle, column-major, of the distance matrix (the
#'   order produced by \code{m[lower.tri(m)]}) -- TODO confirm with
#'   callers
#' @param n_from populations at the origin of each pair, in the same
#'   order as \code{distances}
#' @param n_to populations at the destination of each pair
#' @param place_names names of the locations; used as dimnames of the
#'   result
#' @param model must be one of "gravity", "poisson_gravity" or
#'   "gravity_alt"; if a vector is supplied (e.g. the default), the
#'   first element is used
#' @param params named list of model-specific parameters, forwarded to
#'   \code{flow_vector}
#' @return square matrix of population flow with row/column names set
#'   to \code{place_names}; the diagonal is left \code{NA}
#' @author Sangeeta Bhatia
#' @export
flow_matrix <- function(distances,
                        n_from,
                        n_to,
                        place_names,
                        model = c("gravity", "gravity_alt"),
                        params) {
  ## BUG FIX: 'model' used to be forwarded as-is, so calling with the
  ## default (a length-2 character vector) made the scalar if ()
  ## comparisons inside flow_vector error on R >= 4.2 ("condition has
  ## length > 1"). Collapse to the first element, which matches the
  ## historical pre-4.2 behaviour of defaulting to "gravity" while
  ## still accepting any single model name flow_vector understands.
  if (length(model) > 1) {
    model <- model[[1]]
  }
  flow_mat <-
    matrix(NA, length(place_names), length(place_names))
  rownames(flow_mat) <- place_names
  colnames(flow_mat) <- place_names
  ## Flows in the from -> to direction fill the lower triangle ...
  flow_from_to <- flow_vector(n_from,
                              n_to,
                              distances,
                              model,
                              params)
  flow_mat[lower.tri(flow_mat)] <- flow_from_to
  ## ... which the transpose moves to the upper triangle,
  flow_mat <- t(flow_mat)
  ## leaving the lower triangle free for the reverse (to -> from) flows.
  flow_to_from <- flow_vector(n_to,
                              n_from,
                              distances,
                              model,
                              params)
  flow_mat[lower.tri(flow_mat)] <-
    flow_to_from
  flow_mat
}
#' Probability of moving from location i to j
#'
#' The off-diagonal entry (i, j) is the probability of leaving i times
#' the relative risk of ending up at j:
#' (1 - p_stay[i]) * relative_risk[i, j]. The diagonal entry (i, i) is
#' p_stay[i] itself.
#'
#' @param relative_risk n * n matrix where n = n.locations
#' @param p_stay a vector of length n where the ith entry specifies
#' the probability of staying at location i. If length of p_stay is
#' less than n, elements will be recycled.
#' @return a n * n matrix specifying the population flow between n
#' locations
#' @author Sangeeta Bhatia
probability_movement <- function(relative_risk, p_stay) {
  if (nrow(relative_risk) != ncol(relative_risk)) {
    stop("relative_risk should be a square matrix.")
  }
  n_locations <- nrow(relative_risk)
  stay <- rep_len(p_stay, n_locations)
  ## Multiplying a matrix by a vector of length nrow recycles the
  ## vector down the columns, so every entry of row i is scaled by the
  ## probability of leaving location i.
  p_movement <- relative_risk * (1 - stay)
  diag(p_movement) <- stay
  p_movement
}
| /R/spatial_processing.R | no_license | annecori/mRIIDSprocessData | R | false | false | 5,710 | r | ##' .. Given the populations at two places and the distances between
##' them, returns the flow vector under the specified model ..
##' The models are : gravity and radiation.
##'
##' @title
##' @param K
##' @param alpha
##' @param beta
##' @param gamma
##' @param model
##' @return
##' @author Sangeeta Bhatia
flow_vector <- function(N_from,
N_to,
distance,
model,
params) {
if (model == "gravity") {
K <- params$K
pow_N_from <- params$pow_N_from
pow_N_to <- params$pow_N_to
pow_dist <- params$pow_dist
gravity_model_flow(N_from, N_to, distance, K,
pow_N_from, pow_N_to, pow_dist)
} else if (model == "poisson_gravity") {
K <- params$K
pow_N_from <- params$pow_N_from
pow_N_to <- params$pow_N_to
pow_dist <- params$pow_dist
poisson_gravity(N_from, N_to, distance, K,
pow_N_from, pow_N_to, pow_dist)
} else if (model == "gravity_alt") {
tau <- params$tau
rho <- params$rho
alpha <- params$alpha
gravity_alt(N_to, distance, tau, rho, alpha)
} else
stop("Model not yet implemented")
}
gravity_alt <- function(N_to, distance, tau, rho, alpha) {
(N_to ^ tau) * ((1 + distance/rho)^(-alpha))
}
##' Flow using gravity model based on Poisson process
##'
##' @details In this model the flow between locations is
##' distributed accordin to a poisson process with mean
##' lamda_ij = exp(b0 + b1*ln(P1) + b2*ln(P2) + b3*ln(dij))
##' @title
##' @param N_from
##' @param N_to
##' @param distance
##' @param K
##' @param pow_N_from
##' @param pow_N_to
##' @param pow_dist note that this must be entered as a -ve number.
##' @return
##' @author Sangeeta Bhatia
poisson_gravity <- function(N_from,
N_to,
distance,
K,
pow_N_from,
pow_N_to,
pow_dist) {
exp(K +
pow_N_from * log(N_from) +
pow_N_to * log(N_to) +
pow_dist * log(distance))
}
##' Given the populations of A and B and the distance between them,
##' return the estimated population flow between
##' them modeled as
##' \phi(A,B) = K N_A^{\alpha}N_B^{\beta}/r_{AB}^{\gamma}..
##' @title Computes the flow from A to B under the gravity model
##' @param N_from population of the source
##' @param N_to population of the destination
##' @param dist distance between the two places
##' @param pow_N_from power on the population of the source
##' @param pow_N_to power on the population of the destination
##' @param pow_dist power on the distance between the source and the
##' destination
##' @return estimated flow between source and destination
##' @author Pierre Nouvellet, Anne Cori Sangeeta Bhatia
##' @export
gravity_model_flow <- function(N_from, N_to, distance, K,
pow_N_from, pow_N_to, pow_dist) {
K * (N_from ^ pow_N_from) * (N_to ^ pow_N_to) /
(distance ^ pow_dist)
}
##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##' @title
##' @param distances distance vector
##' @param n_from population at source
##' @param n_to population at destination
##' @param place_names
##' @param model must be one of gravity, gravity_alt
##' @param params list of model-specific parameters
##' @return matrix of population flow
##' @author Sangeeta Bhatia
##' @export
flow_matrix <- function(distances,
n_from,
n_to,
place_names,
model = c("gravity", "gravity_alt"),
params) {
flow_mat <-
matrix(NA, length(place_names), length(place_names))
rownames(flow_mat) <- place_names
colnames(flow_mat) <- place_names
## fill in the matrix from the vectors
flow_from_to <- flow_vector(n_from,
n_to,
distances,
model,
params)
flow_mat[lower.tri(flow_mat)] <- flow_from_to
## fill out the upper triangle
flow_mat <- t(flow_mat)
flow_to_from <- flow_vector(n_to,
n_from,
distances,
model,
params)
## fill out the lower triangle
flow_mat[lower.tri(flow_mat)] <-
flow_to_from
flow_mat
}
##' Probability of moving from location i to j
##'
##' the probability of moving from location i to location j is given by
##' (1 - p_stay_at_i) * (flow_from_i_to_j/(total outward flow from i))
##' @title
##' @param relative_risk n * n matrix where n = n.locations
##' @param p_stay a vector of length n where the ith entry specifies
##' the probability of staying at location i. If length of p_stay is
##' less than n, elements will be recycled.
##' @return a n * n matrix specifying the population flow between n
##' locations
##' @author Sangeeta Bhatia
probability_movement <- function(relative_risk, p_stay) {
if (nrow(relative_risk) != ncol(relative_risk)) {
stop("relative_risk should be a square matrix.")
}
n_countries <- nrow(relative_risk)
p_mat <- matrix(
rep(p_stay, each = n_countries,
length.out = n_countries ^ 2),
nrow = n_countries,
byrow = TRUE
)
p_mat <- 1 - p_mat
p_movement <- relative_risk * p_mat
diag(p_movement) <-
rep(p_stay, each = 1, length.out = n_countries)
p_movement
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Pokemon Data Challenge (2018) - STAT 240"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
#sliderInput("bins",
# "Number of bins:",
# min = 1,
# max = 50,
# value = 30)
((HTML('<h5>For "Appearances By Weather" Tab
and "Locations of Appearances" Tab:</h5>'))),
selectInput("DateChoice", "Date (YYYY-MM-DD):",
c("2018-02-19", "2018-02-18", "2018-02-17", "2018-02-16",
"2018-02-15", "2018-02-14","2018-02-13", "2018-02-12",
"2018-02-11", "2018-02-10","2018-02-09","2018-02-08")),
checkboxInput("Gender", strong("Seperate Pokemon by Gender?"), FALSE),
conditionalPanel(
condition = "input.Gender == true",
helpText((HTML("<h3>Select a Gender: </h3>"))),
((HTML("<h5>Please be patient while generating. </h5>"))),
radioButtons("gendersource", "",
c(#"All" = "All",
"Female" = "Female",
"Male" = "Male"
#"No Gender" = "None"
))
),
((HTML('<h5>For "Locations of Appearances" Tab: </h5>'))),
checkboxInput("Weather", strong("Seperate Appearances by Weather?"), FALSE),
conditionalPanel(
condition = "input.Weather == true",
helpText((HTML("<h3>Select Weather: </h3>"))),
((HTML("<h5>Please be patient while generating. </h5>"))),
radioButtons("weathersource", "",
c("Windy" = "Windy","Snow" = "Snow","Rainy" = "Rainy",
"Partly Cloudy" = "Partly Cloudy", "None" = "None",
"Cloudy" = "Cloudy", "Clear" = "Clear"))
),
((HTML('<h5>For "Attacks VS Defense" and "Colors and Types" Tab: </h5>'))),
selectInput("ColChoice", "Pokemon Color:",
c("White", "Green", "Red", "Blue", "Brown", "Yellow",
"Purple", "Pink", "Grey", "Black")),
((HTML('<h5>For "Attacks VS Defense" Tab Only: </h5>'))),
checkboxInput("LinReg",strong("Add Linear Regression Line?"), FALSE),
checkboxInput("NewP", strong("Add yourself as a Pokemon?"), FALSE),
conditionalPanel(
condition = "input.NewP == true", #Note th lowercase logical
#helpText(HTML("<h5>For Attacks VS Defense Graph Only</h5>")),
helpText(HTML("<h3>Choose your Attack and Defense levels.</h3>")),
sliderInput("NewA", "Attack (will not effect regression):",
min = 0, max = 165, value = 75, step = 1),
sliderInput("NewD", "Defense(will not effect regression):",
min = 0, max = 230, value = 120, step = 1))
###checkboxInput("Word", strong("Create a Bar Plot for word use by
### bot account tweets?"),
### FALSE),
###conditionalPanel(
### condition = "input.Word == true",
### helpText((HTML("<h3>Select the bot account: </h3>"))),
### #((HTML("<h5>Please be patient while generating. </h5>"))),
### radioButtons("datasource", "",
### c("Vancouver (in BarPlots Tab)" = "Vancouver Pokemon Tweets",
### "Toronto (in BarPlots Tab)" = "Toronto Pokemon Tweets",
### "Chicago (in BarPlots Tab)" = "Chicago Pokemon Tweets"))
### #helpText((HTML("<h5>Clouds with words used between 50 and 11000 times </h5>")))
###)
),
# Show a plot of the generated distribution
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("About", source("About.R")$value()),
tabPanel("Appearances By Weather", plotOutput("WeatherPlot")),
tabPanel("Locations of Appearances", plotOutput("MapPlot")),
tabPanel("Attacks VS Defense", plotOutput("ColorPlot")),
tabPanel("Colors and Types", plotOutput("HistPlot"))
#tabPanel("Bar Plots", plotOutput("WordPlot"))
)
)
)
))
| /ui.R | no_license | CarlaLouw/PokemonGoShinyApp | R | false | false | 4,610 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define the UI for the Pokemon Go Shiny application.  The sidebar inputs
# drive the plots rendered into the main-panel tabs; the matching
# render*() calls live in server.R.
shinyUI(fluidPage(
  # Application title
  titlePanel("Pokemon Data Challenge (2018) - STAT 240"),
  # Sidebar with the filter controls, main panel with the output tabs.
  sidebarLayout(
    sidebarPanel(
      #sliderInput("bins",
      #            "Number of bins:",
      #            min = 1,
      #            max = 50,
      #            value = 30)
      # Section header: these controls feed the weather and map tabs.
      ((HTML('<h5>For "Appearances By Weather" Tab
            and "Locations of Appearances" Tab:</h5>'))),
      # Observation date used by both the weather plot and the map plot.
      selectInput("DateChoice", "Date (YYYY-MM-DD):",
                  c("2018-02-19", "2018-02-18", "2018-02-17", "2018-02-16",
                    "2018-02-15", "2018-02-14","2018-02-13", "2018-02-12",
                    "2018-02-11", "2018-02-10","2018-02-09","2018-02-08")),
      # Optional gender split (the "Seperate" typo is user-facing text and
      # is deliberately left unchanged here).
      checkboxInput("Gender", strong("Seperate Pokemon by Gender?"), FALSE),
      conditionalPanel(
        condition = "input.Gender == true",
        helpText((HTML("<h3>Select a Gender: </h3>"))),
        ((HTML("<h5>Please be patient while generating. </h5>"))),
        radioButtons("gendersource", "",
                     c(#"All" = "All",
                       "Female" = "Female",
                       "Male" = "Male"
                       #"No Gender" = "None"
                       ))
      ),
      ((HTML('<h5>For "Locations of Appearances" Tab: </h5>'))),
      # Optional split of map appearances by the recorded weather.
      checkboxInput("Weather", strong("Seperate Appearances by Weather?"), FALSE),
      conditionalPanel(
        condition = "input.Weather == true",
        helpText((HTML("<h3>Select Weather: </h3>"))),
        ((HTML("<h5>Please be patient while generating. </h5>"))),
        radioButtons("weathersource", "",
                     c("Windy" = "Windy","Snow" = "Snow","Rainy" = "Rainy",
                       "Partly Cloudy" = "Partly Cloudy", "None" = "None",
                       "Cloudy" = "Cloudy", "Clear" = "Clear"))
      ),
      ((HTML('<h5>For "Attacks VS Defense" and "Colors and Types" Tab: </h5>'))),
      # Pokemon colour filter shared by the scatter and histogram tabs.
      selectInput("ColChoice", "Pokemon Color:",
                  c("White", "Green", "Red", "Blue", "Brown", "Yellow",
                    "Purple", "Pink", "Grey", "Black")),
      ((HTML('<h5>For "Attacks VS Defense" Tab Only: </h5>'))),
      checkboxInput("LinReg",strong("Add Linear Regression Line?"), FALSE),
      # Lets the user add their own attack/defense point to the scatterplot.
      checkboxInput("NewP", strong("Add yourself as a Pokemon?"), FALSE),
      conditionalPanel(
        condition = "input.NewP == true", # Note the lowercase JS literal `true`
        #helpText(HTML("<h5>For Attacks VS Defense Graph Only</h5>")),
        helpText(HTML("<h3>Choose your Attack and Defense levels.</h3>")),
        sliderInput("NewA", "Attack (will not effect regression):",
                    min = 0, max = 165, value = 75, step = 1),
        sliderInput("NewD", "Defense(will not effect regression):",
                    min = 0, max = 230, value = 120, step = 1))
      ###checkboxInput("Word", strong("Create a Bar Plot for word use by
      ###                             bot account tweets?"),
      ###              FALSE),
      ###conditionalPanel(
      ###  condition = "input.Word == true",
      ###  helpText((HTML("<h3>Select the bot account: </h3>"))),
      ###  #((HTML("<h5>Please be patient while generating. </h5>"))),
      ###  radioButtons("datasource", "",
      ###    c("Vancouver (in BarPlots Tab)" = "Vancouver Pokemon Tweets",
      ###      "Toronto (in BarPlots Tab)" = "Toronto Pokemon Tweets",
      ###      "Chicago (in BarPlots Tab)" = "Chicago Pokemon Tweets"))
      ###  #helpText((HTML("<h5>Clouds with words used between 50 and 11000 times </h5>")))
      ###)
    ),
    # Output tabs: each tabPanel shows one plotOutput rendered in server.R;
    # the About tab sources static content from About.R.
    mainPanel(
      tabsetPanel(type = "tabs",
                  tabPanel("About", source("About.R")$value()),
                  tabPanel("Appearances By Weather", plotOutput("WeatherPlot")),
                  tabPanel("Locations of Appearances", plotOutput("MapPlot")),
                  tabPanel("Attacks VS Defense", plotOutput("ColorPlot")),
                  tabPanel("Colors and Types", plotOutput("HistPlot"))
                  #tabPanel("Bar Plots", plotOutput("WordPlot"))
      )
    )
  )
))
|
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -4.17960301477471e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615827541-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 734 | r | testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -4.17960301477471e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
# LIBRARIES
library(readxl)
library(RPostgreSQL)
library(rpostgis)
library(stringr)
library(geosphere)
library(tidyr)
library(dplyr)
library(sf)
# CONFIG: defines input_path_infrastructure, schema_dev, the table_* names
# and atoll_table_all used further down.
config_path <- '~/shared/rural_planner/config_files/config_br'
source(config_path)
# VARIABLES
# file.path() replaces paste0(..., sep = "") -- `sep` is not an argument of
# paste0(), so the "" was silently pasted as an extra (empty) string.  The
# resulting path is identical.
input_path <- file.path(input_path_infrastructure, "intermediate outputs")
# Intermediate outputs to be consolidated
file_names_pts <- c("vivo.rds", "third_party_nodes.rds", "infra_competitors.rds")
file_names_tx_vivo <- c("vivo_fo_pops.rds", "vivo_mw_pops.rds")
file_names_lines <- c("third_party_lines.rds", "vogel_lines.rds")
output_path <- file.path(input_path_infrastructure, "intermediate outputs")
file_name <- "towers_aux.rds"
source('~/shared/rural_planner/sql/br/infrastructure/06_uploadDBFinalConsolidation.R')
source('~/shared/rural_planner/sql/br/infrastructure/06_updateDBIOConsolidation.R')
source('~/shared/rural_planner/functions/readAllFiles.R')
# Join all intermediate outputs into single data frames; readAllFiles()
# row-binds every listed RDS file found under output_path.
towers_pts_raw <- readAllFiles(file_names_pts,output_path)
towers_lines_raw <- readAllFiles(file_names_lines, output_path)
towers_tx <- readAllFiles(file_names_tx_vivo, output_path)
# Drop transmission PoPs whose internal_id already appears among VIVO nodes.
vivo_ids <- unique(towers_pts_raw$internal_id[towers_pts_raw$source=="VIVO"])
towers_tx <- towers_tx[!(towers_tx$internal_id%in%vivo_ids),]
towers_pts_raw <- rbind(towers_pts_raw,towers_tx)
# AD-HOC: Clean corrupt geometries and parse geom format from wkt
towers_lines_raw <- towers_lines_raw[!(towers_lines_raw$wkt=='LINESTRING Z ()'),]
# Remove linestrings with one point only: a valid WKT linestring needs at
# least one comma separating two coordinate pairs.
towers_lines_raw <- towers_lines_raw[(grepl(",",towers_lines_raw$wkt)),]
# AD-HOC: Remove duplicates, keeping the first row per key after ordering
# by source/internal_id.
towers <- towers_pts_raw[order(towers_pts_raw$source, towers_pts_raw$internal_id),]
towers <- towers %>% distinct(source, latitude, longitude, .keep_all=T)
traces <- towers_lines_raw[order(towers_lines_raw$source, towers_lines_raw$internal_id),]
traces <- traces %>% distinct(source, wkt, .keep_all=T)
## AD-HOC: Fix tower heights -- negative values are treated as sign errors;
## implausible heights (> 150 m) are replaced by the mean of plausible ones.
towers$tower_height[towers$tower_height<0] <- abs(towers$tower_height[towers$tower_height<0])
towers$tower_height[towers$tower_height>150] <- mean(towers$tower_height[towers$tower_height<=150])
## Upload to database (intermediate output)
infra_all <- updateDBIOConsolidation(schema_dev, table_lines, table_points, traces, towers)
# Export the normalized output and sanity-check the save/load round trip.
saveRDS(infra_all, paste(output_path, file_name, sep = "/"))
test <- readRDS(paste(output_path, file_name, sep = "/"))
identical(test, infra_all)
# Export and separate towers, access and transport
uploadDBFinalConsolidation(schema_dev, table_infrastructure_global, infra_all, atoll_table_all)
| /scripts_r/br/infrastructure/06_infrastructure_consolidation.R | no_license | Telefonica/rural-planner | R | false | false | 2,706 | r |
#LIBRARIES
library(readxl)
library(RPostgreSQL)
library(rpostgis)
library(stringr)
library(geosphere)
library(tidyr)
library(dplyr)
library(sf)
#CONFIG
config_path <- '~/shared/rural_planner/config_files/config_br'
source(config_path)
#VARIABLES
input_path <- paste0(input_path_infrastructure,"/intermediate outputs", sep="")
#Intermediate outputs to be consolidated
file_names_pts <- c("vivo.rds", "third_party_nodes.rds", "infra_competitors.rds")
file_names_tx_vivo <- c("vivo_fo_pops.rds", "vivo_mw_pops.rds")
file_names_lines <- c("third_party_lines.rds", "vogel_lines.rds")
output_path <- paste0(input_path_infrastructure,"/intermediate outputs", sep="")
file_name <- "towers_aux.rds"
source('~/shared/rural_planner/sql/br/infrastructure/06_uploadDBFinalConsolidation.R')
source('~/shared/rural_planner/sql/br/infrastructure/06_updateDBIOConsolidation.R')
source('~/shared/rural_planner/functions/readAllFiles.R')
#Join all outputs to one single data frame. Remove some duplicates
#Function to combine data frames
towers_pts_raw <- readAllFiles(file_names_pts,output_path)
towers_lines_raw <- readAllFiles(file_names_lines, output_path)
towers_tx <- readAllFiles(file_names_tx_vivo, output_path)
vivo_ids <- unique(towers_pts_raw$internal_id[towers_pts_raw$source=="VIVO"])
towers_tx <- towers_tx[!(towers_tx$internal_id%in%vivo_ids),]
towers_pts_raw <- rbind(towers_pts_raw,towers_tx)
# AD-HOC: Clean corrupt geometries and parse geom format from wkt
towers_lines_raw <- towers_lines_raw[!(towers_lines_raw$wkt=='LINESTRING Z ()'),]
# This line removes linestrings with one point only:
towers_lines_raw <- towers_lines_raw[(grepl(",",towers_lines_raw$wkt)),]
# AD-HOC: Remove duplicates
towers <- towers_pts_raw[order(towers_pts_raw$source, towers_pts_raw$internal_id),]
towers <- towers %>% distinct(source, latitude, longitude, .keep_all=T)
traces <- towers_lines_raw[order(towers_lines_raw$source, towers_lines_raw$internal_id),]
traces <- traces %>% distinct(source, wkt, .keep_all=T)
## AD-HOC: Fix tower_heights
towers$tower_height[towers$tower_height<0] <- -(towers$tower_height[towers$tower_height<0])
towers$tower_height[towers$tower_height>150] <- mean(towers$tower_height[towers$tower_height<=150])
##Upload to database (intermediate output)
infra_all <- updateDBIOConsolidation(schema_dev, table_lines, table_points, traces, towers)
#Export the normalized output
saveRDS(infra_all, paste(output_path, file_name, sep = "/"))
test <- readRDS(paste(output_path, file_name, sep = "/"))
identical(test, infra_all)
#Export and separate towers, access and transport
uploadDBFinalConsolidation(schema_dev, table_infrastructure_global, infra_all, atoll_table_all)
|
# Prepare the data
# BUGFIX(review): the script calls qplot() and uses ggplot2's diamonds /
# economics / mpg data sets but never attached ggplot2 itself; load it
# explicitly so the script runs in a fresh session.
library(ggplot2)
# Take a 1% random sample of diamonds (no set.seed, so it differs per run).
rowIndex <- sample(1:nrow(diamonds), 0.01*nrow(diamonds))
dSmall <- diamonds[rowIndex, ]
# Scatter plus smoother; `span` controls the loess wiggliness.
# NOTE(review): qplot(..., span=/method=/formula=) is pre-2.0 ggplot2 syntax;
# recent ggplot2 passes these through to geom_smooth() or ignores them --
# confirm against the installed version.
qplot(carat, price, data=dSmall, geom=c("point", "smooth"))
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), span=0.1)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), span=0.8)
# Adjust transparency (1/5 => 5 overplotted points produce 1 fully opaque point)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), span=0.1, alpha=I(1/5))
library(mgcv)
# gam
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="gam", formula=y ~ s(x)) # generalised additive model
# lm
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="lm") # fit a straight line
# robust regression (rlm)
library(MASS)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="rlm") # rlm line of best fit; outliers have less impact on the fit
# splines
library(splines)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="lm", formula=y ~ ns(x, 5)) # natural splines
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="lm", formula=y ~ poly(x, 3)) # 3rd degree polynomial
# Density
qplot(carat, data=dSmall, geom=c("density")) # Density
# Density for each category of color
qplot(carat, data=dSmall, geom=c("density"), colour=color) # Density
# Histogram at three bin widths
qplot(carat, data=dSmall, geom=c("histogram"), binwidth=1, xlim=c(0, 3))
qplot(carat, data=dSmall, geom=c("histogram"), binwidth=0.1, xlim=c(0, 3))
qplot(carat, data=dSmall, geom=c("histogram"), binwidth=0.01, xlim=c(0, 3))
# histogram stacked by diamond color
qplot(carat, data=dSmall, geom=c("histogram"), fill=color)
# barchart
qplot(carat, data=dSmall, geom="bar")
qplot(date, unemploy, data=economics, geom="line", color='red')
# Line Chart / Timeseries coloured by calendar year
year <- function(x) as.POSIXlt(x)$year + 1900
qplot(date, unemploy, data=economics, geom="line", colour=year(date))
# Facets: one histogram panel per diamond color
qplot(carat, data=diamonds, facets=color ~ ., geom="histogram", binwidth=0.1, xlim=c(0, 3))
# Scatterplot with varied coloring
qplot(displ, hwy, data = mpg, colour = factor(cyl))
| /ggplot2/ggplot_script.R | no_license | Sudhindra0803/myjunk | R | false | false | 2,135 | r | # Prepare the data
rowIndex <- sample(1:nrow(diamonds), 0.01*nrow(diamonds))
dSmall <- diamonds[rowIndex, ]
qplot(carat, price, data=dSmall, geom=c("point", "smooth"))
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), span=0.1)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), span=0.8)
# Adjust transparency (1/20 => 20 points needed to produce 1 fully opague point.)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), span=0.1, alpha=I(1/5))
library(mgcv)
# gam
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="gam", formula=y ~ s(x)) # generalised additive model
# lm
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="lm") # fit a straight line
# robust regression (rlm)
library(MASS)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="rlm") # fit a rlm line of best fit. Outliers have a lesser impact on the fitted line.
# splines
library(splines)
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="lm", formula=y ~ ns(x, 5)) # natural splines
qplot(carat, price, data=dSmall, geom=c("point", "smooth"), method="lm", formula=y ~ poly(x, 3)) # 3rd degree polynomil
# Density
qplot(carat, data=dSmall, geom=c("density")) # Density
# Density for each category of color
qplot(carat, data=dSmall, geom=c("density"), colour=color) # Density
# Histogram
qplot(carat, data=dSmall, geom=c("histogram"), binwidth=1, xlim=c(0, 3))
qplot(carat, data=dSmall, geom=c("histogram"), binwidth=0.1, xlim=c(0, 3))
qplot(carat, data=dSmall, geom=c("histogram"), binwidth=0.01, xlim=c(0, 3))
# histgram based on color
qplot(carat, data=dSmall, geom=c("histogram"), fill=color)
# barchart
qplot(carat, data=dSmall, geom="bar")
qplot(date, unemploy, data=economics, geom="line", color='red')
# Line Chart / Timeseries
year <- function(x) as.POSIXlt(x)$year + 1900
qplot(date, unemploy, data=economics, geom="line", colour=year(date))
# Facets
qplot(carat, data=diamonds, facets=color ~ ., geom="histogram", binwidth=0.1, xlim=c(0, 3))
# Scatterplot with varied coloring
qplot(displ, hwy, data = mpg, colour = factor(cyl))
|
# Low Tide (Strata) vs. Abundances (Biological)
# Clear the workspace (the script assumes a fresh session).
rm(list=ls())
# Load Data
TotalData<- read.csv("C:/Users/Jessica/Documents/GitHub/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")
######### Make new data set with only salinities and abundances #########
# Print the salinities observed at high tide (for reference only).
unique(TotalData$Salinity[TotalData$Strata=="H"])
# Bind salinity and RAW abundance into a two-column data frame.
SalinityTable <- cbind(TotalData$Salinity, TotalData$ab_m3_Glycera_dibrachiata)
SalinityTable <- as.data.frame(SalinityTable)
# change NAs to 0s (V2 is the abundance column before renaming)
SalinityTable$V2[is.na(SalinityTable$V2)]=0
# Change Column Names
colnames(SalinityTable) <- c("Salinity", "Abundance")
##
require(plyr)
# NOTE(review): this blanket replacement zeroes NAs in EVERY column of
# TotalData (including Salinity itself); confirm that is intended.
TotalData[is.na(TotalData)]=0
# Mean/SD/N of the volume-normalised abundance per salinity level.
FigureTable<- ddply(TotalData,.(Salinity),summarize,
                    MeanAbundance=mean(ab_m3_Glycera_dibrachiata/Core_m3,na.rm = TRUE),
                    SDAbundance=sd(ab_m3_Glycera_dibrachiata/Core_m3,na.rm = TRUE),
                    NAbundance=sum(is.na(ab_m3_Glycera_dibrachiata)==F)
)
###### Make Graph ########
jpeg('SalinityAbundance1.jpeg', height=1200, width=2400, res=400, qual=100 )
barplot(FigureTable$MeanAbundance, names.arg=FigureTable$Salinity, xlab="Salinity", ylab= expression ("Abundance (Ind/m"^3*")"), main=" ")
dev.off()
getwd()
#### Error Bars ###
AB_mean <- FigureTable$MeanAbundance
# FIXME(review): the SEs below use the RAW abundances in SalinityTable while
# the means are volume-normalised (divided by Core_m3), so the error bars
# are on a different scale than the bars -- confirm and align the units.
AB_se <- tapply(SalinityTable$Abundance,INDEX=SalinityTable$Salinity, sd, na.rm = TRUE)/sqrt(count(SalinityTable,vars="Salinity")$freq)
# BUGFIX: the second device was opened without a file extension, producing a
# file literally named "SalinityAbundance1" (no .jpeg); write the error-bar
# version to its own .jpeg file so it neither loses the extension nor
# collides with the first figure.
jpeg('SalinityAbundance2.jpeg', height=1200, width=2400, res=400, qual=100 )
mp <- barplot(FigureTable$MeanAbundance, names.arg=FigureTable$Salinity, xlab="Salinity", ylab= expression ("Abundance (Ind/m"^3*")"), main=" ",ylim=c(0,250)) # plots the barplot and saves the midpoints in mp
segments(mp, AB_mean + AB_se, mp,AB_mean, lwd=2) # plots positive error bar centered on mp
segments(mp - 0.1, AB_mean + AB_se, mp + 0.1, AB_mean + AB_se, lwd=2) #plots error bar caps
dev.off()
getwd()
| /Team B Code/Salinity vs abundance species R codes/SalinityVsAbundance ab_m3_Glycera_dibrachiata.R | permissive | ohara-patrick/Dal-Intertidal-2014 | R | false | false | 2,009 | r | #Low Tide (Strata) vs. Abundances (Biological)
#Clear Yer Stuff
rm(list=ls())
#Load Data
TotalData<- read.csv("C:/Users/Jessica/Documents/GitHub/Dal-Intertidal-2014/Intertidal_Master_Data_Sheet_2014.csv")
######### Make new data set with only salinities and abundances#########
#Gives Salinities at high tide
unique(TotalData$Salinity[TotalData$Strata=="H"])
#Bind salinity and abundance in data table
SalinityTable <- cbind(TotalData$Salinity, TotalData$ab_m3_Glycera_dibrachiata)
#Makes SalinityTable a new data frame
SalinityTable <- as.data.frame(SalinityTable)
# change NAs to 0s
SalinityTable$V2[is.na(SalinityTable$V2)]=0
#Change Column Names
colnames(SalinityTable) <- c("Salinity", "Abundance")
##
require(plyr)
TotalData[is.na(TotalData)]=0
FigureTable<- ddply(TotalData,.(Salinity),summarize,
MeanAbundance=mean(ab_m3_Glycera_dibrachiata/Core_m3,na.rm = TRUE),
SDAbundance=sd(ab_m3_Glycera_dibrachiata/Core_m3,na.rm = TRUE),
NAbundance=sum(is.na(ab_m3_Glycera_dibrachiata)==F)
)
###### Make Graph ########
jpeg('SalinityAbundance1.jpeg', height=1200, width=2400, res=400, qual=100 )
barplot(FigureTable$MeanAbundance, names.arg=FigureTable$Salinity, xlab="Salinity", ylab= expression ("Abundance (Ind/m"^3*")"), main=" ")
dev.off()
getwd()
#### Error Bars ###
AB_mean <- FigureTable$MeanAbundance
AB_se <- tapply(SalinityTable$Abundance,INDEX=SalinityTable$Salinity, sd, na.rm = TRUE)/sqrt(count(SalinityTable,vars="Salinity")$freq)
jpeg('SalinityAbundance1', height=1200, width=2400, res=400, qual=100 )
mp <- barplot(FigureTable$MeanAbundance, names.arg=FigureTable$Salinity, xlab="Salinity", ylab= expression ("Abundance (Ind/m"^3*")"), main=" ",ylim=c(0,250)) # plots the barplot and saves the midpoints in mp
segments(mp, AB_mean + AB_se, mp,AB_mean, lwd=2) # plots positive error bar centered on mp
segments(mp - 0.1, AB_mean + AB_se, mp + 0.1, AB_mean + AB_se, lwd=2) #plots error bar caps
dev.off()
getwd()
|
\name{getLanguages}
\alias{getLanguages}
\title{
Return list of languages
}
\description{
Return list of languages: rows from lang_lst table
}
\usage{
getLanguages(dbCon)
}
\arguments{
\item{dbCon}{
database connection
}
}
\details{
Read from database and return data frame of language list (lang_lst) table rows.
}
\value{
Data frame with $lang_id, $lang_code, $lang_name columns of lang_lst table
}
\references{
OpenM++ documentation: https://github.com/openmpp/openmpp.github.io/wiki
}
\author{
amc1999
}
\note{
To run the examples you must have the modelOne database file modelOne.sqlite in the current working directory
}
\seealso{
\code{\link{getModel}}
}
\examples{
theDb <- dbConnect(RSQLite::SQLite(), "modelOne.sqlite", synchronous = "full")
invisible(dbGetQuery(theDb, "PRAGMA busy_timeout = 86400")) # recommended
langRs <- getLanguages(theDb)
dbDisconnect(theDb)
}
\keyword{ OpenM++ }
\keyword{ database }
| /openMpp/man/getLanguages.Rd | permissive | openmpp/R | R | false | false | 917 | rd | \name{getLanguages}
\alias{getLanguages}
\title{
Return list of languages
}
\description{
Return list of languages: rows from lang_lst table
}
\usage{
getLanguages(dbCon)
}
\arguments{
\item{dbCon}{
database connection
}
}
\details{
Read from database and return data frame of language list (lang_lst) table rows.
}
\value{
Data frame with $lang_id, $lang_code, $lang_name columns of lang_lst table
}
\references{
OpenM++ documentation: https://github.com/openmpp/openmpp.github.io/wiki
}
\author{
amc1999
}
\note{
To run examples you must have modelOne database modelOne.sqlite in current directory
}
\seealso{
\code{\link{getModel}}
}
\examples{
theDb <- dbConnect(RSQLite::SQLite(), "modelOne.sqlite", synchronous = "full")
invisible(dbGetQuery(theDb, "PRAGMA busy_timeout = 86400")) # recommended
langRs <- getLanguages(theDb)
dbDisconnect(theDb)
}
\keyword{ OpenM++ }
\keyword{ database }
|
#' Print a Color Chart for a Palette
#'
#' \code{ColorChart()} creates a visual reference of a color palette as it
#' would appear to trichromat viewers and to viewers with three types of
#' color blindness:
#' deuteranopia (red-green color blindness; 1\% of males),
#' protanopia (red-green color blindness; 1\% of males), and
#' tritanopia (blue-yellow color blindness; < 1\% of males and females).
#'
#' If \code{palette} is \code{NULL}, then a color chart for the six Callier
#' Center colors (solar orange, space blue, callier gray, spark orange,
#' stratos blue, sky blue) is created, and the x-axis is labeled with the
#' color names rather than their hexadecimal codes.
#'
#' The \code{dichromat} package is used to simulate color blindness.
#'
#' @param palette \code{NULL} (default) or a character vector, whose entries
#' are hexadecimal codes for values in the RGB (red-green-blue) color model.
#' @param colorblind Logical that determines whether simulations of
#' color blindness (deuteranopia, protanopia, tritanopia) are shown
#' (\code{TRUE}) or not (\code{FALSE)}) along with the trichromat color
#' palette. Default is \code{TRUE}.
#' @param chipsize Numeric that is passed to \code{geom_point(size = )}.
#' Determines the size of the color chips in the chart. Default is 25.
#' @return A \code{ggplot} object with the following aesthetics:
#' colors are ordered as they occur in \code{palette} and mapped to the
#' x-dimension, with the x-axis labeled by hexadecimal codes;
#' vision is mapped to the y-dimension, with the y-axis labeled by type of
#' vision (trichromat, deuteranopia, protanopia, tritanopia).
#' @export
ColorChart <- function(palette = NULL, colorblind = TRUE, chipsize = 25) {
  # Default palette: the nine named Callier Center brand colors.
  if (is.null(palette)) {
    palette <- c(
      'solarOrange' = solarOrange(),
      'spaceBlue' = spaceBlue(),
      'callierGray' = callierGray(),
      'sparkOrange' = sparkOrange(),
      'stratosBlue' = stratosBlue(),
      'skyBlue' = skyBlue(),
      'ecoGreen' = ecoGreen(),
      'saplingGreen' = saplingGreen(),
      'seedlingGreen' = seedlingGreen()
    )
  }
  # Unnamed palettes get labeled on the x-axis by their own hex codes.
  if (is.null(names(palette))) {
    names(palette) <- palette
  }
  # One row per color, as seen by a trichromat viewer.
  trichromat <- data.frame(
    Vision = 'Trichromat',
    Color = names(palette),
    Hex = palette,
    stringsAsFactors = FALSE
  )
  if (colorblind) {
    # Produce a copy of the trichromat rows with hex codes transformed by
    # the given dichromatism simulation.
    simulate <- function(vision, type) {
      sim <- trichromat
      sim$Vision <- vision
      sim$Hex <- dichromat::dichromat(sim$Hex, type = type)
      sim
    }
    schemes <- rbind(
      trichromat,
      simulate('Deuteranopia', 'deutan'),
      simulate('Protanopia', 'protan'),
      simulate('Tritanopia', 'tritan')
    )
    # Factor the axes: Trichromat plots in the top row, colors keep the
    # order they occur in `palette`.
    schemes$Vision <- factor(
      schemes$Vision,
      levels = c('Tritanopia', 'Protanopia', 'Deuteranopia', 'Trichromat')
    )
    schemes$Color <- factor(schemes$Color, names(palette))
    # Base plot: a tiny white point per cell, then one opaque chip layer
    # per vision row using that row's (possibly simulated) hex colors.
    chart <- ggplot2::ggplot(data = schemes,
                             ggplot2::aes(x = Color, y = Vision)) +
      ggplot2::theme_bw() +
      ggplot2::geom_point(data = schemes, colour = 'white', size = 1)
    for (vision in c('Trichromat', 'Deuteranopia', 'Protanopia', 'Tritanopia')) {
      chips <- subset(schemes, as.character(Vision) == vision)
      chart <- chart +
        ggplot2::geom_point(data = chips, colour = chips$Hex, size = chipsize)
    }
  } else {
    # Single unlabeled row showing only the trichromat palette.
    trichromat$Vision <- ''
    trichromat$Color <- factor(trichromat$Color, names(palette))
    chart <- ggplot2::ggplot(data = trichromat,
                             ggplot2::aes(x = Color, y = Vision)) +
      ggplot2::theme_bw() +
      ggplot2::ylab('') +
      ggplot2::scale_y_discrete(breaks = NULL) +
      ggplot2::geom_point(data = trichromat, colour = trichromat$Hex,
                          size = chipsize)
  }
  return(chart)
}
| /R/color-chart.R | no_license | patrickreidy/callierr | R | false | false | 4,432 | r | #' Print a Color Chart for a Palette
#'
#' \code{ColorChart()} creates a visual reference of a color palette as it
#' would appear to trichromat viewers and to viewers with three types of
#' color blindness:
#' deuteranopia (red-green color blindness; 1\% of males),
#' protanopia (red-green color blindness; 1\% of males), and
#' tritanopia (blue-yellow color blindness; < 1\% of males and females).
#'
#' If \code{palette} is \code{NULL}, then a color chart for the six Callier
#' Center colors (solar orange, space blue, callier gray, spark orange,
#' stratos blue, sky blue) is created, and the x-axis is labeled with the
#' color names rather than their hexadecimal codes.
#'
#' The \code{dichromat} package is used to simulate color blindness.
#'
#' @param palette \code{NULL} (default) or a character vector, whose entries
#' are hexadecimal codes for values in the RGB (red-green-blue) color model.
#' @param colorblind Logical that determines whether simulations of
#' color blindness (deuteranopia, protanopia, tritanopia) are shown
#' (\code{TRUE}) or not (\code{FALSE)}) along with the trichromat color
#' palette. Default is \code{TRUE}.
#' @param chipsize Numeric that is passed to \code{geom_point(size = )}.
#' Determines the size of the color chips in the chart. Default is 25.
#' @return A \code{ggplot} object with the following aesthetics:
#' colors are ordered as they occur in \code{palette} and mapped to the
#' x-dimension, with the x-axis labeled by hexadecimal codes;
#' vision is mapped to the y-dimension, with the y-axis labeled by type of
#' vision (trichromat, deuteranopia, protanopia, tritanopia).
#' @export
ColorChart <- function(palette = NULL, colorblind = TRUE, chipsize = 25) {
if (is.null(palette)) {
palette <- c(
'solarOrange' = solarOrange(),
'spaceBlue' = spaceBlue(),
'callierGray' = callierGray(),
'sparkOrange' = sparkOrange(),
'stratosBlue' = stratosBlue(),
'skyBlue' = skyBlue(),
'ecoGreen' = ecoGreen(),
'saplingGreen' = saplingGreen(),
'seedlingGreen' = seedlingGreen()
)
}
if (is.null(names(palette))) {
names(palette) <- palette
}
# Rectangularize the information in `palette`.
.callier <- data.frame(
Vision = 'Trichromat',
Color = names(palette),
Hex = palette,
stringsAsFactors = FALSE
)
if (colorblind) {
# Map the `.callier` scheme to different forms of dichromatism.
.schemes <- rbind(
.callier,
within(.callier, {
Vision <- 'Deuteranopia'
Hex <- dichromat::dichromat(Hex, type = 'deutan')
}),
within(.callier, {
Vision <- 'Protanopia'
Hex <- dichromat::dichromat(Hex, type = 'protan')
}),
within(.callier, {
Vision <- 'Tritanopia'
Hex <- dichromat::dichromat(Hex, type = 'tritan')
})
)
# Factor the variables in `.schemes`.
.schemes <- within(.schemes, {
Vision <- factor(
Vision,
levels = c('Tritanopia', 'Protanopia', 'Deuteranopia', 'Trichromat')
)
Color <- factor(Color, names(palette))
})
# Partition `.schemes` by level of $Vision.
.trichrom <- subset(.schemes, as.character(Vision) == 'Trichromat')
.deuteran <- subset(.schemes, as.character(Vision) == 'Deuteranopia')
.protan <- subset(.schemes, as.character(Vision) == 'Protanopia')
.tritan <- subset(.schemes, as.character(Vision) == 'Tritanopia')
# Plot.
.x <- ggplot2::ggplot(data = .schemes, ggplot2::aes(x = Color, y = Vision)) +
ggplot2::theme_bw() +
ggplot2::geom_point(data = .schemes, colour = 'white', size = 1) +
ggplot2::geom_point(data = .trichrom, colour = .trichrom$Hex, size = chipsize) +
ggplot2::geom_point(data = .deuteran, colour = .deuteran$Hex, size = chipsize) +
ggplot2::geom_point(data = .protan, colour = .protan$Hex, size = chipsize) +
ggplot2::geom_point(data = .tritan, colour = .tritan$Hex, size = chipsize)
} else {
.callier <- within(.callier, {
Vision <- ''
Color <- factor(Color, names(palette))
})
.x <- ggplot2::ggplot(data = .callier, ggplot2::aes(x = Color, y = Vision)) +
ggplot2::theme_bw() +
ggplot2::ylab('') +
ggplot2::scale_y_discrete(breaks = NULL) +
ggplot2::geom_point(data = .callier, colour = .callier$Hex, size = chipsize)
}
return(.x)
}
|
get_data_spsur <- function(formula, mf, Durbin = FALSE,
                           listw = NULL, zero.policy = NULL,
                           N = NULL, Tm = NULL) {
  # Build the stacked response vector Y and block-diagonal design matrix X
  # for a spatio-temporal SUR model.
  # Assumption: data frame ordered by space (N); output stacked N*G*Tm.
  #
  # formula: multi-part Formula with G left-hand sides (one per equation).
  # mf:      model frame holding every variable in `formula`.
  # Durbin:  FALSE, TRUE (spatially lag all regressors) or a formula naming
  #          the regressors to lag.
  # listw:   spdep neighbour list used to build the spatial lags WX.
  # zero.policy: forwarded to spatialreg helpers; defaults to global option.
  # N, Tm:   spatial and temporal dimensions (N is recomputed from mf below).
  # Returns: list(Y, X, G, N, Tm, p, dvars) where p holds the number of
  #          coefficients per equation and dvars the (ncol X, ncol WX)
  #          counts plus Durbin metadata attributes.
  if (is.null(zero.policy))
    zero.policy <- spatialreg::get.ZeroPolicyOption()
  # G = number of equations (left-hand sides of the Formula).
  G <- length(attr(formula, "lhs"))
  if (length(attr(formula, "rhs")) < G) { ## Recycle the first RHS for all equations
    for (i in 2:G) {
      attr(formula, "rhs")[[i]] <- attr(formula, "rhs")[[1]]
    }
  }
  # A Durbin formula is recycled across equations in the same way.
  if (inherits(Durbin, "formula")) {
    if (!inherits(Durbin, "Formula"))
      Durbin <- Formula::Formula(Durbin)
    if (length(attr(Durbin, "rhs")) < G) { ## Recycle the first RHS...
      for (i in 2:G) {
        attr(Durbin, "rhs")[[i]] <- attr(Durbin, "rhs")[[1]]
      }
    }
  }
  # NOTE(review): a user-supplied N is overwritten here; confirm intended.
  if (is.null(Tm)) {
    N <- nrow(mf)
  } else {
    N <- nrow(mf) / Tm
  }
  if (!is.null(listw)) {
    # NOTE(review): W is built but not used below; kept because
    # listw2mat() also validates the neighbour list as a side effect.
    W <- as(spdep::listw2mat(listw), "dgCMatrix")
  }
  if (is.null(listw) && is.null(N))
    stop("Dimension of spatial sample is needed")
  if (is.null(Tm)) {
    if (G > 1) { Tm <- nrow(mf) / N } else { Tm <- 1 }## Revisit Tm
  }
  Y <- vector("list", G)
  X <- vector("list", G)
  p <- rep(0, G)
  dvars <- vector("list", G)
  # Per-equation extraction of the response and regressor matrices; column
  # names get an "_i" suffix so equations stay distinguishable after
  # stacking.
  for (i in 1:G) {
    Yi <- as.matrix(Formula::model.part(formula, data = mf,
                                        lhs = i))
    Xi <- model.matrix(formula, data = mf, rhs = i)
    colnames(Xi) <- paste(colnames(Xi), i, sep = "_")
    colnames(Yi) <- paste(colnames(Yi), i, sep = "_")
    dvars[[i]] <- c(ncol(Xi), 0L)
    icept <- grep("(Intercept)", colnames(Xi))
    iicept <- length(icept) > 0L
    Y[[i]] <- Yi
    if (isTRUE(Durbin) || inherits(Durbin, "formula")) {
      prefix <- "lag"
      if (isTRUE(Durbin)) {
        # Durbin = TRUE: lag every regressor (excluding the intercept).
        if (iicept) {
          if (Tm == 1) {
            WXi <- spatialreg::create_WX(Xi[,-c(icept), drop = FALSE],
                                         listw,
                                         zero.policy = zero.policy,
                                         prefix = prefix)
          } else {
            WXi <- NULL
            for (j in 1:Tm) {
              # FIXME(review): this slice uses the equation index `i`,
              # not the period index `j`, so every period lags the same N
              # rows.  Confirm against the expected N*Tm row layout.
              Xit <- Xi[((((i-1)*N)+1):(i*N)), ]
              WXit <- spatialreg::create_WX(Xit[,-c(icept), drop = FALSE],
                                            listw,
                                            zero.policy = zero.policy,
                                            prefix = prefix)
              WXi <- rbind(WXi, WXit)
            }
          }
        } else {
          if (Tm == 1) {
            WXi <- spatialreg::create_WX(Xi, listw,
                                         zero.policy = zero.policy,
                                         prefix = prefix)
          } else {
            WXi <- NULL
            for (j in 1:Tm) {
              # FIXME(review): same `i`-vs-`j` slicing concern as above.
              Xit <- Xi[((((i-1)*N)+1):(i*N)), ]
              WXit <- spatialreg::create_WX(Xit, listw,
                                            zero.policy = zero.policy,
                                            prefix = prefix)
              WXi <- rbind(WXi, WXit)
            }
          }
        }
      } else { ## Durbin is a formula: lag only the selected regressors
        fXi <- try(model.matrix(Durbin, data = mf, rhs = i),
                   silent = TRUE)
        if (inherits(fXi, "try-error"))
          stop("Durbin variable mist-match")
        if (iicept) {
          if (Tm == 1) {
            WXi <- spatialreg::create_WX(fXi[,-c(icept), drop = FALSE],
                                         listw,
                                         zero.policy = zero.policy,
                                         prefix = prefix)
          } else {
            WXi <- NULL
            for (j in 1:Tm) {
              # FIXME(review): same `i`-vs-`j` slicing concern as above.
              fXit <- fXi[((((i-1)*N)+1):(i*N)), ]
              WXit <- spatialreg::create_WX(fXit[,-c(icept), drop = FALSE],
                                            listw,
                                            zero.policy = zero.policy,
                                            prefix = prefix)
              WXi <- rbind(WXi, WXit)
            }
          }
        } else {
          if (Tm == 1) {
            WXi <- spatialreg::create_WX(fXi, listw,
                                         zero.policy = zero.policy,
                                         prefix = prefix)
          } else {
            WXi <- NULL
            for (j in 1:Tm) {
              # FIXME(review): same `i`-vs-`j` slicing concern as above.
              fXit <- fXi[((((i-1)*N)+1):(i*N)), ]
              WXit <- spatialreg::create_WX(fXit, listw,
                                            zero.policy = zero.policy,
                                            prefix = prefix)
              WXi <- rbind(WXi, WXit)
            }
          }
        }
        #WXi <- spatialreg::create_WX(fXi, listw,
        #                             zero.policy = zero.policy,
        #                             prefix = prefix)
        # Match each lagged column back to its source column in Xi; the
        # "lag." prefix (4 chars + dot) is stripped before matching.
        colnames(WXi) <- paste(colnames(WXi), i, sep = "_")
        inds <- match(substring(colnames(WXi), 5,
                                nchar(colnames(WXi))),
                      colnames(Xi))
        if (anyNA(inds))
          stop("WX variables not in X: ",
               paste(substring(colnames(WXi), 5,
                               nchar(colnames(WXi)))[is.na(inds)],
                     collapse = " "))
        if (iicept) {
          xni <- colnames(Xi)[-1]
        } else {
          xni <- colnames(Xi)
        }
        wxni <- substring(colnames(WXi), nchar(prefix) + 2,
                          nchar(colnames(WXi)))
        # zero_fill marks X columns that have no lagged counterpart.
        zero_fill <- NULL
        if (length((which(!(xni %in% wxni)))) > 0L)
          zero_fill <- length(xni) + (which(!(xni %in% wxni)))
      }
      dvars[[i]] <- c(ncol(Xi), ncol(WXi))
      if (inherits(Durbin, "formula")) {
        attr(dvars[[i]], "f") <- attr(Durbin, "rhs")[[i]]
        attr(dvars[[i]], "inds") <- inds
        attr(dvars[[i]], "zero_fill") <- zero_fill
      }
      X[[i]] <- cbind(Xi, WXi)
    } else X[[i]] <- Xi
    p[i] <- ncol(X[[i]])
  }
  # Re-stack by time period: for period t, take the N rows of every
  # equation and combine them into one response vector and one
  # block-diagonal design matrix.
  Yt <- vector("list", Tm)
  Xt <- vector("list", Tm)
  for (i in 1:Tm) {
    Yg <- vector("list", G)
    Xg <- vector("list", G)
    for (j in 1:G){
      # Read the N rows of each response vector Yi and design matrix Xi
      Yj <- matrix(Y[[j]][((i-1)*N+1):(i*N)], ncol = 1)
      Xj <- X[[j]][((i-1)*N+1):(i*N),]
      colnames(Yj) <- colnames(Y[[j]])
      colnames(Xj) <- colnames(X[[j]])
      Yg[[j]] <- Yj
      Xg[[j]] <- Xj
    }
    Yt[[i]] <- unlist(Yg)
    Xt[[i]] <- Matrix::bdiag(Xg)
  }
  # Final matrices: concatenate the per-period blocks over time.
  Yf <- Yt[[1]]
  Xf <- Xt[[1]]
  if (Tm > 1) {
    for (i in 2:Tm) {
      Yf <- c(Yf,Yt[[i]])
      Xf <- rbind(Xf,Xt[[i]])
    }
  }
  Yf <- as.matrix(Yf, ncol = 1)
  Xf <- as.matrix(Xf)
  names_colX <- NULL
  for (i in 1:G) {
    names_colX <- c(names_colX,colnames(X[[i]]))
  }
  colnames(Xf) <- names_colX
  res <- list(Y = Yf, X = Xf, G = G, N = N, Tm = Tm, p = p,
              dvars = dvars)
  # BUGFIX: the function previously ended with the assignment above, so its
  # value was returned invisibly; return the result explicitly.
  return(res)
}
| /R/get_data_spsur.R | no_license | cran/spsur | R | false | false | 7,182 | r | get_data_spsur <- function(formula, mf, Durbin = FALSE,
listw = NULL, zero.policy = NULL,
N = NULL, Tm = NULL) {
# Build the stacked data matrices for a (possibly spatio-temporal) SUR model:
# a response vector Y and a block-diagonal regressor matrix X for G equations,
# optionally augmented with spatially lagged regressors (WX) when a Durbin
# specification is requested.
# Assumption: the data frame is ordered by space (N); overall order is N*G*Tm.
if (is.null(zero.policy))
zero.policy <- spatialreg::get.ZeroPolicyOption()
# G = number of equations (left-hand-side parts of the multi-part Formula).
G <- length(attr(formula, "lhs"))
if (length(attr(formula, "rhs")) < G) { ## Recycle the single RHS to all equations
for (i in 2:G) {
attr(formula, "rhs")[[i]] <- attr(formula, "rhs")[[1]]
}
}
if (inherits(Durbin, "formula")) {
# Promote a plain formula to a multi-part Formula and recycle its RHS too.
if (!inherits(Durbin, "Formula"))
Durbin <- Formula::Formula(Durbin)
if (length(attr(Durbin, "rhs")) < G) { ## Repeat RHS...
for (i in 2:G) {
attr(Durbin, "rhs")[[i]] <- attr(Durbin, "rhs")[[1]]
}
}
}
# Derive the cross-sectional dimension N from the model frame when Tm is known.
if (is.null(Tm)) {
N <- nrow(mf)
} else {
N <- nrow(mf) / Tm
}
if (!is.null(listw)) {
W <- as(spdep::listw2mat(listw), "dgCMatrix")
}
if (is.null(listw) && is.null(N))
stop("Dimension of spatial sample is needed")
if (is.null(Tm)) {
if (G > 1) { Tm <- nrow(mf) / N } else { Tm <- 1 }## TODO: revisit Tm derivation
}
Y <- vector("list", G)
X <- vector("list", G)
WX <- vector("list", G)
p <- rep(0, G)
dvars <- vector("list", G)
# Per-equation loop: extract Yi/Xi, suffix column names with the equation
# index, and (for Durbin models) append spatial lags of the regressors.
for (i in 1:G) {
Yi <- as.matrix(Formula::model.part(formula, data = mf,
lhs = i))
Xi <- model.matrix(formula, data = mf, rhs = i)
colnames(Xi) <- paste(colnames(Xi), i, sep = "_")
colnames(Yi) <- paste(colnames(Yi), i, sep = "_")
# dvars[[i]] = c(#X columns, #WX columns); WX count filled in below.
dvars[[i]] <- c(ncol(Xi), 0L)
icept <- grep("(Intercept)", colnames(Xi))
iicept <- length(icept) > 0L
Y[[i]] <- Yi
if (isTRUE(Durbin) || inherits(Durbin, "formula")) {
prefix <- "lag"
if (isTRUE(Durbin)) {
# Durbin = TRUE: lag every regressor (minus intercept, if present).
if (iicept) {
if (Tm == 1) {
WXi <- spatialreg::create_WX(Xi[,-c(icept), drop = FALSE],
listw,
zero.policy = zero.policy,
prefix = prefix)
} else {
# Lag each time slice separately, then stack.
WXi <- NULL
for (j in 1:Tm) {
# NOTE(review): loop runs over the time index j but the row window
# uses i (the equation index), so every iteration extracts the same
# rows -- likely should be (((j-1)*N)+1):(j*N); verify upstream.
Xit <- Xi[((((i-1)*N)+1):(i*N)), ]
WXit <- spatialreg::create_WX(Xit[,-c(icept), drop = FALSE],
listw,
zero.policy = zero.policy,
prefix = prefix)
WXi <- rbind(WXi, WXit)
}
}
} else {
if (Tm == 1) {
WXi <- spatialreg::create_WX(Xi, listw,
zero.policy = zero.policy,
prefix = prefix)
} else {
WXi <- NULL
for (j in 1:Tm) {
# NOTE(review): same i-vs-j indexing concern as above.
Xit <- Xi[((((i-1)*N)+1):(i*N)), ]
WXit <- spatialreg::create_WX(Xit, listw,
zero.policy = zero.policy,
prefix = prefix)
WXi <- rbind(WXi, WXit)
}
}
}
} else { ## Durbin is a formula: lag only the variables it names
fXi <- try(model.matrix(Durbin, data = mf, rhs = i),
silent = TRUE)
if (inherits(fXi, "try-error"))
stop("Durbin variable mist-match")
if (iicept) {
if (Tm == 1) {
WXi <- spatialreg::create_WX(fXi[,-c(icept), drop = FALSE],
listw,
zero.policy = zero.policy,
prefix = prefix)
} else {
WXi <- NULL
for (j in 1:Tm) {
# NOTE(review): same i-vs-j indexing concern as above.
fXit <- fXi[((((i-1)*N)+1):(i*N)), ]
WXit <- spatialreg::create_WX(fXit[,-c(icept), drop = FALSE],
listw,
zero.policy = zero.policy,
prefix = prefix)
WXi <- rbind(WXi, WXit)
}
}
} else {
if (Tm == 1) {
WXi <- spatialreg::create_WX(fXi, listw,
zero.policy = zero.policy,
prefix = prefix)
} else {
WXi <- NULL
for (j in 1:Tm) {
# NOTE(review): same i-vs-j indexing concern as above.
fXit <- fXi[((((i-1)*N)+1):(i*N)), ]
WXit <- spatialreg::create_WX(fXit, listw,
zero.policy = zero.policy,
prefix = prefix)
WXi <- rbind(WXi, WXit)
}
}
}
#WXi <- spatialreg::create_WX(fXi, listw,
# zero.policy = zero.policy,
# prefix = prefix)
colnames(WXi) <- paste(colnames(WXi), i, sep = "_")
# Map each lagged column back to its source column in Xi; the substring
# start of 5 strips the "lag." prefix (nchar("lag") + the dot + 1).
inds <- match(substring(colnames(WXi), 5,
nchar(colnames(WXi))),
colnames(Xi))
if (anyNA(inds))
stop("WX variables not in X: ",
paste(substring(colnames(WXi), 5,
nchar(colnames(WXi)))[is.na(inds)],
collapse = " "))
if (iicept) {
xni <- colnames(Xi)[-1]
} else {
xni <- colnames(Xi)
}
wxni <- substring(colnames(WXi), nchar(prefix) + 2,
nchar(colnames(WXi)))
# Positions of X columns that have no lagged counterpart (impact methods
# need zero coefficients filled in for these).
zero_fill <- NULL
if (length((which(!(xni %in% wxni)))) > 0L)
zero_fill <- length(xni) + (which(!(xni %in% wxni)))
}
dvars[[i]] <- c(ncol(Xi), ncol(WXi))
if (inherits(Durbin, "formula")) {
attr(dvars[[i]], "f") <- attr(Durbin, "rhs")[[i]]
attr(dvars[[i]], "inds") <- inds
attr(dvars[[i]], "zero_fill") <- zero_fill
}
X[[i]] <- cbind(Xi, WXi)
} else X[[i]] <- Xi
p[i] <- ncol(X[[i]])
}
# Reassemble by time period: for each t, read the N rows of every equation's
# Yi/Xi and form a block-diagonal regressor matrix across equations.
Yt <- vector("list", Tm)
Xt <- vector("list", Tm)
for (i in 1:Tm) {
Yg <- vector("list", G)
Xg <- vector("list", G)
for (j in 1:G){
# Read N rows of each vector Yi and matrix Xi for this time period.
Yj <- matrix(Y[[j]][((i-1)*N+1):(i*N)], ncol = 1)
Xj <- X[[j]][((i-1)*N+1):(i*N),]
colnames(Yj) <- colnames(Y[[j]])
colnames(Xj) <- colnames(X[[j]])
Yg[[j]] <- Yj
Xg[[j]] <- Xj
}
Yt[[i]] <- unlist(Yg)
Xt[[i]] <- Matrix::bdiag(Xg)
}
# Final matrices: stack the per-period blocks over time.
Yf <- Yt[[1]]
Xf <- Xt[[1]]
if (Tm > 1) {
for (i in 2:Tm) {
Yf <- c(Yf,Yt[[i]])
Xf <- rbind(Xf,Xt[[i]])
}
}
Yf <- as.matrix(Yf, ncol = 1)
Xf <- as.matrix(Xf)
# Restore column names lost by Matrix::bdiag().
names_colX <- NULL
for (i in 1:G) {
names_colX <- c(names_colX,colnames(X[[i]]))
}
colnames(Xf) <- names_colX
# Returned invisibly as the last expression: data plus dimensions needed by
# the estimation routines.
res <- list(Y = Yf, X = Xf, G = G, N = N, Tm = Tm, p = p,
dvars = dvars)
}
|
# Province definition: display name, municipality codes, and RPA region codes
# for Noord-Brabant, consumed by the scripts that aggregate by province.
ProvincieNaam <- "Noord-Brabant"
# "GM" codes -- presumably CBS municipality identifiers for the province;
# TODO confirm against the CBS gemeente list for the relevant year.
Gems <- c("GM0738","GM0748","GM0758","GM1719","GM0777","GM0779","GM1655","GM1709","GM0826","GM1674","GM0840","GM0851","GM0870","GM0873","GM0874","GM0879","GM1723","GM0744","GM0766","GM0784","GM0785","GM0797","GM0798","GM0809","GM0824","GM0855","GM0867","GM1721","GM0755","GM0756","GM0757","GM1684","GM0786","GM0788","GM0796","GM1685","GM1671","GM0815","GM0828","GM0844","GM1702","GM0845","GM0846","GM0856","GM0860","GM0865","GM0743","GM1724","GM0753","GM1728","GM0762","GM0770","GM0772","GM1771","GM1652","GM1658","GM0794","GM1659","GM0820","GM0823","GM1667","GM0847","GM0848","GM0858","GM0861","GM0866","GM1706")
# "RP" codes -- RPA (regional) identifiers; semantics not visible in this file.
RPAs <- c("RP25","RP26","RP27","RP28","RP30")
| /provinciesInR/NoordBrabant.r | no_license | edwindj/eugrid | R | false | false | 693 | r | ProvincieNaam <- "Noord-Brabant"
Gems <- c("GM0738","GM0748","GM0758","GM1719","GM0777","GM0779","GM1655","GM1709","GM0826","GM1674","GM0840","GM0851","GM0870","GM0873","GM0874","GM0879","GM1723","GM0744","GM0766","GM0784","GM0785","GM0797","GM0798","GM0809","GM0824","GM0855","GM0867","GM1721","GM0755","GM0756","GM0757","GM1684","GM0786","GM0788","GM0796","GM1685","GM1671","GM0815","GM0828","GM0844","GM1702","GM0845","GM0846","GM0856","GM0860","GM0865","GM0743","GM1724","GM0753","GM1728","GM0762","GM0770","GM0772","GM1771","GM1652","GM1658","GM0794","GM1659","GM0820","GM0823","GM1667","GM0847","GM0848","GM0858","GM0861","GM0866","GM1706")
RPAs <- c("RP25","RP26","RP27","RP28","RP30")
|
################################################################################
# AUTHOR: Rhys Jan van den Handel
# DATE: November 2020
# Name: FIFA 2019 Player Ratings
################################################################################
################################################################################
# ------------------------------------------------------------------------------
# PROJECT PREPARATION
# ------------------------------------------------------------------------------
################################################################################
##########################################################
# PACKAGE LOADING
##########################################################
# Install any dependency that is missing, then attach every package.
required_pkgs <- c("tidyverse", "caret", "data.table", "gsubfn", "stringr", "dplyr")
for (pkg in required_pkgs) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, repos = "http://cran.us.r-project.org")
  }
  library(pkg, character.only = TRUE)
}
##########################################################
# LOAD DATA
##########################################################
#Download Data from internet
# NOTE(review): Kaggle downloads normally require an authenticated session;
# this URL may return an HTML login page rather than the zip -- verify.
temp <- tempfile()
url <- "https://www.kaggle.com/karangadiya/fifa19/download/archive.zip"
download.file(url, temp)
unzip(temp, "data.csv")
# `data` shadows base::data() for the remainder of the script.
data<-read.csv("data.csv", header = TRUE)
unlink(temp)
#Read the data in from project
# NOTE(review): Windows-style path separator; ".\\data.csv" will not resolve
# the same way on other platforms.
readcsv <- read.csv(".\\data.csv", header = TRUE)
#Replace data with readcsv for using project dataset
PlayerData <- data.frame(data)
#view the dataset
head(PlayerData)
any(is.na(PlayerData)) #True: therefore there are NA's
nrow(PlayerData) # 18207
#Remove the NA's
PlayerData <- PlayerData %>% drop_na()
nrow(PlayerData) # 18147 Therefore, only 60 rows dropped
##########################################################
# LOAD INTO WORKING AND VALIDATION SET
##########################################################
# Validation set will be 10% of the dataset.
# sample.kind="Rounding" reproduces the pre-R-3.6 sampling algorithm so the
# partition matches runs on older R versions.
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = PlayerData$Overall, times = 1, p = 0.1, list = FALSE)
players <- PlayerData[-test_index,]
validation <- PlayerData[test_index,]
################################################################################
# ------------------------------------------------------------------------------
# DATA ANALYSIS AND PREPARATION
# ------------------------------------------------------------------------------
################################################################################
##########################################################
# DATA EXPLORATION
##########################################################
#Checking the players data set
names(players)
head(players)
any(is.na(players))
#Distribution of player ratings
hist(players$Overall)
#Summary statistics of rating
summary(players$Overall)
#--- AGE SUMMARY -----------------------------------------
#Distribution of player ages
hist(players$Age)
# Per-age player counts and mean Overall rating.
ages <- players %>% group_by(Age) %>%
summarize(n=n(),mean_rate=mean(Overall))
#Plot of number of players and average rating by age.
# The count is scaled by 1/20 so both series fit one panel; the secondary
# axis (~.*20) undoes that scaling for labelling.
ages %>% ggplot(aes(x=Age)) +
geom_point(aes(y=n/20), color="blue") +
geom_point(aes(y=mean_rate), color="red")+
scale_y_continuous(name = "Mean Rating",sec.axis = sec_axis(~.*20, name="Number Players")) +
ggtitle("Average Ratings and Number of Players by Age")
#--- Height and Weight -----------------------------------------
# Columns needed for the height/weight exploration.
hwcols <- c("Name","Age","Overall","Height","Weight","Body.Type")
# all_of() makes the external-vector selection explicit and errors if a
# column is missing (avoids tidyselect ambiguity with a same-named column).
hw <- players %>% select(all_of(hwcols))
# Parse Weight and Height into numbers, then convert Height to metres
# (1 ft = 0.3048 m, 1 in = 0.0254 m).
# Assumes formats like "165lbs" and 5'9 (single digit of feet) -- TODO confirm.
temp <- hw %>% mutate(Weight=as.numeric(str_sub(Weight,1,-4)),ft=as.numeric(str_sub(Height,1,1)),inc=as.numeric(str_sub(Height,3,-1)))
hw <- temp %>% mutate(Height=((inc*0.0254)+(ft*0.3048))) %>% select(all_of(hwcols))
#Distribution of height and weight
hist(hw$Height)
hist(hw$Weight)
#Height and Weight vs Rating
hw_rating <- hw %>% group_by(Overall) %>%
summarize(n=n(),height=mean(Height),weight=mean(Weight))
hw_rating %>% ggplot(aes(x=Overall)) +
geom_point(aes(y=weight/100), color="blue") +
geom_point(aes(y=height), color="red")+
scale_y_continuous(name = "Mean Height (Red)",sec.axis = sec_axis(~.*100, name="Mean weight (Blue)")) +
ggtitle("Average Height and Weight of Players by Rating")
#So there is a correlation in Weight but not in height
boxplot(hw$Overall~hw$Body.Type)
#Body Type does not give anything useful. The player names as body types are not useful and will make using it to predict ratings difficult.
#--- Nationality -----------------------------------------
# Player count and mean rating per nationality.
nation <- players %>% group_by(Nationality) %>%
summarize(n=n(),rating=mean(Overall))
summary(nation$n)
#Nations with most players
nation %>% arrange(desc(n)) %>%
top_n(10,n)
#Nations with least players
nation %>% arrange((n)) %>%
top_n(10,-n)
#Nations with best players (only nations with more than 12 players)
nation %>% filter(n>12) %>%
arrange(desc(rating)) %>%
top_n(10,rating)
#Nations with worst players
nation %>% filter(n>12) %>%
arrange((rating)) %>%
top_n(10,-rating)
#Nationality has an impact but due to the inconsistent number of players of each nation it would be difficult to use.
#--- Clubs -----------------------------------------
# Player count and mean rating per club.
clubs <- players %>% group_by(Club) %>%
summarize(n=n(),rating=mean(Overall))
summary(clubs$n)
#Clubs with most players
clubs %>% arrange(desc(n)) %>%
top_n(10,n)
#Clubs with least players
clubs %>% arrange((n)) %>%
top_n(10,-n)
#Clubs with best players
clubs %>% arrange(desc(rating)) %>%
top_n(10,rating)
#Clubs with worst players
clubs %>% arrange((rating)) %>%
top_n(10,-rating)
#Clubs are definitely a good option for training
#--- Jersey.Number -----------------------------------------
# Player count and mean rating per jersey number.
Jersey <- players %>% group_by(Jersey.Number) %>%
summarize(n=n(),rating=mean(Overall))
summary(Jersey$n)
#Jersey numbers with most players
Jersey %>% arrange(desc(n)) %>%
top_n(10,n)
#Jersey numbers with least players
Jersey %>% arrange((n)) %>%
top_n(10,-n)
#Jersey numbers with best players
Jersey %>% arrange(desc(rating)) %>%
top_n(10,rating)
#Jersey numbers with worst players
Jersey %>% arrange((rating)) %>%
top_n(10,-rating)
#Jersey Numbers are definitely a good option for training. However, due to non linearity regularization should be used
#--- Position and Skills -----------------------------------------
pscols <- c("Name","Age","Overall","Special","International.Reputation","Weak.Foot","Skill.Moves","Work.Rate","Position")
# all_of() makes the external-vector selection explicit and errors if a
# column is missing.
ps <- players %>% select(all_of(pscols))
#Work.Rate is not an easy item to quantify
# Rating spread across each categorical/ordinal attribute.
boxplot(ps$Overall~ps$Position)
boxplot(ps$Overall~ps$International.Reputation)
boxplot(ps$Overall~ps$Weak.Foot)
boxplot(ps$Overall~ps$Skill.Moves)
#Position is not helpful but International Reputation, Weak foot and Skill moves Will be useful.
#--- MONETARY VALUES -----------------------------------------
moncols <- c("Name","Age","Overall","Value","Wage","Release.Clause")
# all_of() makes the external-vector selection explicit.
money <- players %>% select(all_of(moncols))
# Split each money string into a unit suffix (K or M) and a numeric part.
# Assumes values look like "<euro>110.5M" (3-byte currency prefix) -- TODO confirm.
temp <- money %>% mutate(value_unit=(str_sub(Value,-1,-1)),value_euro=as.numeric(str_sub(Value,4,-2)),wage_unit=(str_sub(Wage,-1,-1)),wage_euro=as.numeric(str_sub(Wage,4,-2)),release_unit=(str_sub(Release.Clause,-1,-1)),release_euro=as.numeric(str_sub(Release.Clause,4,-2)))
money <- temp %>% mutate(Value=ifelse(value_unit=="K",value_euro*1000,value_euro*1000000),Wage=ifelse(wage_unit=="K",wage_euro*1000,wage_euro*1000000),Release.Clause=ifelse(release_unit=="K",release_euro*1000,release_euro*1000000)) %>%
select(all_of(moncols))
# Unparseable values become NA above; treat them as 0.
money[is.na(money)] <- 0
money_rating <- money %>% group_by(Overall) %>%
summarize(n=n(),value=mean(Value),wage=mean(Wage),release=mean(Release.Clause))
plot(money_rating$Overall,money_rating$wage)
plot(money_rating$Overall,money_rating$value)
plot(money_rating$Overall,money_rating$release)
#All Monetary values have a large impact at higher overall ratings.
################################################################################
# ------------------------------------------------------------------------------
# PREDICTIVE MODEL
# ------------------------------------------------------------------------------
################################################################################
##########################################################
# DATA PREPARATION
##########################################################
#--- Columns to be used ---
header <- c("Name","Age","Overall","Weight","Value","Wage","Release.Clause","International.Reputation","Weak.Foot","Skill.Moves","Club","Jersey.Number")
# all_of() makes the external-vector selections explicit (and errors if a
# listed column is missing) throughout this section.
players <- players %>% select(all_of(header))
validation <- validation %>% select(all_of(header))
#--- players data set ---
#Fix monetary formatting: split "<euro>110.5M"-style strings into unit (K/M)
#and numeric part, then rescale to plain euros.
temp <- players %>% mutate(value_unit=(str_sub(Value,-1,-1)),value_euro=as.numeric(str_sub(Value,4,-2)),wage_unit=(str_sub(Wage,-1,-1)),wage_euro=as.numeric(str_sub(Wage,4,-2)),release_unit=(str_sub(Release.Clause,-1,-1)),release_euro=as.numeric(str_sub(Release.Clause,4,-2)))
players <- temp %>% mutate(Value=ifelse(value_unit=="K",value_euro*1000,value_euro*1000000),Wage=ifelse(wage_unit=="K",wage_euro*1000,wage_euro*1000000),Release.Clause=ifelse(release_unit=="K",release_euro*1000,release_euro*1000000)) %>%
select(all_of(header))
# Same monetary fix for the validation set.
temp <- validation %>% mutate(value_unit=(str_sub(Value,-1,-1)),value_euro=as.numeric(str_sub(Value,4,-2)),wage_unit=(str_sub(Wage,-1,-1)),wage_euro=as.numeric(str_sub(Wage,4,-2)),release_unit=(str_sub(Release.Clause,-1,-1)),release_euro=as.numeric(str_sub(Release.Clause,4,-2)))
validation <- temp %>% mutate(Value=ifelse(value_unit=="K",value_euro*1000,value_euro*1000000),Wage=ifelse(wage_unit=="K",wage_euro*1000,wage_euro*1000000),Release.Clause=ifelse(release_unit=="K",release_euro*1000,release_euro*1000000)) %>%
select(all_of(header))
#Fix weight format: drop the "lbs" suffix and keep the number.
temp <- players %>% mutate(Weight=as.numeric(str_sub(Weight,1,-4)))
players <- temp %>% select(all_of(header))
temp <- validation %>% mutate(Weight=as.numeric(str_sub(Weight,1,-4)))
validation <- temp %>% select(all_of(header))
#Ensure N/As are set as 0
players[is.na(players)] <- 0
validation[is.na(validation)] <- 0
#--- View Data set ---
#players
head(players)
nrow(players) #Should be 16331
any(is.na(players))
# validation
head(validation)
nrow(validation) #Should be 1816
any(is.na(validation))
#--- Clear Memory ---
# `temp` is deliberately kept as a global above so this rm() finds it.
rm(ages,clubs,hw,hw_rating,money,money_rating,nation,PlayerData,ps,readcsv,temp,Jersey)
##########################################################
# LOAD INTO TEST AND TRAIN DATA
##########################################################
# Test set will be 10% of the dataset
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = players$Overall, times = 1, p = 0.1, list = FALSE)
train_set <- players[-test_index,]
test_set <- players[test_index,]
##########################################################
# BASIC STATISTICS
##########################################################
summary(train_set$Overall)
# Baseline statistics of the training ratings.
mu <- mean(train_set$Overall)
# NOTE(review): `med` is not referenced again in this script.
med <- median(train_set$Overall)
##########################################################
# MEAN MODEL
##########################################################
#Baseline model: predict the training mean for every player.
#Running an RMSE on mean
RMSE_mu <- sqrt(mean((test_set$Overall-mu)^2))
RMSE_mu
#Absolute Error
AbsError_mu <- mean(abs(mu-test_set$Overall))
AbsError_mu
##########################################################
# CARET GLM MODEL
##########################################################
# Three independent GLMs, each on a different feature family; their fits,
# train/test predictions and error metrics are reused by later sections.
#--- AGE AND WEIGHT PREDICTION------------------------------#
#run a GLM
fit_aw <- train(Overall~Age+Weight,data=train_set, method = "glm")
fit_aw
train_pred_aw <-predict(fit_aw,train_set)
pred_aw <-predict(fit_aw,test_set)
RMSE_aw <- sqrt(mean((test_set$Overall-pred_aw)^2))
RMSE_aw
#Absolute Error
AbsError_aw <- mean(abs(pred_aw-test_set$Overall))
AbsError_aw
#--- SKILL, WEAK FOOT AND REPUTATION PREDICTION ----------#
#run a GLM
fit_swr <- train(Overall~Skill.Moves+Weak.Foot+International.Reputation,data=train_set, method = "glm")
fit_swr
train_pred_swr <-predict(fit_swr,train_set)
pred_swr <-predict(fit_swr,test_set)
RMSE_swr <- sqrt(mean((test_set$Overall-pred_swr)^2))
RMSE_swr
#Absolute Error
AbsError_swr <- mean(abs(pred_swr-test_set$Overall))
AbsError_swr
#--- MONETARY PREDICTION ------------------------------#
#run a GLM
fit_mon <- train(Overall~Value + Wage + Release.Clause,data=train_set, method = "glm")
fit_mon
train_pred_mon <-predict(fit_mon,train_set)
pred_mon <-predict(fit_mon,test_set)
#Running an RMSE to view the error
RMSE_mon_glm <- sqrt(mean((test_set$Overall-pred_mon)^2))
RMSE_mon_glm
#Absolute Error
AbsError_mon <- mean(abs(pred_mon-test_set$Overall))
AbsError_mon
##########################################################
# REGULARISATION
##########################################################
#--- Club -----------------------------------------------#
# Club effect b_club = sum of rating residuals (Overall - mu) per club,
# shrunk by a penalty p: b_club = rsum / (n + p). Larger p pulls the
# effect of sparsely-populated clubs toward 0.
#Create the regularized for sum and mean
club <- train_set %>% group_by(Club) %>%
summarize(n=n(),rsum=sum(Overall-mu))
t_club <- test_set %>% left_join(club,by='Club')
train_club <- train_set %>% left_join(club,by='Club')
#Sample size regularization accounting for sample size n (no penalty yet)
t_club <- t_club %>% mutate(b_club=rsum/n)
RMSE_club_nopen <- sqrt(mean((test_set$Overall-(mu+t_club$b_club))^2))
RMSE_club_nopen
#regularization optimized with penalty term p
# NOTE(review): p is selected by minimizing RMSE on the training set itself,
# not on a held-out set, which risks favoring small (even negative) p.
p <- seq(-10,20)
#sapply the terms
RMSEs <- sapply(p,function(p){
club <- club %>% mutate(b_club=rsum/(n+p))
train_club <- train_set %>% left_join(club,by='Club')
sqrt(mean((train_set$Overall-(mu+train_club$b_club))^2))
})
#plot outputs
plot(p,RMSEs)
#Improve accuracy: refine the search on a finer grid.
p <- seq(-5,5,0.2)
#sapply the terms
RMSEs <- sapply(p,function(p){
club <- club %>% mutate(b_club=rsum/(n+p))
train_club <- train_set %>% left_join(club,by='Club')
sqrt(mean((train_set$Overall-(mu+train_club$b_club))^2))
})
#plot outputs
plot(p,RMSEs)
p_club <- p[which.min(RMSEs)]
p_club
#final optimized output
t_club <- t_club %>% mutate(b_club=rsum/(n+p_club))
RMSE_club <- sqrt(mean((test_set$Overall-(mu+t_club$b_club))^2))
RMSE_club
#Absolute Error
AbsError_club <- mean(abs((mu+t_club$b_club)-test_set$Overall))
AbsError_club
#--- Jersey Number -----------------------------------------------#
# Same regularization scheme as the club effect, keyed on jersey number:
# b_jersey = sum of rating residuals / (n + p).
#Create the regularized for sum and mean
Jersey <- train_set %>% group_by(Jersey.Number) %>%
summarize(n=n(),rsum=sum(Overall-mu))
t_jersey <- test_set %>% left_join(Jersey,by='Jersey.Number')
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
#Sample size regularization accounting for sample size n (no penalty yet)
t_jersey <- t_jersey %>% mutate(b_jersey=rsum/n)
RMSE_jn_nopen <- sqrt(mean((test_set$Overall-(mu+t_jersey$b_jersey))^2))
RMSE_jn_nopen
#regularization optimized with penalty term p
# NOTE(review): as with the club effect, p is tuned on the training set
# itself rather than a held-out set.
p <- seq(-10,20)
#sapply the terms
RMSEs <- sapply(p,function(p){
Jersey <- Jersey %>% mutate(b_jersey=rsum/(n+p))
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
sqrt(mean((train_set$Overall-(mu+train_jn$b_jersey))^2))
})
#plot outputs
plot(p,RMSEs)
#Improve accuracy: refine the search on a finer grid.
p <- seq(-1,5,0.2)
#sapply the terms
RMSEs <- sapply(p,function(p){
Jersey <- Jersey %>% mutate(b_jersey=rsum/(n+p))
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
sqrt(mean((train_set$Overall-(mu+train_jn$b_jersey))^2))
})
#plot outputs
plot(p,RMSEs)
p_jersey <- p[which.min(RMSEs)]
p_jersey
#final optimized output
t_jersey <- t_jersey %>% mutate(b_jersey=rsum/(n+p_jersey))
RMSE_jn <- sqrt(mean((test_set$Overall-(mu+t_jersey$b_jersey))^2))
RMSE_jn
#Absolute Error
AbsError_jn <- mean(abs((mu+t_jersey$b_jersey)-test_set$Overall))
AbsError_jn
###############################################################################
# COMBINED REG
###############################################################################
#Combine monetary, physical and attribute values (Note these already contain mu).
#(A dead initial `b_sum = train_pred_mon` assignment was removed: it was
#immediately overwritten below.)
train_b_sum <- (train_pred_mon + train_pred_aw + train_pred_swr)/3
b_sum <- (pred_mon + pred_aw + pred_swr)/3
#Using a more powerful computer to run all predictors in one GLM;
#this overwrites the averaged predictions above.
fit_glm <- train(Overall~Age+Weight+Skill.Moves+Weak.Foot+International.Reputation+Value + Wage + Release.Clause,data=train_set, method = "glm")
train_b_sum <- predict(fit_glm,train_set)
b_sum <- predict(fit_glm,test_set)
#Results of the glm combined
RMSE_glm <- sqrt(mean((test_set$Overall-b_sum)^2))
RMSE_glm
#Absolute Error
AbsError_glm <- mean(abs(b_sum-test_set$Overall))
AbsError_glm
#COMBINE CLUB INTO MODEL WITH TUNING
#Re-tune the club penalty p against the combined GLM baseline.
p <- seq(0,90)
#sapply the terms
RMSEs <- sapply(p,function(p){
club <- club %>% mutate(b_club=rsum/(n+p))
train_club <- train_set %>% left_join(club,by='Club')
sqrt(mean((train_set$Overall-(train_b_sum+train_club$b_club))^2))
})
#plot outputs
plot(p,RMSEs)
p_club <- p[which.min(RMSEs)]
#final optimized output: add the club effect to both prediction vectors.
t_club <- t_club %>% mutate(b_club=rsum/(n+p_club))
train_club <- train_club %>% mutate(b_club=rsum/(n+p_club))
b_sum <- b_sum+t_club$b_club
train_b_sum <- train_b_sum+train_club$b_club
#COMBINE Jersey Number INTO MODEL WITH TUNING
#regularization optimized with penalty term p
p <- seq(1000,2000)
#sapply the terms
RMSEs <- sapply(p,function(p){
Jersey <- Jersey %>% mutate(b_jersey=rsum/(n+p))
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
sqrt(mean((train_set$Overall-(train_b_sum+train_jn$b_jersey))^2))
})
#As the sequence gets larger the accuracy improvement is less effective therefore will not be used.
#plot outputs
plot(p,RMSEs)
p_jersey <- p[which.min(RMSEs)]
#final optimized output: add the jersey-number effect as well.
t_jersey <- t_jersey %>% mutate(b_jersey=rsum/(n+p_jersey))
train_jn <- train_jn %>% mutate(b_jersey=rsum/(n+p_jersey))
b_sum <- b_sum+t_jersey$b_jersey
train_b_sum <- train_b_sum+train_jn$b_jersey
RMSE_combined <- sqrt(mean((test_set$Overall-b_sum)^2))
RMSE_combined
#Combined Error
AbsError_combined <- mean(abs(b_sum-test_set$Overall))
AbsError_combined
#Accuracy of prediction: share of players whose rounded prediction is exact.
acc <- round(b_sum,0) == test_set$Overall
mean(acc)*100
#Distribution of error
Error_Combined <- b_sum-test_set$Overall
hist(Error_Combined)
###############################################################################
# RESULTS
###############################################################################
# Collect every model's RMSE and mean absolute error into one comparison table.
results_table <- bind_rows(
  data.frame(Method = 'Mean Prediction', RMSE = RMSE_mu, Error = AbsError_mu),
  data.frame(Method = 'Physical Prediction', RMSE = RMSE_aw, Error = AbsError_aw),
  data.frame(Method = 'Club Prediction', RMSE = RMSE_club, Error = AbsError_club),
  data.frame(Method = 'Jersey Number Prediction', RMSE = RMSE_jn, Error = AbsError_jn),
  data.frame(Method = 'Simple Attributes Prediction', RMSE = RMSE_swr, Error = AbsError_swr),
  data.frame(Method = 'Monetary Prediction', RMSE = RMSE_mon_glm, Error = AbsError_mon),
  data.frame(Method = 'Combined GLM Results', RMSE = RMSE_glm, Error = AbsError_glm),
  data.frame(Method = 'Combined Results', RMSE = RMSE_combined, Error = AbsError_combined)
)
results_table
################################################################################
# ------------------------------------------------------------------------------
# VALIDATION
# ------------------------------------------------------------------------------
################################################################################
#individual glm models applied to the held-out validation set
val_mon <- predict(fit_mon,validation)
val_aw <- predict(fit_aw,validation)
val_swr <- predict(fit_swr,validation)
#Club regularization (recomputed from the full training set)
club <- train_set %>% group_by(Club) %>%
summarize(n=n(),rsum=sum(Overall-mu))
val_club <- validation %>% left_join(club,by='Club')
#Clubs absent from the training set join as NA; give them a zero effect
#instead of propagating NA into the final prediction.
val_club <- val_club %>% mutate(b_club=ifelse(is.na(rsum),0,rsum/(n+p_club)))
#Jersey Number regularization
Jersey <- train_set %>% group_by(Jersey.Number) %>%
summarize(n=n(),rsum=sum(Overall-mu))
val_jn <- validation %>% left_join(Jersey,by='Jersey.Number')
#BUG FIX: this previously mutated val_club, so b_jersey was computed from the
#club sums instead of the jersey-number sums joined just above.
val_jn <- val_jn %>% mutate(b_jersey=ifelse(is.na(rsum),0,rsum/(n+p_jersey)))
#Combine results
val_b_sum <- (val_mon + val_aw + val_swr)/3
#With a more powerful computer the full glm can be run (overwrites the average)
val_glm <- predict(fit_glm,validation)
val_b_sum <- val_glm
val_b_sum <- val_b_sum+val_club$b_club+val_jn$b_jersey
#Final result
RMSE_Final <- sqrt(mean((validation$Overall-val_b_sum)^2))
RMSE_Final
#Final Error
AbsError_Final <- mean(abs(val_b_sum-validation$Overall))
AbsError_Final
#Accuracy of prediction: share of players whose rounded prediction is exact.
acc <- round(val_b_sum,0) == validation$Overall
mean(acc)*100
#Distribution of error
val_error <- val_b_sum-validation$Overall
hist(val_error)
#Save into Results table
results_table <- bind_rows(results_table,data.frame(Method='Validation Results', RMSE = RMSE_Final, Error = AbsError_Final))
results_table
| /FIFA_Player_Ratings/FIFA_2019_Player_Ratings_2.R | no_license | rhysvdh/EdxDataScience | R | false | false | 21,920 | r | ################################################################################
# AUTHOR: Rhys Jan van den Handel
# DATE: November 2020
# Name: FIFA 2019 Player Ratings
################################################################################
################################################################################
# ------------------------------------------------------------------------------
# PROJECT PREPARATION
# ------------------------------------------------------------------------------
################################################################################
##########################################################
# PACKAGE LOADING
##########################################################
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(gsubfn)) install.packages("gsubfn", repos = "http://cran.us.r-project.org")
if(!require(stringr)) install.packages("stringr", repos = "http://cran.us.r-project.org")
if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(data.table)
library(gsubfn)
library(stringr)
library(dplyr)
##########################################################
# LOAD DATA
##########################################################
#Download Data from internet
temp <- tempfile()
url <- "https://www.kaggle.com/karangadiya/fifa19/download/archive.zip"
download.file(url, temp)
unzip(temp, "data.csv")
data<-read.csv("data.csv", header = TRUE)
unlink(temp)
#Read the data in from project
readcsv <- read.csv(".\\data.csv", header = TRUE)
#Replace data with readcsv for using project dataset
PlayerData <- data.frame(data)
#view the dataset
head(PlayerData)
any(is.na(PlayerData)) #True: therefore there are NA's
nrow(PlayerData) # 18207
#Remove the NA's
PlayerData <- PlayerData %>% drop_na()
nrow(PlayerData) # 18147 Therefore, only 60 rows dropped
##########################################################
# LOAD INTO WORKING AND VALIDATION SET
##########################################################
# Validation set will be 10% of the dataset
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = PlayerData$Overall, times = 1, p = 0.1, list = FALSE)
players <- PlayerData[-test_index,]
validation <- PlayerData[test_index,]
################################################################################
# ------------------------------------------------------------------------------
# DATA ANALYSIS AND PREPARATION
# ------------------------------------------------------------------------------
################################################################################
##########################################################
# DATA EXPLORATION
##########################################################
#Checking the players data set
names(players)
head(players)
any(is.na(players))
#Distribution of player ratings
hist(players$Overall)
#Summary statistics of rating
summary(players$Overall)
#--- AGE SUMMARY -----------------------------------------
#Distribution of player ages
hist(players$Age)
# Player count and mean rating per age
ages <- players %>% group_by(Age) %>%
summarize(n=n(),mean_rate=mean(Overall))
#Plot of number of players and average rating by age
# (player counts are scaled by 1/20 so both series fit on one axis;
# the secondary axis undoes the scaling)
ages %>% ggplot(aes(x=Age)) +
geom_point(aes(y=n/20), color="blue") +
geom_point(aes(y=mean_rate), color="red")+
scale_y_continuous(name = "Mean Rating",sec.axis = sec_axis(~.*20, name="Number Players")) +
ggtitle("Average Ratings and Number of Players by Age")
#--- Height and Weight -----------------------------------------
hwcols <- c("Name","Age","Overall","Height","Weight","Body.Type")
# NOTE(review): selecting with a bare external character vector is deprecated
# in tidyselect; select(all_of(hwcols)) is the supported spelling.
hw <- players %>% select(hwcols)
# Weight: strip the last 3 characters (presumably a "lbs" suffix -- confirm).
# Height: first character taken as feet, characters after the separator as
# inches (assumes a form like 5'10 -- TODO confirm against the raw data).
temp <- hw %>% mutate(Weight=as.numeric(str_sub(Weight,1,-4)),ft=as.numeric(str_sub(Height,1,1)),inc=as.numeric(str_sub(Height,3,-1)))
# Convert height to metres (0.0254 m per inch, 0.3048 m per foot)
hw <- temp %>% mutate(Height=((inc*0.0254)+(ft*0.3048))) %>% select(hwcols)
#Distribution of height and weight
hist(hw$Height)
hist(hw$Weight)
#Height and Weight vs Rating
hw_rating <- hw %>% group_by(Overall) %>%
summarize(n=n(),height=mean(Height),weight=mean(Weight))
hw_rating %>% ggplot(aes(x=Overall)) +
geom_point(aes(y=weight/100), color="blue") +
geom_point(aes(y=height), color="red")+
scale_y_continuous(name = "Mean Height (Red)",sec.axis = sec_axis(~.*100, name="Mean weight (Blue)")) +
ggtitle("Average Height and Weight of Players by Rating")
#So there is a correlation in Weight but not in height
boxplot(hw$Overall~hw$Body.Type)
#Body Type does not give anything useful. The player names as body types are not useful and will make using it to predict ratings difficult.
#--- Nationality -----------------------------------------
nation <- players %>% group_by(Nationality) %>%
summarize(n=n(),rating=mean(Overall))
summary(nation$n)
#Nations with most players
nation %>% arrange(desc(n)) %>%
top_n(10,n)
#Nations with least players
nation %>% arrange((n)) %>%
top_n(10,-n)
#Nations with best players (only nations with more than 12 players)
nation %>% filter(n>12) %>%
arrange(desc(rating)) %>%
top_n(10,rating)
#Nations with worst players (only nations with more than 12 players)
nation %>% filter(n>12) %>%
arrange((rating)) %>%
top_n(10,-rating)
#Nationality has an impact but due to the inconsistent number of players of each nation it would be difficult to use.
#--- Clubs -----------------------------------------
clubs <- players %>% group_by(Club) %>%
summarize(n=n(),rating=mean(Overall))
summary(clubs$n)
#Clubs with most players
clubs %>% arrange(desc(n)) %>%
top_n(10,n)
#Clubs with least players
clubs %>% arrange((n)) %>%
top_n(10,-n)
#Clubs with best players
clubs %>% arrange(desc(rating)) %>%
top_n(10,rating)
#Clubs with worst players
clubs %>% arrange((rating)) %>%
top_n(10,-rating)
#Clubs are definitely a good option for training
#--- Jersey.Number -----------------------------------------
Jersey <- players %>% group_by(Jersey.Number) %>%
summarize(n=n(),rating=mean(Overall))
summary(Jersey$n)
#Jersey numbers with most players
Jersey %>% arrange(desc(n)) %>%
top_n(10,n)
#Jersey numbers with least players
Jersey %>% arrange((n)) %>%
top_n(10,-n)
#Jersey numbers with best players
Jersey %>% arrange(desc(rating)) %>%
top_n(10,rating)
#Jersey numbers with worst players
Jersey %>% arrange((rating)) %>%
top_n(10,-rating)
#Jersey Numbers are definitely a good option for training. However, due to non linearity regularization should be used
#--- Position and Skills -----------------------------------------
pscols <- c("Name","Age","Overall","Special","International.Reputation","Weak.Foot","Skill.Moves","Work.Rate","Position")
ps <- players %>% select(pscols)
#Work.Rate is not an easy item to quantify
boxplot(ps$Overall~ps$Position)
boxplot(ps$Overall~ps$International.Reputation)
boxplot(ps$Overall~ps$Weak.Foot)
boxplot(ps$Overall~ps$Skill.Moves)
#Position is not helpful but International Reputation, Weak foot and Skill moves will be useful.
#--- MONETARY VALUES -----------------------------------------
moncols <- c("Name","Age","Overall","Value","Wage","Release.Clause")
money <- players %>% select(moncols)
# Split each money column into its numeric amount and its trailing K/M unit.
# str_sub(.,4,-2) assumes a 3-character currency prefix before the amount --
# TODO confirm this matches the CSV's encoding of the euro sign.
temp <- money %>% mutate(value_unit=(str_sub(Value,-1,-1)),value_euro=as.numeric(str_sub(Value,4,-2)),wage_unit=(str_sub(Wage,-1,-1)),wage_euro=as.numeric(str_sub(Wage,4,-2)),release_unit=(str_sub(Release.Clause,-1,-1)),release_euro=as.numeric(str_sub(Release.Clause,4,-2)))
# Rebuild the columns as plain euros (K = thousands, otherwise millions)
money <- temp %>% mutate(Value=ifelse(value_unit=="K",value_euro*1000,value_euro*1000000),Wage=ifelse(wage_unit=="K",wage_euro*1000,wage_euro*1000000),Release.Clause=ifelse(release_unit=="K",release_euro*1000,release_euro*1000000)) %>%
select(moncols)
money[is.na(money)] <- 0
money_rating <- money %>% group_by(Overall) %>%
summarize(n=n(),value=mean(Value),wage=mean(Wage),release=mean(Release.Clause))
plot(money_rating$Overall,money_rating$wage)
plot(money_rating$Overall,money_rating$value)
plot(money_rating$Overall,money_rating$release)
#All Monetary values have a large impact at higher overall ratings.
################################################################################
# ------------------------------------------------------------------------------
# PREDICTIVE MODEL
# ------------------------------------------------------------------------------
################################################################################
##########################################################
# DATA PREPARATION
##########################################################
#--- Columns to be used ---
header <- c("Name","Age","Overall","Weight","Value","Wage","Release.Clause","International.Reputation","Weak.Foot","Skill.Moves","Club","Jersey.Number")
# NOTE(review): bare-vector select() is deprecated in tidyselect;
# select(all_of(header)) is the supported spelling.
players <- players %>% select(header)
validation <- validation %>% select(header)
#--- players data set ---
#Fix monetary formatting
# Split each money column into its numeric amount and its trailing K/M unit
# (assumes a 3-character currency prefix -- TODO confirm encoding), then
# rebuild the columns as plain euros.
temp <- players %>% mutate(value_unit=(str_sub(Value,-1,-1)),value_euro=as.numeric(str_sub(Value,4,-2)),wage_unit=(str_sub(Wage,-1,-1)),wage_euro=as.numeric(str_sub(Wage,4,-2)),release_unit=(str_sub(Release.Clause,-1,-1)),release_euro=as.numeric(str_sub(Release.Clause,4,-2)))
players <- temp %>% mutate(Value=ifelse(value_unit=="K",value_euro*1000,value_euro*1000000),Wage=ifelse(wage_unit=="K",wage_euro*1000,wage_euro*1000000),Release.Clause=ifelse(release_unit=="K",release_euro*1000,release_euro*1000000)) %>%
select(header)
# Same transformation for the validation set
temp <- validation %>% mutate(value_unit=(str_sub(Value,-1,-1)),value_euro=as.numeric(str_sub(Value,4,-2)),wage_unit=(str_sub(Wage,-1,-1)),wage_euro=as.numeric(str_sub(Wage,4,-2)),release_unit=(str_sub(Release.Clause,-1,-1)),release_euro=as.numeric(str_sub(Release.Clause,4,-2)))
validation <- temp %>% mutate(Value=ifelse(value_unit=="K",value_euro*1000,value_euro*1000000),Wage=ifelse(wage_unit=="K",wage_euro*1000,wage_euro*1000000),Release.Clause=ifelse(release_unit=="K",release_euro*1000,release_euro*1000000)) %>%
select(header)
#Fix weight format
# Strip the trailing 3-character unit (presumably "lbs" -- confirm) from Weight
temp <- players %>% mutate(Weight=as.numeric(str_sub(Weight,1,-4)))
players <- temp %>% select(header)
temp <- validation %>% mutate(Weight=as.numeric(str_sub(Weight,1,-4)))
validation <- temp %>% select(header)
#Ensure N/As are set as 0
players[is.na(players)] <- 0
validation[is.na(validation)] <- 0
#--- View Data set ---
#players
head(players)
nrow(players) #Should be 16331
any(is.na(players))
# validation
head(validation)
nrow(validation) #Should be 1816
any(is.na(validation))
#--- Clear Memory ---
# Drop the exploration-only objects
rm(ages,clubs,hw,hw_rating,money,money_rating,nation,PlayerData,ps,readcsv,temp,Jersey)
##########################################################
# LOAD INTO TEST AND TRAIN DATA
##########################################################
# Test set will be 10% of the dataset
# (a second split *within* players, so tuning never touches validation)
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = players$Overall, times = 1, p = 0.1, list = FALSE)
train_set <- players[-test_index,]
test_set <- players[test_index,]
##########################################################
# BASIC STATISTICS
##########################################################
summary(train_set$Overall)
mu <- mean(train_set$Overall)
med <- median(train_set$Overall)
##########################################################
# MEAN MODEL
##########################################################
#Running an RMSE on mean
# Baseline: predict the global mean rating for every player
RMSE_mu <- sqrt(mean((test_set$Overall-mu)^2))
RMSE_mu
#Absolute Error
AbsError_mu <- mean(abs(mu-test_set$Overall))
AbsError_mu
##########################################################
# CARET GLM MODEL
##########################################################
#--- AGE AND WEIGHT PREDICTION------------------------------#
#run a GLM
fit_aw <- train(Overall~Age+Weight,data=train_set, method = "glm")
fit_aw
# Training-set predictions are kept for tuning the combined model later
train_pred_aw <-predict(fit_aw,train_set)
pred_aw <-predict(fit_aw,test_set)
RMSE_aw <- sqrt(mean((test_set$Overall-pred_aw)^2))
RMSE_aw
#Absolute Error
AbsError_aw <- mean(abs(pred_aw-test_set$Overall))
AbsError_aw
#--- SKILL, WEAK FOOT AND REPUTATION PREDICTION ----------#
#run a GLM
fit_swr <- train(Overall~Skill.Moves+Weak.Foot+International.Reputation,data=train_set, method = "glm")
fit_swr
train_pred_swr <-predict(fit_swr,train_set)
pred_swr <-predict(fit_swr,test_set)
RMSE_swr <- sqrt(mean((test_set$Overall-pred_swr)^2))
RMSE_swr
#Absolute Error
AbsError_swr <- mean(abs(pred_swr-test_set$Overall))
AbsError_swr
#--- MONETARY PREDICTION ------------------------------#
#run a GLM
fit_mon <- train(Overall~Value + Wage + Release.Clause,data=train_set, method = "glm")
fit_mon
train_pred_mon <-predict(fit_mon,train_set)
pred_mon <-predict(fit_mon,test_set)
#Running an RMSE to view the error
RMSE_mon_glm <- sqrt(mean((test_set$Overall-pred_mon)^2))
RMSE_mon_glm
#Absolute Error
AbsError_mon <- mean(abs(pred_mon-test_set$Overall))
AbsError_mon
##########################################################
# REGULARISATION
##########################################################
#--- Club -----------------------------------------------#
# Per-club sum of deviations from the global mean mu, plus sample size n
club <- train_set %>% group_by(Club) %>%
summarize(n=n(),rsum=sum(Overall-mu))
t_club <- test_set %>% left_join(club,by='Club')
train_club <- train_set %>% left_join(club,by='Club')
#Sample size regularization accounting for sample size n
# (no penalty yet: b_club is just the plain per-club mean deviation)
t_club <- t_club %>% mutate(b_club=rsum/n)
RMSE_club_nopen <- sqrt(mean((test_set$Overall-(mu+t_club$b_club))^2))
RMSE_club_nopen
#regularization optimized with penalty term p
# NOTE(review): the penalty is tuned on the training set itself (no
# cross-validation), which risks picking an optimistic p.
p <- seq(-10,20)
#sapply the terms
RMSEs <- sapply(p,function(p){
club <- club %>% mutate(b_club=rsum/(n+p))
train_club <- train_set %>% left_join(club,by='Club')
sqrt(mean((train_set$Overall-(mu+train_club$b_club))^2))
})
#plot outputs
plot(p,RMSEs)
#Improve accuracy
# Refine the search on a finer grid around the coarse optimum
p <- seq(-5,5,0.2)
#sapply the terms
RMSEs <- sapply(p,function(p){
club <- club %>% mutate(b_club=rsum/(n+p))
train_club <- train_set %>% left_join(club,by='Club')
sqrt(mean((train_set$Overall-(mu+train_club$b_club))^2))
})
#plot outputs
plot(p,RMSEs)
p_club <- p[which.min(RMSEs)]
p_club
#final optimized output
t_club <- t_club %>% mutate(b_club=rsum/(n+p_club))
RMSE_club <- sqrt(mean((test_set$Overall-(mu+t_club$b_club))^2))
RMSE_club
#Absolute Error
AbsError_club <- mean(abs((mu+t_club$b_club)-test_set$Overall))
AbsError_club
#--- Jersey Number -----------------------------------------------#
# Per-jersey-number sum of deviations from the global mean mu
Jersey <- train_set %>% group_by(Jersey.Number) %>%
summarize(n=n(),rsum=sum(Overall-mu))
t_jersey <- test_set %>% left_join(Jersey,by='Jersey.Number')
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
#Sample size regularization accounting for sample size n
t_jersey <- t_jersey %>% mutate(b_jersey=rsum/n)
RMSE_jn_nopen <- sqrt(mean((test_set$Overall-(mu+t_jersey$b_jersey))^2))
RMSE_jn_nopen
#regularization optimized with penalty term p
p <- seq(-10,20)
#sapply the terms
RMSEs <- sapply(p,function(p){
Jersey <- Jersey %>% mutate(b_jersey=rsum/(n+p))
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
sqrt(mean((train_set$Overall-(mu+train_jn$b_jersey))^2))
})
#plot outputs
plot(p,RMSEs)
#Improve accuracy
# Refine the search on a finer grid around the coarse optimum
p <- seq(-1,5,0.2)
#sapply the terms
RMSEs <- sapply(p,function(p){
Jersey <- Jersey %>% mutate(b_jersey=rsum/(n+p))
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
sqrt(mean((train_set$Overall-(mu+train_jn$b_jersey))^2))
})
#plot outputs
plot(p,RMSEs)
p_jersey <- p[which.min(RMSEs)]
p_jersey
#final optimized output
t_jersey <- t_jersey %>% mutate(b_jersey=rsum/(n+p_jersey))
RMSE_jn <- sqrt(mean((test_set$Overall-(mu+t_jersey$b_jersey))^2))
RMSE_jn
#Absolute Error
AbsError_jn <- mean(abs((mu+t_jersey$b_jersey)-test_set$Overall))
AbsError_jn
###############################################################################
# COMBINED REG
###############################################################################
# Combine the individual predictors into a single prediction: train_b_sum on
# the training set (used below for tuning the penalties) and b_sum on the
# test set.
# Fallback ensemble: simple average of the three section GLMs (each of these
# predictions already contains the mean mu). These values are immediately
# superseded by the full GLM below when the hardware can run it.
# (A dead `b_sum = train_pred_mon` assignment was removed; `=` assignments
# replaced with the idiomatic `<-`.)
train_b_sum <- (train_pred_mon + train_pred_aw + train_pred_swr)/3
b_sum <- (pred_mon + pred_aw + pred_swr)/3
# Full GLM over all retained numeric predictors (needs a more powerful computer)
fit_glm <- train(Overall~Age+Weight+Skill.Moves+Weak.Foot+International.Reputation+Value + Wage + Release.Clause,data=train_set, method = "glm")
train_b_sum <- predict(fit_glm,train_set)
b_sum <- predict(fit_glm,test_set)
# Results of the combined GLM
RMSE_glm <- sqrt(mean((test_set$Overall-b_sum)^2))
RMSE_glm
# Absolute Error
AbsError_glm <- mean(abs(b_sum-test_set$Overall))
AbsError_glm
# COMBINE CLUB INTO MODEL WITH TUNING
# Re-tune the club penalty term against the combined prediction
p <- seq(0,90)
RMSEs <- sapply(p,function(p){
club <- club %>% mutate(b_club=rsum/(n+p))
train_club <- train_set %>% left_join(club,by='Club')
sqrt(mean((train_set$Overall-(train_b_sum+train_club$b_club))^2))
})
# plot outputs
plot(p,RMSEs)
p_club <- p[which.min(RMSEs)]
# final optimized output: add the club effect to both predictions
t_club <- t_club %>% mutate(b_club=rsum/(n+p_club))
train_club <- train_club %>% mutate(b_club=rsum/(n+p_club))
b_sum <- b_sum+t_club$b_club
train_b_sum <- train_b_sum+train_club$b_club
# COMBINE JERSEY NUMBER INTO MODEL WITH TUNING
# Re-tune the jersey-number penalty term against the combined prediction
p <- seq(1000,2000)
RMSEs <- sapply(p,function(p){
Jersey <- Jersey %>% mutate(b_jersey=rsum/(n+p))
train_jn <- train_set %>% left_join(Jersey,by='Jersey.Number')
sqrt(mean((train_set$Overall-(train_b_sum+train_jn$b_jersey))^2))
})
# As the sequence gets larger the accuracy improvement is less effective,
# therefore larger penalties are not pursued.
# plot outputs
plot(p,RMSEs)
p_jersey <- p[which.min(RMSEs)]
# final optimized output: add the jersey-number effect to both predictions
t_jersey <- t_jersey %>% mutate(b_jersey=rsum/(n+p_jersey))
train_jn <- train_jn %>% mutate(b_jersey=rsum/(n+p_jersey))
b_sum <- b_sum+t_jersey$b_jersey
train_b_sum <- train_b_sum+train_jn$b_jersey
RMSE_combined <- sqrt(mean((test_set$Overall-b_sum)^2))
RMSE_combined
# Combined absolute error
AbsError_combined <- mean(abs(b_sum-test_set$Overall))
AbsError_combined
# Accuracy of prediction (exact match after rounding to a whole rating)
acc <- round(b_sum,0) == test_set$Overall
mean(acc)*100
# Distribution of error
Error_Combined <- b_sum-test_set$Overall
hist(Error_Combined)
###############################################################################
# RESULTS
###############################################################################
#Build results table
# One row per model: method name, RMSE and mean absolute error on test_set
results_table <- data.frame(Method='Mean Prediction', RMSE = RMSE_mu ,Error = AbsError_mu)
results_table <- bind_rows(results_table,data.frame(Method='Physical Prediction', RMSE = RMSE_aw ,Error = AbsError_aw))
results_table <- bind_rows(results_table,data.frame(Method='Club Prediction', RMSE = RMSE_club ,Error = AbsError_club))
results_table <- bind_rows(results_table,data.frame(Method='Jersey Number Prediction', RMSE = RMSE_jn ,Error = AbsError_jn))
results_table <- bind_rows(results_table,data.frame(Method='Simple Attributes Prediction', RMSE = RMSE_swr ,Error = AbsError_swr))
results_table <- bind_rows(results_table,data.frame(Method='Monetary Prediction', RMSE = RMSE_mon_glm ,Error = AbsError_mon))
results_table <- bind_rows(results_table,data.frame(Method='Combined GLM Results', RMSE = RMSE_glm,Error = AbsError_glm))
results_table <- bind_rows(results_table,data.frame(Method='Combined Results', RMSE = RMSE_combined,Error = AbsError_combined))
results_table
################################################################################
# ------------------------------------------------------------------------------
# VALIDATION
# ------------------------------------------------------------------------------
################################################################################
# Score the held-out validation set with the models fitted on train_set.
# individual glm models
val_mon <- predict(fit_mon,validation)
val_aw <- predict(fit_aw,validation)
val_swr <- predict(fit_swr,validation)
# Club regularization (effects rebuilt from the training set)
club <- train_set %>% group_by(Club) %>%
summarize(n=n(),rsum=sum(Overall-mu))
val_club <- validation %>% left_join(club,by='Club')
val_club <- val_club %>% mutate(b_club=rsum/(n+p_club))
# Jersey Number regularization (effects rebuilt from the training set)
Jersey <- train_set %>% group_by(Jersey.Number) %>%
summarize(n=n(),rsum=sum(Overall-mu))
val_jn <- validation %>% left_join(Jersey,by='Jersey.Number')
# BUG FIX: this previously mutated val_club, so b_jersey was computed from
# the *club* join's n/rsum columns instead of the jersey-number ones.
val_jn <- val_jn %>% mutate(b_jersey=rsum/(n+p_jersey))
# Fallback ensemble (superseded by the full GLM below, mirroring the
# combined-model section)
val_b_sum <- (val_mon + val_aw + val_swr)/3
# With a more powerful computer the full glm can be run
val_glm <- predict(fit_glm,validation)
val_b_sum <- val_glm
val_b_sum <- val_b_sum+val_club$b_club+val_jn$b_jersey
# Final result
RMSE_Final <- sqrt(mean((validation$Overall-val_b_sum)^2))
RMSE_Final
# Final Error
AbsError_Final <- mean(abs(val_b_sum-validation$Overall))
AbsError_Final
# Accuracy of prediction (exact match after rounding to a whole rating)
acc <- round(val_b_sum,0) == validation$Overall
mean(acc)*100
# Distribution of error
val_error <- val_b_sum-validation$Overall
hist(val_error)
# Save into Results table
results_table <- bind_rows(results_table,data.frame(Method='Validation Results', RMSE = RMSE_Final, Error = AbsError_Final))
results_table
|
#BAIRT
#Bayesian Analysis of Item Response Theory Models
#Author: Javier Martinez <martinezjavier243@gmail.com>
#
#object.coda
#
###############################################################################
###############################################################################
# NOTE(review): @exportMethod below is normally used for S4 generics; this is
# an S3 generic, so @export alone is probably sufficient -- confirm NAMESPACE.
#' @title Creating an mcmc.list for the coda package
#'
#' @description
#' The function \emph{object.coda} creates a \emph{mcmc.list} object.
#' With this it is possible to study the chain using the coda package.
#'
#' @param mcmclist A \emph{mcmc.2pnob} or \emph{mcmc.3pnob} class object.
#' @param ... Further arguments.
#'
#' @return
#' A \emph{mcmc.list} object from the coda package.
#'
#' @details
#' The function \emph{object.coda} creates a \emph{mcmc.list} object for the
#' selected marginal chain. The marginal chain is split into subchains
#' determined by \emph{parts}. The aim is to represent parallel chains with
#' different starting values (Beguin & Glas, 2001, p. 547).
#'
#' @author
#' Javier Martínez
#'
#' @references
#' Beguin, A.A. & Glas, C.A.W. (2001). MCMC Estimation and Some
#' Model-Fit Analysis of Multidimensional IRT Models. Psychometrika,
#' 66, 541-562.
#'
#' @seealso
#' \code{\link[coda]{as.mcmc.list}} and \code{\link[coda]{as.mcmc}}.
#'
#' @export object.coda
#' @exportMethod object.coda
object.coda <- function(mcmclist, ...) UseMethod("object.coda", mcmclist)
###############################################################################
###############################################################################
#' @title Creating an mcmc.list for coda package
#'
#' @description
#' The function \emph{object.coda} creates a \emph{mcmc.list} object.
#' With this it is possible to study the chain using the coda package.
#' This default method returns NULL for unsupported classes.
#'
#' @param mcmclist A \emph{mcmc.2pnob} or \emph{mcmc.3pnob} class object.
#' @param ... Further arguments.
#'
#' @return
#' A \emph{mcmc.list} object from the coda package (NULL for unsupported
#' classes).
#'
#' @details
#' The function \emph{object.coda} creates a \emph{mcmc.list} object for the
#' selected marginal chain. The marginal chain is split into subchains
#' determined by \emph{parts}. The aim is to represent parallel chains with
#' different starting values (Beguin & Glas, 2001, p. 547).
#'
#' @author
#' Javier Martínez
#'
#' @references
#' Beguin, A.A. & Glas, C.A.W. (2001). MCMC Estimation and Some
#' Model-Fit Analysis of Multidimensional IRT Models. Psychometrika,
#' 66, 541-562.
#'
#' @seealso
#' \code{\link[coda]{as.mcmc.list}} and \code{\link[coda]{as.mcmc}}.
#'
#' @export
object.coda.default <- function(mcmclist, ...) NULL
###############################################################################
###############################################################################
#' @title Creating an mcmc.list for the coda package
#'
#' @description
#' The function \emph{object.coda} creates a \emph{mcmc.list} object.
#' With this it is possible to study the chain using the coda package.
#'
#' @param mcmclist A \emph{mcmc.2pnob} or \emph{mcmc.3pnob} class object.
#' @param parameter The parameter (a, b, c or theta) for graphing.
#' @param chain The parameter's chain that will be graphed.
#' @param parts Number of splits for the MCMC chain.
#' @param ... Further arguments.
#'
#' @return
#' A \emph{mcmc.list} object from the coda package.
#'
#' @details
#' The function \emph{object.coda} creates a \emph{mcmc.list} object for the
#' selected marginal chain. The marginal chain is split into subchains
#' determined by \emph{parts}. The aim is to represent parallel chains with
#' different starting values (Beguin & Glas, 2001, p. 547).
#'
#' @author
#' Javier Martínez
#'
#' @references
#' Beguin, A.A. & Glas, C.A.W. (2001). MCMC Estimation and Some
#' Model-Fit Analysis of Multidimensional IRT Models. Psychometrika,
#' 66, 541-562.
#'
#' @seealso
#' \code{\link[coda]{as.mcmc.list}} and \code{\link[coda]{as.mcmc}}.
#'
#' @examples
#' # data for model
#' data("MathTest")
#'
#' # Only for the first 500 examinees of the data MathTest
#' # Two-Parameter Normal Ogive Model
#' model2 <- mcmc.2pnob(MathTest[1:500,], iter = 400, burning = 100)
#'
#' chain_a1 <- object.coda(model2, parameter = "a", chain = 1)
#' coda::gelman.plot(chain_a1)
#' coda::gelman.diag(chain_a1)
#' plot(chain_a1)
#'
#' \donttest{
#' # For all examinees of the data MathTest
#' # Three-Parameter Normal Ogive Model
#' # selection of the prior for 5 response options
#' cprior <- select.c.prior(5)
#' model3 <- mcmc.3pnob(MathTest, iter = 3500, burning = 500,
#' c.prior = cprior, parts = 3)
#'
#' chain_c1 <- object.coda(model3, parameter = "c", chain = 1)
#' coda::gelman.plot(chain_c1)
#' coda::gelman.diag(chain_c1)
#' plot(chain_c1)
#' }
#'
#' ## End(Not run)
#'
#'
#' @importFrom coda as.mcmc
#' @importFrom coda mcmc.list
#'
#' @export
object.coda.bairt <- function(mcmclist, parameter = "a", chain = 1,
parts = NULL, ...) {
# Check in ================================================================
# Validate that 'parameter' exists in the fitted object and that 'chain'
# is a valid column for that parameter (internal package helpers).
.parameter.test(mcmclist, parameter)
.chain.test(mcmclist, parameter, chain)
# Extract the requested marginal chain (all iterations of one
# parameter/chain combination)
mcmc.chain <- mcmclist$mcmcobj[[parameter]][, chain]
# Default number of subchains comes from the fitted object itself
if (is.null(parts)) {
parts <- mcmclist$information$parts
}
# Split the single chain into 'parts' subchains and wrap as a coda mcmc.list
mcmc <- .mcmc.divide(mcmc.chain, parts)
return(mcmc)
}
| /R/object.coda.R | no_license | cran/bairt | R | false | false | 5,282 | r | #BAIRT
#Bayesian Analysis of Item Response Theory Models
#Author: Javier Martinez <martinezjavier243@gmail.com>
#
#object.coda
#
###############################################################################
###############################################################################
# NOTE(review): @exportMethod below is normally used for S4 generics; this is
# an S3 generic, so @export alone is probably sufficient -- confirm NAMESPACE.
#' @title Creating an mcmc.list for the coda package
#'
#' @description
#' The function \emph{object.coda} creates a \emph{mcmc.list} object.
#' With this it is possible to study the chain using the coda package.
#'
#' @param mcmclist A \emph{mcmc.2pnob} or \emph{mcmc.3pnob} class object.
#' @param ... Further arguments.
#'
#' @return
#' A \emph{mcmc.list} object from the coda package.
#'
#' @details
#' The function \emph{object.coda} creates a \emph{mcmc.list} object for the
#' selected marginal chain. The marginal chain is split into subchains
#' determined by \emph{parts}. The aim is to represent parallel chains with
#' different starting values (Beguin & Glas, 2001, p. 547).
#'
#' @author
#' Javier Martínez
#'
#' @references
#' Beguin, A.A. & Glas, C.A.W. (2001). MCMC Estimation and Some
#' Model-Fit Analysis of Multidimensional IRT Models. Psychometrika,
#' 66, 541-562.
#'
#' @seealso
#' \code{\link[coda]{as.mcmc.list}} and \code{\link[coda]{as.mcmc}}.
#'
#' @export object.coda
#' @exportMethod object.coda
object.coda <- function(mcmclist, ...) UseMethod("object.coda", mcmclist)
###############################################################################
###############################################################################
#' @title Creating an mcmc.list for coda package
#'
#' @description
#' The function \emph{object.coda} creates a \emph{mcmc.list} object.
#' With this it is possible to study the chain using the coda package.
#' This default method returns NULL for unsupported classes.
#'
#' @param mcmclist A \emph{mcmc.2pnob} or \emph{mcmc.3pnob} class object.
#' @param ... Further arguments.
#'
#' @return
#' A \emph{mcmc.list} object from the coda package (NULL for unsupported
#' classes).
#'
#' @details
#' The function \emph{object.coda} creates a \emph{mcmc.list} object for the
#' selected marginal chain. The marginal chain is split into subchains
#' determined by \emph{parts}. The aim is to represent parallel chains with
#' different starting values (Beguin & Glas, 2001, p. 547).
#'
#' @author
#' Javier Martínez
#'
#' @references
#' Beguin, A.A. & Glas, C.A.W. (2001). MCMC Estimation and Some
#' Model-Fit Analysis of Multidimensional IRT Models. Psychometrika,
#' 66, 541-562.
#'
#' @seealso
#' \code{\link[coda]{as.mcmc.list}} and \code{\link[coda]{as.mcmc}}.
#'
#' @export
object.coda.default <- function(mcmclist, ...) NULL
###############################################################################
###############################################################################
#' @title Creating an mcmc.list for the coda package
#'
#' @description
#' The function \emph{object.coda} creates a \emph{mcmc.list} object.
#' With this it is possible to study the chain using the coda package.
#'
#' @param mcmclist A \emph{mcmc.2pnob} or \emph{mcmc.3pnob} class object.
#' @param parameter The parameter (a, b, c or theta) for graphing.
#' @param chain The parameter's chain that will be graphed.
#' @param parts Number of splits for the MCMC chain.
#' @param ... Further arguments.
#'
#' @return
#' A \emph{mcmc.list} object from the coda package.
#'
#' @details
#' The function \emph{object.coda} creates a \emph{mcmc.list} object for the
#' selected marginal chain. The marginal chain is split into subchains
#' determined by \emph{parts}. The aim is to represent parallel chains with
#' different starting values (Beguin & Glas, 2001, p. 547).
#'
#' @author
#' Javier Martínez
#'
#' @references
#' Beguin, A.A. & Glas, C.A.W. (2001). MCMC Estimation and Some
#' Model-Fit Analysis of Multidimensional IRT Models. Psychometrika,
#' 66, 541-562.
#'
#' @seealso
#' \code{\link[coda]{as.mcmc.list}} and \code{\link[coda]{as.mcmc}}.
#'
#' @examples
#' # data for model
#' data("MathTest")
#'
#' # Only for the first 500 examinees of the data MathTest
#' # Two-Parameter Normal Ogive Model
#' model2 <- mcmc.2pnob(MathTest[1:500,], iter = 400, burning = 100)
#'
#' chain_a1 <- object.coda(model2, parameter = "a", chain = 1)
#' coda::gelman.plot(chain_a1)
#' coda::gelman.diag(chain_a1)
#' plot(chain_a1)
#'
#' \donttest{
#' # For all examinees of the data MathTest
#' # Three-Parameter Normal Ogive Model
#' # selection of the prior for 5 response options
#' cprior <- select.c.prior(5)
#' model3 <- mcmc.3pnob(MathTest, iter = 3500, burning = 500,
#' c.prior = cprior, parts = 3)
#'
#' chain_c1 <- object.coda(model3, parameter = "c", chain = 1)
#' coda::gelman.plot(chain_c1)
#' coda::gelman.diag(chain_c1)
#' plot(chain_c1)
#' }
#'
#' ## End(Not run)
#'
#'
#' @importFrom coda as.mcmc
#' @importFrom coda mcmc.list
#'
#' @export
object.coda.bairt <- function(mcmclist, parameter = "a", chain = 1,
parts = NULL, ...) {
# Check in ================================================================
# Validate that 'parameter' exists in the fitted object and that 'chain'
# is a valid column for that parameter (internal package helpers).
.parameter.test(mcmclist, parameter)
.chain.test(mcmclist, parameter, chain)
# Extract the requested marginal chain (all iterations of one
# parameter/chain combination)
mcmc.chain <- mcmclist$mcmcobj[[parameter]][, chain]
# Default number of subchains comes from the fitted object itself
if (is.null(parts)) {
parts <- mcmclist$information$parts
}
# Split the single chain into 'parts' subchains and wrap as a coda mcmc.list
mcmc <- .mcmc.divide(mcmc.chain, parts)
return(mcmc)
}
|
# stroop ----
#' Stroop Task
#'
#' 50 simulated subjects in a stroop task viewing all combinations of word and ink colours blue, purple, green, red, and brown, 5 times each. Subjects respond with the ink colour. Subjects who do not respond in time have NA for response and rt.
#'
#' @format A data frame with 12500 rows and 5 variables:
#' \describe{
#' \item{sub_id}{Subject ID}
#' \item{word}{The text of the word}
#' \item{ink}{The ink colour of the word}
#' \item{response}{The subject's response (should equal the ink colour)}
#' \item{rt}{Reaction time (in ms)}
#' }
#' @source \url{https://psyteachr.github.io/psyteachr/data/stroop.csv}
"stroop"
| /R/data_stroop.R | permissive | PsyTeachR/psyteachrdata | R | false | false | 669 | r | # stroop ----
#' Stroop Task
#'
#' 50 simulated subjects in a stroop task viewing all combinations of word and ink colours blue, purple, green, red, and brown, 5 times each. Subjects respond with the ink colour. Subjects who do not respond in time have NA for response and rt.
#'
#' @format A data frame with 12500 rows and 5 variables:
#' \describe{
#' \item{sub_id}{Subject ID}
#' \item{word}{The text of the word}
#' \item{ink}{The ink colour of the word}
#' \item{response}{The subject's response (should equal the ink colour)}
#' \item{rt}{Reaction time (in ms)}
#' }
#' @source \url{https://psyteachr.github.io/psyteachr/data/stroop.csv}
"stroop"
|
## Cache the Inversion of a Matrix
## Caching is useful to avoid redundant computation by storing
## results in memory when the input data has not changed.
## Overall Goal: Storing a Matrix and caching its inverse
## Step 1: Create a Matrix that can cache its inverse
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of accessor closures sharing the enclosing environment:
## set/get for the matrix itself, setInverse/getInverse for the cache.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  list(
    # Replace the stored matrix and drop any stale cached inverse
    set = function(y) {
      x <<- y
      inverse_cache <<- NULL
    },
    # Return the stored matrix
    get = function() x,
    # Store a freshly computed inverse in the cache
    setInverse = function(inverse) inverse_cache <<- inverse,
    # Return the cached inverse (NULL if not computed yet)
    getInverse = function() inverse_cache
  )
}
## Step 2: Return a matrix that is the inverse of 'x'
## 1) computing the inverse of the matrix created with 'makeCacheMatrix'.
## 2) option to retrieve from the cache if the inverse is already calculated.
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## reusing the cached value when one is already available.
cacheSolve <- function(x, ...) {
  # Fast path: a previously computed inverse is still cached
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: compute the inverse, store it for next time, and return it
  fresh <- solve(x$get(), ...)
  x$setInverse(fresh)
  fresh
}
| /cachematrix.R | no_license | oe-antunes/ProgrammingAssignment2 | R | false | false | 1,380 | r | ## Cache the Inversion of a Matrix
## Caching is useful to avoid redundant computation by storing
## results in memory when the input data has not changed.
## Overall Goal: Storing a Matrix and caching its inverse
## Step 1: Create a Matrix that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse of 'x'; NULL until computed, reset on every set()
m <- NULL
# Set the value of the matrix
set <- function(y) {
x <<- y
# Invalidate the cached inverse because the matrix changed
m <<- NULL
}
# Get the value of the matrix
get <- function() x
# Set the inverse of the matrix
setInverse <- function(inverse) m <<- inverse
# Get the inverse of the matrix (NULL if not computed yet)
getInverse <- function() m
# Provide a list exposing the four accessor closures
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Step 2: Return a matrix that is the inverse of 'x'
## 1) computing the inverse of the matrix created with 'makeCacheMatrix'.
## 2) option to retrieve from the cache if the inverse is already calculated.
cacheSolve <- function(x, ...) {
# getting inverse of matrix 'x' -> cache contains information
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# getting inverse of matrix 'x' -> cache is empty: compute, store, return
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elnet_coord.R
\name{elnet_coord}
\alias{elnet_coord}
\title{Elastic Net}
\usage{
elnet_coord(
y,
X,
beta0 = NA,
alpha = 0,
lambda = 0,
niter = 10000,
tol = 1e-05
)
}
\arguments{
\item{y}{The constant vector}
\item{X}{The regressor}
\item{beta0}{The initial guess of the solution. If no input given, then it is
set to a zero vector.}
\item{alpha}{The elastic net parameter, alpha should be in [0,1]. More details
to be found in Trevor's paper.}
\item{lambda}{Regularization parameter}
\item{niter}{Maximum Number of Iteration}
\item{tol}{Tolerance of the difference between two result from iteration}
}
\value{
The solution to the elastic net system according to given parameter.
}
\description{
A function that implements the algorithm proposed by (Zou, Hui; Hastie,
Trevor, 2015, JRRSB).
}
\author{
Xiaohan Wang
}
| /man/elnet_coord.Rd | permissive | xw547/hwpkg | R | false | true | 912 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elnet_coord.R
\name{elnet_coord}
\alias{elnet_coord}
\title{Elastic Net}
\usage{
elnet_coord(
y,
X,
beta0 = NA,
alpha = 0,
lambda = 0,
niter = 10000,
tol = 1e-05
)
}
\arguments{
\item{y}{The constant vector}
\item{X}{The regressor}
\item{beta0}{The initial guess of the solution. If no input given, then it is
set to a zero vector.}
\item{alpha}{The elastic net parameter, alpha should be in [0,1]. More details
to be found in Trevors paper.}
\item{lambda}{Regularization parameter}
\item{niter}{Maximum Number of Iteration}
\item{tol}{Tolerance of the difference between two result from iteration}
}
\value{
The solution to the elastic net system according to given parameter.
}
\description{
A function that implements the algorithm proposed by (Zou, Hui; Hastie,
Trevor, 2015, JRRSB).
}
\author{
Xiaohan Wang
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster_export.R
\name{cluster_export}
\alias{cluster_export}
\title{Export a column with cluster results}
\usage{
cluster_export(
spe,
cluster_var,
cluster_dir = file.path(tempdir(), "exported_clusters"),
overwrite = TRUE
)
}
\arguments{
\item{spe}{Defaults to the output of
\code{fetch_data(type = 'spe')}. This is a
\link[SpatialExperiment:SpatialExperiment]{SpatialExperiment-class}
object with the spot-level Visium data and information required for
visualizing the histology. See \code{\link[=fetch_data]{fetch_data()}} for more details.}
\item{cluster_var}{A \code{character(1)} with the name of the variable you wish to
export.}
\item{cluster_dir}{A \code{character(1)} specifying the output directory, similar
to the \code{outs/analysis/clustering} produced by SpaceRanger.}
\item{overwrite}{A \code{logical(1)} indicating whether to overwrite the \code{spe$key}.}
}
\value{
The path to the exported \code{clusters.csv} file.
}
\description{
This function creates a \code{clusters.csv} file similar to the ones created by
SpaceRanger at \code{outs/analysis/clustering} but with the \code{key} column that
combines the \code{barcode} and the \code{sample_id}, which is needed when the \code{spe}
object contains data from multiple samples given that the barcodes are
duplicated.
}
\examples{
if (enough_ram()) {
## Obtain the necessary data
if (!exists("spe")) spe <- fetch_data("spe")
## Export two cluster variables
cluster_export(spe, "spatialLIBD")
cluster_export(spe, "GraphBased")
}
}
\seealso{
Other cluster export/import utility functions:
\code{\link{cluster_import}()}
}
\concept{cluster export/import utility functions}
| /man/cluster_export.Rd | no_license | LieberInstitute/spatialLIBD | R | false | true | 1,750 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster_export.R
\name{cluster_export}
\alias{cluster_export}
\title{Export a column with cluster results}
\usage{
cluster_export(
spe,
cluster_var,
cluster_dir = file.path(tempdir(), "exported_clusters"),
overwrite = TRUE
)
}
\arguments{
\item{spe}{Defaults to the output of
\code{fetch_data(type = 'spe')}. This is a
\link[SpatialExperiment:SpatialExperiment]{SpatialExperiment-class}
object with the spot-level Visium data and information required for
visualizing the histology. See \code{\link[=fetch_data]{fetch_data()}} for more details.}
\item{cluster_var}{A \code{character(1)} with the name of the variable you wish to
export.}
\item{cluster_dir}{A \code{character(1)} specifying the output directory, similar
to the \code{outs/analysis/clustering} produced by SpaceRanger.}
\item{overwrite}{A \code{logical(1)} indicating whether to overwrite the \code{spe$key}.}
}
\value{
The path to the exported \code{clusters.csv} file.
}
\description{
This function creates a \code{clusters.csv} file similar to the ones created by
SpaceRanger at \code{outs/analysis/clustering} but with the \code{key} column that
combines the \code{barcode} and the \code{sample_id}, which is needed when the \code{spe}
object contains data from multiple samples given that the barcodes are
duplicated.
}
\examples{
if (enough_ram()) {
## Obtain the necessary data
if (!exists("spe")) spe <- fetch_data("spe")
## Export two cluster variables
cluster_export(spe, "spatialLIBD")
cluster_export(spe, "GraphBased")
}
}
\seealso{
Other cluster export/import utility functions:
\code{\link{cluster_import}()}
}
\concept{cluster export/import utility functions}
|
# Script to read in a csv file of subsetted Apple mobility data for any US state
# and write a new csv file only including transportation tallies for this subset
# using function in the code/functions directory
# Madison Ng
# February 24, 2021
# mtng2@dons.usfca.edu
# load function
# NOTE(review): path is relative to the project root — run the script from there
source("code/functions/city_county_transport_tally.R")
# test use of function with normal data file
city_county_transport_tally(
input_file_name = "output/applemobilitytrends-2021-02-22_Arizona.csv",
state_to_tally = "Arizona")
# test use of function with data file missing information
# (expected to exercise the function's handling of missing rows)
city_county_transport_tally(
input_file_name = paste0("output/",
"applemobilitytrends-2021-02-22_",
"Hawaii_missing_rows_test.csv"),
state_to_tally = "Hawaii")
| /code/archived_code/02_count_up_cities_counties_original.R | no_license | ngmadison/analyze_apple_covid_mobility_data | R | false | false | 796 | r | # Script to read in a csv file of subsetted Apple mobility data for any US state
# and write a new csv file only including transportation tallies for this subset
# using function in the code/functions directory
# Madison Ng
# February 24, 2021
# mtng2@dons.usfca.edu
# load function
# NOTE(review): path is relative to the project root — run the script from there
source("code/functions/city_county_transport_tally.R")
# test use of function with normal data file
city_county_transport_tally(
input_file_name = "output/applemobilitytrends-2021-02-22_Arizona.csv",
state_to_tally = "Arizona")
# test use of function with data file missing information
# (expected to exercise the function's handling of missing rows)
city_county_transport_tally(
input_file_name = paste0("output/",
"applemobilitytrends-2021-02-22_",
"Hawaii_missing_rows_test.csv"),
state_to_tally = "Hawaii")
|
library('stringr')
library('ggplot2')
library('grid')
library('gridExtra')
### Plot some ADC samples from a capacitor discharge or constant
### charge/discharge, looking at curve/line fitting and the distribution
### of the samples
### This could be a lot neater
### A (different) example of what was executed
###
### >>> [pulse.send(pulses) in range(10)] ; print(mean1000samples_alt2(p1))
### [False]
### 10470.7
### >>> pulses
### array('H', [40000, 500])
### 1000 ADC values from CP 5.0.0 on a CLUE (nRF52840 12 bit) Copied in from REPL
### samples 1 2.2uF non-continuous
samples1 <- c(
10592, 10624, 10544, 10624, 10656, 10592, 10544, 10560, 10576, 10576,
10544, 10544, 10640, 10640, 10592, 10480, 10560, 10608, 10592, 10352,
10592, 10336, 10544, 10528, 10608, 10592, 10544, 10432, 10608, 10496,
10528, 10640, 10656, 10544, 10656, 10528, 10560, 10624, 10592, 10576,
10560, 10448, 10112, 10624, 10640, 10624, 10560, 10512, 10496, 10624,
10512, 10512, 10624, 10752, 10480, 10544, 10544, 10560, 10784, 10480,
10528, 10640, 10528, 10496, 10704, 10592, 10480, 10560, 10592, 10512,
10576, 10384, 10624, 10624, 10496, 10512, 10480, 10528, 10608, 10528,
10576, 10496, 10512, 10480, 10336, 10736, 10544, 10480, 10496, 10416,
10624, 10544, 10544, 10576, 10576, 10560, 10528, 10528, 10512, 10560,
10560, 10528, 10560, 10496, 10144, 10496, 10832, 10528, 10560, 10496,
10560, 10592, 10496, 10432, 10480, 10560, 10480, 10544, 10480, 10544,
10496, 10512, 10544, 10512, 10528, 10560, 10512, 10544, 10544, 10496,
10512, 10496, 10368, 10480, 10528, 10480, 10496, 10560, 10496, 10544,
10496, 10432, 10512, 10544, 10528, 10544, 10560, 10560, 10544, 10592,
10448, 10400, 10512, 10560, 10528, 10528, 10480, 10496, 10512, 10464,
10464, 10480, 10464, 10480, 10480, 10496, 10560, 10496, 10464, 10448,
10448, 10528, 10496, 10496, 10464, 10480, 10560, 10496, 10496, 10640,
10560, 10512, 10480, 10512, 10496, 10480, 10480, 10544, 10464, 10480,
10624, 10272, 10528, 10496, 10464, 10800, 10496, 10528, 10448, 10560,
10512, 10496, 10464, 10528, 10480, 10576, 10480, 10560, 10480, 10576,
10480, 10512, 10608, 10448, 10496, 10480, 10512, 10752, 10480, 10480,
10480, 10480, 10480, 10464, 10480, 10544, 10464, 10432, 10608, 10384,
10368, 10512, 10512, 10480, 10528, 10272, 10336, 10448, 10480, 10528,
10416, 10464, 10448, 10416, 10480, 10576, 10512, 10496, 10512, 10528,
10464, 10432, 10464, 10512, 10432, 10544, 10432, 10448, 10480, 10448,
10432, 10464, 10352, 10432, 10480, 10464, 10528, 10480, 10416, 10480,
10480, 10480, 10496, 10416, 10480, 10288, 10512, 10448, 10448, 10464,
10304, 10384, 10480, 10448, 10480, 10624, 10496, 10448, 10368, 10496,
10560, 10464, 10528, 10336, 10416, 10496, 10416, 10416, 10496, 10432,
10352, 10416, 10496, 10496, 10432, 10560, 10416, 10528, 10320, 10464,
10512, 10496, 10496, 10400, 10416, 10496, 10544, 10320, 10304, 10496,
10352, 10400, 10512, 10352, 10464, 10384, 10400, 10496, 10480, 10416,
10464, 10512, 10528, 10352, 10448, 10416, 10416, 10480, 10496, 10560,
10416, 10400, 10416, 10448, 10480, 10320, 10400, 10720, 10416, 10416,
10464, 10448, 10432, 10464, 10448, 10384, 10416, 10432, 10416, 10432,
10352, 10416, 10352, 10224, 10416, 10464, 10464, 10496, 10416, 10128,
10432, 10720, 10480, 10416, 10448, 10416, 10464, 10368, 10432, 10480,
10432, 10464, 10496, 10480, 10464, 10432, 10352, 10400, 10496, 10416,
10464, 10448, 10368, 10400, 10496, 10224, 10400, 10480, 10464, 10432,
10448, 10416, 10416, 10496, 10496, 10464, 10416, 10400, 10368, 10384,
10432, 9968, 10192, 10416, 10400, 10480, 10512, 10480, 10432, 10432,
10480, 10352, 10432, 10416, 10512, 10384, 10176, 10752, 10528, 10192,
10352, 10432, 10384, 10480, 10448, 10368, 10160, 10368, 10448, 10528,
10448, 10352, 10320, 10416, 10400, 10384, 10336, 10432, 10368, 10384,
10432, 10432, 10448, 10352, 10384, 10416, 10496, 10496, 10496, 10256,
10080, 10624, 10448, 10384, 10336, 10448, 10400, 10432, 10400, 10432,
10416, 10352, 10368, 10416, 10224, 10352, 10352, 10432, 10336, 10432,
10288, 10464, 10432, 10240, 10432, 10352, 10400, 10432, 10416, 10464,
10288, 10432, 10464, 10352, 10416, 10384, 10416, 10432, 10448, 10448,
10448, 10656, 10480, 10400, 10432, 10432, 10416, 10432, 10480, 10464,
10352, 10432, 10464, 10384, 10528, 10432, 10480, 10496, 10416, 10496,
10416, 10496, 10368, 10416, 10800, 10400, 10352, 10576, 10384, 10432,
10448, 10448, 10448, 10432, 10496, 10416, 10496, 10320, 10384, 10480,
10416, 10336, 10416, 10336, 10352, 10400, 10368, 10288, 9792, 10320,
10368, 10352, 10400, 10320, 10432, 10160, 10384, 10432, 10432, 10368,
10432, 10352, 10304, 10416, 10432, 10368, 10352, 10432, 10352, 10496,
10352, 10384, 10464, 10432, 10400, 10400, 10416, 10336, 10432, 10224,
10592, 10368, 10432, 10352, 10384, 10368, 10432, 10432, 10416, 10352,
10272, 10416, 10384, 10448, 10368, 10352, 10304, 10240, 10400, 10528,
10416, 10352, 10368, 10416, 10448, 10464, 10368, 10256, 10400, 10400,
10352, 10368, 10432, 10352, 10432, 10448, 10368, 10336, 10352, 10432,
10448, 10544, 10368, 10496, 10384, 10240, 10368, 10256, 10352, 10288,
10320, 10304, 10384, 10352, 10400, 10432, 10384, 10384, 10352, 10352,
10368, 10384, 10352, 10368, 10752, 10368, 10368, 10416, 10336, 10224,
10400, 10400, 10432, 10416, 10496, 10432, 10432, 10384, 10304, 10336,
10336, 10368, 10384, 10384, 10368, 10496, 10304, 10384, 10384, 10384,
10352, 10304, 10432, 10352, 10272, 10352, 10416, 10368, 10512, 10336,
10432, 10304, 10336, 10336, 10368, 10368, 10368, 10320, 10368, 10352,
10400, 10304, 10352, 9968, 10352, 10288, 10272, 10368, 10320, 10464,
10368, 10320, 10336, 10432, 10656, 10368, 10368, 10336, 10384, 10336,
10336, 10352, 10336, 10224, 10368, 10384, 10336, 10336, 10336, 10320,
10384, 10352, 10368, 10320, 10272, 10224, 10352, 10368, 10320, 10320,
10352, 10304, 10352, 10352, 10224, 10352, 10288, 10288, 10416, 10352,
10432, 10368, 10304, 10112, 10288, 10352, 10400, 10320, 10352, 10352,
10320, 10208, 10512, 10288, 10208, 10320, 10272, 10304, 10368, 10352,
10368, 10304, 10352, 10336, 10288, 10272, 10336, 10368, 10288, 10560,
10240, 10320, 10464, 10432, 10304, 10288, 10304, 10256, 10304, 10320,
10368, 10272, 10368, 10256, 10224, 10352, 10304, 10256, 10320, 10288,
10336, 10288, 10320, 10224, 10320, 10320, 10400, 10368, 10304, 10384,
10368, 10320, 10320, 10304, 10288, 10368, 10336, 10416, 10320, 10304,
10336, 10288, 10304, 10352, 10352, 10304, 10256, 10352, 10240, 10288,
10304, 10256, 10304, 10400, 10288, 10384, 10272, 10320, 10336, 10224,
10368, 10256, 10368, 10304, 10272, 10288, 10432, 10352, 10288, 10336,
10288, 10368, 10304, 9552, 10320, 10336, 10288, 10384, 10320, 10272,
10320, 10368, 10256, 10368, 10336, 10368, 10288, 10352, 10352, 10288,
10512, 10320, 10320, 10352, 10336, 10272, 10160, 10336, 10288, 10272,
10336, 10288, 10416, 10208, 10304, 10272, 10320, 10288, 10336, 10400,
10304, 10368, 10352, 10336, 10208, 10288, 10352, 10240, 10336, 10352,
10288, 10288, 10288, 10288, 10352, 10272, 10272, 10224, 10176, 10224,
10224, 10224, 10272, 10384, 10368, 10208, 10320, 10320, 10320, 10336,
10304, 9696, 10304, 10400, 10304, 10304, 10256, 10256, 10288, 10272,
10304, 10272, 10240, 10320, 10304, 10240, 10208, 10272, 10208, 10336,
10320, 10320, 10224, 10208, 10224, 10320, 10288, 10256, 10208, 10320,
10240, 10224, 10224, 10352, 10384, 10336, 10336, 10304, 10304, 10256,
10304, 10304, 10256, 10368, 10288, 10288, 10288, 10304, 10368, 10240,
10272, 10304, 10240, 10320, 10160, 10320, 10240, 10304, 10208, 10160,
10320, 10256, 10224, 10288, 10224, 10224, 10256, 10304, 10208, 10240,
10304, 10288, 10368, 10336, 10256, 10176, 10304, 10288, 10240, 10224,
10272, 10224, 10288, 10256, 10320, 10224, 10352, 10272, 10288, 10224)
### samples2 is 0.1uF ceramic/mylar with continuous pulsing at 400kHz 55000 duty
###
### >>> discard = [print(" ", ", ".join(map(str,sample_store_list[x:x+10])), end=",\n") for x in range(0,1000,10)]
samples2 <- c(
23808, 23712, 23680, 23696, 23696, 23712, 23696, 23712, 23696, 23648,
23472, 23712, 23520, 23616, 23744, 23568, 23680, 23200, 23680, 23744,
23712, 23760, 23152, 24208, 23984, 23456, 23280, 23664, 24256, 24528,
23664, 23600, 23696, 23616, 23712, 23712, 24128, 23680, 23744, 23584,
23936, 23600, 23552, 23936, 23648, 23840, 24448, 22928, 23664, 23360,
24304, 23568, 23744, 23616, 23632, 23664, 23632, 23664, 23664, 23664,
23776, 23776, 23744, 23424, 24128, 23872, 23552, 23600, 23728, 23696,
23712, 23520, 23536, 23872, 23584, 23664, 23936, 23632, 23744, 23344,
23680, 23712, 23664, 23744, 23664, 23600, 23600, 23680, 23648, 23856,
23664, 23616, 23840, 23248, 24000, 23696, 23712, 23632, 24352, 23664,
23664, 23776, 23616, 23584, 23600, 23712, 23664, 23632, 23888, 23856,
23472, 23568, 23600, 23744, 23664, 23728, 23664, 23728, 23584, 23600,
23872, 23696, 23440, 23568, 23504, 23712, 24336, 23744, 23712, 23760,
23600, 23712, 23696, 23616, 23776, 23680, 23600, 23712, 23664, 23728,
23680, 23712, 23488, 23616, 22864, 23696, 23680, 23584, 23680, 23632,
23808, 23776, 23552, 23664, 23680, 23744, 23680, 23664, 23488, 23760,
23744, 23808, 23536, 23728, 23712, 23408, 23648, 23744, 23712, 23728,
23536, 23680, 23520, 23680, 23600, 23600, 23712, 23760, 23520, 23664,
23664, 23616, 23744, 23744, 23600, 23712, 23488, 23744, 23728, 23088,
23616, 23600, 23664, 24400, 23664, 23936, 23776, 23472, 23872, 24112,
23472, 23680, 23728, 23664, 23536, 23664, 23568, 23664, 23680, 23552,
23648, 23712, 23584, 23552, 23584, 23696, 23728, 23680, 23616, 23664,
23600, 23824, 23504, 23680, 23360, 23232, 23680, 23904, 24320, 23744,
23664, 23664, 23728, 23648, 23648, 23680, 23648, 23664, 23616, 23696,
23696, 23536, 23760, 23648, 23648, 23776, 23696, 23792, 23536, 23504,
23904, 23552, 23664, 24272, 23456, 23472, 23168, 23680, 23712, 23696,
23568, 23552, 23632, 23664, 23648, 23504, 23712, 24016, 23888, 23824,
23744, 23632, 23664, 23696, 23680, 23744, 23664, 23600, 23744, 23456,
23552, 23600, 23664, 23808, 23664, 23664, 23376, 23728, 23440, 23728,
23424, 23680, 24320, 23616, 23824, 23520, 23536, 23584, 23520, 23680,
23680, 23600, 23840, 23680, 23744, 23312, 23168, 23536, 23264, 23712,
23712, 23648, 23120, 23664, 23584, 23664, 23600, 23520, 23696, 23696,
22432, 23680, 23728, 23456, 23648, 23664, 23776, 23600, 23664, 23632,
23712, 22592, 23568, 23344, 23632, 24064, 23616, 22880, 23968, 23568,
23008, 23792, 23488, 24240, 23648, 23680, 23504, 23712, 23808, 23552,
23776, 23600, 23456, 23408, 24352, 23040, 23744, 23552, 23760, 23680,
23600, 23664, 23632, 23264, 23680, 23648, 23296, 23520, 23840, 23344,
23632, 23680, 23664, 23712, 23728, 23488, 23632, 23648, 23696, 23664,
23616, 23696, 23744, 23616, 23632, 23664, 23600, 23680, 23584, 23744,
23504, 23824, 23600, 23632, 23536, 23680, 23712, 23376, 24096, 23600,
23456, 23728, 23504, 23744, 23632, 23712, 23664, 23600, 23664, 23808,
23504, 23648, 23760, 23968, 23616, 24000, 23840, 23680, 23696, 23808,
24208, 23600, 23600, 23680, 23888, 23312, 23488, 23728, 23456, 23744,
23648, 23696, 23648, 23680, 23728, 23552, 23680, 23536, 23808, 23632,
23664, 23648, 23680, 23920, 23648, 23152, 23712, 23280, 23632, 23712,
23664, 23664, 23664, 23728, 23808, 23664, 23712, 23600, 23744, 23664,
23664, 23760, 23664, 23552, 23680, 23680, 23680, 22768, 23680, 23808,
23600, 24272, 23600, 23344, 23680, 23968, 23584, 23664, 23536, 23552,
23872, 23664, 23824, 23968, 24064, 23632, 23632, 23712, 23568, 23728,
23744, 23600, 23632, 23888, 23232, 23664, 23632, 23744, 23648, 23344,
23616, 23632, 23712, 23744, 23600, 23664, 23552, 23712, 23680, 23664,
23664, 23680, 23696, 23760, 23760, 23552, 23744, 23600, 23488, 23376,
23600, 24096, 23712, 23584, 23616, 23744, 23664, 23584, 23600, 23840,
23760, 23744, 23664, 23568, 23728, 23616, 23648, 23680, 23280, 23744,
23632, 23536, 23760, 23616, 23664, 23648, 23696, 23680, 23616, 23616,
23488, 23648, 24112, 23200, 23648, 23840, 23520, 23712, 23600, 23552,
23664, 23664, 23728, 23664, 23712, 23680, 23712, 23520, 23584, 23680,
23696, 23680, 23632, 23680, 23664, 23536, 23584, 23680, 23568, 23472,
23680, 23680, 23664, 23680, 23648, 23616, 23568, 23712, 23408, 23600,
23696, 23712, 24080, 23552, 23152, 23728, 23680, 23824, 23696, 23664,
23488, 23760, 23808, 23472, 23552, 23616, 23680, 23760, 23696, 23408,
23904, 23792, 23360, 23408, 23664, 23808, 23648, 23408, 23760, 23568,
23488, 23632, 23728, 23680, 23632, 23664, 23536, 23760, 23744, 23328,
23744, 23552, 24448, 23536, 23488, 23840, 21808, 24176, 23696, 23424,
23696, 23104, 23504, 23728, 23696, 23744, 23728, 23680, 23648, 23632,
23872, 23520, 23520, 23824, 23648, 24000, 23680, 24416, 23680, 23520,
23536, 23648, 23680, 23680, 23680, 23680, 23680, 23536, 23552, 23744,
23600, 23632, 23536, 23792, 23680, 23680, 23664, 23744, 23472, 23648,
23664, 23680, 23712, 24128, 23680, 23488, 23552, 23664, 23664, 23696,
23664, 23232, 23456, 23344, 23968, 23504, 23584, 23664, 23664, 23968,
22976, 23632, 23648, 23504, 23536, 23664, 23680, 23744, 23664, 23728,
23664, 23520, 23536, 23792, 23792, 23680, 23600, 23392, 23616, 23664,
24112, 23472, 23456, 23808, 23680, 23696, 23664, 23664, 23680, 23520,
23536, 23248, 23664, 23648, 23552, 23392, 23680, 23840, 23648, 24000,
23680, 23600, 23664, 23664, 23680, 23680, 23648, 23136, 23584, 23616,
24000, 23936, 23648, 23728, 23728, 23536, 23536, 23600, 23712, 23728,
23664, 23680, 23712, 23760, 23616, 23472, 23520, 23600, 23680, 23856,
23664, 23888, 24608, 23552, 23552, 23600, 23664, 23696, 23680, 24208,
23616, 23472, 23600, 24288, 24272, 23680, 23696, 23680, 23632, 23136,
23696, 23616, 23568, 23696, 23680, 23648, 23616, 23712, 23744, 23216,
23552, 23296, 23616, 23856, 23680, 23712, 23664, 23536, 23536, 23568,
23536, 23728, 23712, 23664, 23680, 23648, 23632, 23568, 23536, 23664,
23696, 23776, 23600, 23664, 23648, 23232, 23552, 23616, 23616, 23648,
23552, 24336, 23632, 23680, 23328, 23536, 23344, 24144, 23648, 24096,
23680, 23536, 23648, 23744, 23552, 23760, 23664, 23616, 23632, 23632,
23680, 23552, 23616, 23696, 23616, 23712, 23648, 23680, 23440, 23632,
23840, 23488, 23584, 23344, 23664, 23552, 23632, 23664, 23824, 23488,
23584, 23712, 23680, 23648, 23664, 23616, 23712, 22336, 23616, 23632,
23760, 23680, 23712, 23312, 23728, 23680, 23744, 23200, 23536, 23856,
23296, 22944, 23632, 23792, 23632, 23520, 23504, 23664, 23632, 23648,
23616, 23664, 23680, 23584, 23616, 23504, 23168, 23616, 23680, 22608,
23664, 23632, 23632, 23552, 23568, 23680, 23616, 23680, 23680, 23664,
23248, 23536, 23616, 23424, 23728, 23600, 23744, 23728, 23808, 23472,
23264, 23488, 23088, 23664, 22912, 24000, 23408, 23680, 23616, 23472,
23472, 23696, 23568, 23664, 23696, 23792, 23712, 23856, 23536, 23888,
23536, 23920, 23568, 23792, 23600, 23680, 23648, 23664, 23536, 23664,
23744, 23648, 23616, 23600, 23664, 23536, 23536, 23728, 23648, 23664,
23808, 23616, 23408, 23920, 23712, 23376, 23552, 23808, 23648, 23696,
23600, 23328, 23536, 23552, 23280, 23728, 23776, 23536, 23712, 23568,
23600, 23264, 23488, 23680, 23664, 23600, 23712, 23680, 23616, 23648
)
### same as samples2
samples3 <- c(
23664, 23648, 23680, 23568, 23472, 23552, 23808, 23680, 23648, 23664,
23712, 23440, 23232, 24048, 23712, 23696, 23696, 23600, 23664, 23664,
23664, 23808, 23568, 23536, 23744, 23312, 23840, 23680, 23712, 23664,
23680, 23648, 23664, 23664, 23584, 23664, 23744, 23712, 23744, 23536,
23888, 23328, 23344, 23632, 23584, 23744, 23632, 23712, 23824, 23584,
23664, 23632, 23504, 23584, 23712, 23680, 23504, 23600, 23600, 23648,
23408, 23360, 23680, 23648, 22080, 23664, 23600, 23712, 23664, 23680,
23648, 23696, 23584, 23584, 23680, 23664, 23792, 23648, 22912, 23648,
23488, 23488, 23616, 23648, 23520, 23632, 23664, 23648, 23936, 23616,
23680, 23392, 23904, 23680, 23616, 23616, 23600, 23744, 23712, 23488,
23600, 23872, 23936, 23664, 23936, 23712, 23760, 23504, 23712, 23568,
23680, 23616, 23600, 23632, 23648, 23648, 23760, 23616, 23920, 23680,
23680, 23760, 23744, 23648, 23840, 23584, 23584, 23856, 23376, 23424,
23648, 23920, 23696, 23792, 23680, 23552, 23760, 23520, 23552, 23728,
23680, 23712, 23680, 23632, 23696, 23696, 23568, 23712, 23616, 23696,
23680, 23600, 23648, 23632, 23616, 23728, 24176, 23792, 23472, 24160,
23680, 23488, 23664, 23744, 23696, 23456, 23344, 23600, 23632, 23632,
23616, 23904, 23664, 23536, 23504, 23680, 23648, 23648, 23632, 23728,
23696, 23632, 23520, 23392, 23664, 23744, 23680, 23648, 23664, 23744,
23536, 23536, 22640, 23104, 23664, 23632, 23680, 23344, 23456, 23376,
23680, 23664, 23808, 23616, 23552, 23632, 23744, 23776, 23600, 23584,
23600, 23664, 23840, 23664, 22992, 23536, 23552, 23552, 23584, 23632,
23584, 23712, 23520, 23792, 23712, 23712, 23584, 23296, 23776, 23616,
23280, 23664, 23568, 23696, 23648, 23552, 23680, 23680, 23648, 24192,
24352, 23776, 23664, 23584, 23792, 23632, 23600, 23680, 23680, 23824,
23712, 23552, 23744, 23728, 23536, 23200, 23616, 23728, 23504, 23536,
23648, 23808, 23552, 23680, 23696, 23648, 23168, 23568, 23744, 23600,
23648, 23632, 23616, 23584, 24272, 23584, 23616, 23696, 23568, 23680,
23840, 23744, 23632, 24240, 23616, 23776, 23568, 23680, 23744, 23648,
23648, 23648, 23680, 23680, 23696, 23712, 23696, 23568, 23728, 23648,
23616, 23728, 23888, 23616, 23696, 23936, 23616, 24064, 23648, 23536,
23504, 23648, 23584, 23648, 23616, 23648, 23680, 23936, 23584, 23680,
23568, 24368, 23504, 23664, 23488, 24000, 23904, 23536, 23552, 23696,
23312, 23584, 23744, 23600, 23696, 23648, 23552, 23584, 23680, 23536,
23696, 23648, 23552, 23728, 23536, 23616, 23664, 23872, 23600, 24080,
23200, 23936, 23360, 23536, 23616, 23776, 23696, 23632, 23632, 23680,
23648, 23680, 23712, 23152, 23280, 23632, 23568, 23504, 23664, 23680,
23712, 23632, 23568, 23664, 23568, 23664, 23632, 23664, 23680, 23616,
23664, 23344, 23552, 23232, 23504, 23680, 23600, 23664, 23552, 23520,
23552, 23600, 23072, 23712, 23648, 23648, 23744, 23536, 23712, 23568,
23888, 23744, 23552, 24176, 23808, 23952, 22544, 23440, 23680, 23616,
23712, 22880, 23680, 23248, 23744, 23520, 23632, 23632, 23664, 23680,
23600, 23664, 23760, 23584, 23488, 24304, 22896, 23744, 23632, 23680,
23680, 23648, 23696, 23744, 23856, 23568, 23600, 23648, 23632, 23648,
23744, 23600, 23792, 24000, 23488, 23616, 23680, 23632, 23952, 23648,
23472, 23776, 23552, 23632, 23696, 23664, 23552, 23648, 23680, 23680,
23520, 23648, 23696, 23648, 23664, 23616, 23680, 23584, 23536, 23664,
23552, 23728, 23664, 23856, 23616, 23744, 23664, 23568, 23664, 23712,
23808, 23712, 23600, 23712, 23504, 23280, 23920, 23712, 23616, 23680,
23696, 23296, 23504, 23456, 23568, 23568, 23552, 23664, 23600, 23632,
23600, 23648, 23776, 23536, 23440, 23536, 23680, 23744, 23648, 23760,
23680, 23744, 23520, 23648, 23632, 23680, 23744, 23680, 23600, 23760,
24240, 23568, 23280, 23728, 23712, 23680, 23648, 23728, 23696, 23616,
23664, 23632, 23680, 23328, 23712, 23840, 23872, 23200, 23728, 23632,
23664, 23680, 24416, 23520, 23664, 23712, 23696, 23536, 23664, 23888,
23632, 23072, 23696, 23376, 23712, 23648, 23424, 23552, 23744, 23664,
23616, 23680, 23600, 23664, 23680, 23584, 23632, 23632, 23872, 23632,
24192, 23632, 23680, 23472, 23728, 23664, 23552, 23680, 23712, 23680,
23648, 23664, 23552, 23536, 23152, 23520, 23600, 23600, 23696, 23584,
23472, 23776, 23280, 24048, 23568, 23584, 23616, 23680, 23680, 23728,
23552, 23536, 23728, 23680, 23600, 23648, 23600, 23568, 23392, 23648,
23536, 23776, 23616, 23552, 23648, 23584, 23680, 23648, 23536, 23728,
23584, 23616, 23696, 23744, 23600, 23536, 23280, 23968, 23552, 23104,
23632, 23840, 23728, 23600, 23680, 23664, 23488, 24112, 23680, 23776,
23680, 23600, 23792, 23728, 23728, 23504, 23296, 23648, 23696, 23616,
23680, 23648, 23568, 23664, 23712, 23552, 23536, 23648, 23632, 23696,
23840, 23536, 23744, 23472, 23552, 23680, 23568, 23600, 23648, 23616,
23776, 23664, 23488, 23712, 23664, 23648, 23808, 23680, 23952, 23680,
23696, 23616, 23856, 23664, 23200, 23760, 23568, 23728, 23664, 23616,
23648, 23664, 23648, 23808, 23904, 23184, 23760, 23632, 23584, 23056,
23712, 23632, 23632, 23616, 23648, 23696, 23488, 23344, 23680, 23600,
23984, 23680, 23616, 23712, 23472, 23616, 23712, 23680, 23600, 23616,
23680, 23664, 23872, 23344, 23664, 23680, 23552, 23664, 23920, 23712,
23680, 23616, 23680, 23664, 22496, 23760, 23632, 23776, 24448, 23648,
23648, 23664, 23488, 23744, 23472, 23728, 23600, 23680, 23600, 23600,
23936, 23936, 23552, 23712, 23568, 23600, 23600, 23600, 23648, 23664,
23552, 23808, 23680, 23680, 23504, 23632, 23744, 23600, 23664, 23808,
23984, 23488, 23600, 23680, 23728, 23664, 23648, 23664, 23888, 23504,
23632, 23664, 23664, 23744, 23680, 23712, 23648, 23616, 23360, 23744,
23520, 23600, 23872, 24016, 23664, 24000, 23632, 23648, 23088, 23600,
23744, 23696, 23504, 23712, 23664, 23648, 23680, 23296, 23664, 23680,
23552, 23360, 23328, 23520, 23040, 23584, 23712, 23680, 23568, 23648,
22960, 23248, 23632, 23632, 23680, 24016, 23312, 23728, 23600, 23728,
23440, 23632, 23648, 23552, 23728, 23568, 23472, 23616, 23760, 23264,
23888, 23872, 23568, 23648, 23872, 23632, 23712, 23504, 23648, 24000,
23008, 23920, 23712, 23552, 23680, 23616, 23664, 23536, 23680, 23504,
23584, 23600, 23744, 23552, 23488, 23600, 23632, 23680, 23776, 23600,
23792, 23584, 23552, 23600, 23680, 23712, 23568, 23744, 23664, 23504,
23472, 23808, 23808, 23552, 23584, 23648, 23648, 23584, 23568, 23696,
23456, 23552, 23792, 23600, 23040, 23440, 23744, 24176, 23536, 23680,
23600, 23568, 23616, 23616, 23792, 23696, 23648, 23632, 23552, 23792,
23856, 23616, 23664, 23216, 23536, 23616, 23472, 23696, 23008, 23728,
23696, 23552, 23744, 23600, 23536, 24032, 23664, 23056, 23824, 23696,
23600, 23872, 23728, 23312, 23584, 23680, 23680, 23552, 24176, 23648,
24064, 23664, 23488, 23616, 23680, 23664, 23648, 23584, 23744, 23632,
23648, 23584, 23600, 23568, 23616, 23728, 23744, 23648, 23712, 23616,
23600, 23824, 23632, 23616, 23616, 23680, 23568, 23664, 23552, 23024,
23760, 24144, 23616, 23680, 23440, 23616, 23360, 23136, 23680, 23648,
23536, 23568, 23680, 24080, 23840, 23680, 23632, 23536, 23616, 23648,
23664, 23680, 23648, 23680, 23488, 23488, 23552, 23632, 23600, 23584
)
samples4 <- c(
23648, 23264, 23600, 23472, 23504, 23280, 23872, 23760, 23840, 23696,
23680, 23792, 23872, 23440, 23840, 23776, 23424, 23760, 23776, 23664,
23808, 23728, 23520, 23840, 23376, 23792, 24288, 23760, 23808, 23728,
23744, 23712, 23728, 23952, 23760, 23712, 23824, 23904, 23568, 23808,
23632, 24000, 24000, 24016, 23424, 23760, 23824, 23872, 23760, 23744,
23664, 23792, 23776, 23808, 23616, 23824, 23712, 23472, 23792, 23696,
23808, 23808, 23616, 23936, 23776, 23776, 23408, 23712, 23712, 23568,
23424, 24304, 23744, 23728, 23744, 23680, 23792, 23824, 23728, 23648,
23744, 23728, 23728, 24080, 23840, 23680, 23936, 23600, 23792, 23584,
23616, 23664, 23664, 23744, 23664, 23712, 23776, 23664, 23600, 23840,
23648, 23664, 23616, 23632, 23824, 23712, 22784, 23664, 23696, 23712,
23824, 23712, 23728, 23872, 23664, 23616, 23808, 23696, 23744, 23728,
23808, 23728, 24096, 23664, 23776, 23408, 23728, 23664, 23728, 23792,
23760, 23744, 23712, 24160, 23632, 23568, 23760, 23776, 23696, 23712,
23648, 23808, 23616, 23664, 23248, 23664, 23696, 23680, 23776, 23840,
23888, 24320, 23536, 23424, 22880, 23744, 23680, 23552, 23664, 23776,
23712, 23600, 23664, 23680, 23664, 23776, 23632, 23872, 23920, 23680,
23808, 23648, 23664, 23712, 23664, 23552, 23696, 23552, 23376, 23808,
23536, 23712, 23728, 23776, 23696, 24176, 23568, 23840, 23424, 23760,
23760, 23744, 23664, 23888, 23584, 23888, 23600, 24016, 23488, 23808,
23840, 23712, 23648, 23904, 23808, 23728, 23648, 23632, 23744, 23888,
23536, 23744, 23632, 23760, 23072, 23680, 23840, 23728, 23584, 23680,
23696, 23712, 23360, 23632, 23664, 23328, 23760, 23680, 23664, 23760,
23664, 23360, 23744, 23760, 23760, 23808, 23664, 23712, 24016, 23776,
24336, 23776, 23776, 24160, 24112, 23648, 23888, 23920, 23776, 24064,
23760, 23744, 23840, 23728, 23712, 23696, 23696, 23648, 23920, 23648,
23680, 23408, 23680, 23680, 23680, 23632, 23584, 23664, 23728, 23904,
23600, 23648, 23776, 23648, 23552, 23744, 23712, 23696, 23680, 23840,
23696, 23744, 23712, 23744, 23872, 23808, 23680, 23664, 23840, 23728,
23600, 23616, 23600, 23760, 23072, 23584, 23792, 23664, 24064, 23744,
23520, 23824, 23744, 23536, 23616, 23872, 24176, 23648, 23376, 23600,
23648, 23744, 23744, 23664, 23760, 23776, 23696, 23552, 23648, 23728,
23776, 23760, 23456, 23280, 23536, 24080, 23632, 22848, 23616, 23520,
23744, 23888, 23632, 23712, 23648, 23552, 23680, 23680, 23696, 23680,
23424, 23696, 23664, 23344, 23584, 23744, 23680, 23744, 23712, 23776,
23328, 24352, 23472, 23776, 24240, 23472, 23648, 23680, 23664, 23664,
23664, 23904, 23616, 23536, 23792, 23808, 23712, 23648, 23840, 23856,
23648, 23552, 23696, 23536, 23616, 23456, 23680, 23744, 23664, 23712,
23840, 23696, 23568, 23776, 23680, 23776, 23360, 23968, 23664, 23632,
23584, 23632, 23728, 23120, 23680, 23712, 23552, 23760, 23840, 23680,
23536, 23632, 23680, 23888, 24064, 23744, 24096, 23616, 23296, 23632,
24176, 23632, 23664, 23648, 23824, 23472, 23488, 23712, 23664, 23728,
23664, 23536, 23728, 23632, 23824, 23680, 23552, 23520, 23456, 23712,
23424, 23680, 23232, 23696, 23552, 23616, 23648, 23696, 23728, 22768,
23760, 23616, 23568, 23472, 23728, 23648, 23648, 23712, 24032, 24128,
23808, 23520, 23680, 23616, 23664, 23600, 23680, 23616, 23632, 23728,
23648, 23648, 23616, 23664, 23600, 23680, 23760, 23696, 23616, 23792,
23808, 23552, 23648, 23696, 23792, 23696, 23632, 23792, 23712, 23584,
23664, 23680, 23680, 23680, 23584, 23328, 23760, 23536, 23664, 23536,
23744, 23968, 23552, 23824, 23664, 23680, 23744, 23776, 23616, 23968,
23552, 23520, 23664, 23680, 23760, 23664, 23568, 23520, 24048, 23712,
23712, 23664, 23696, 23984, 23392, 23680, 23920, 23648, 24256, 23776,
23696, 23808, 23568, 23584, 23600, 23536, 24240, 23088, 23728, 23680,
23568, 23472, 23616, 23648, 23696, 23680, 23664, 23648, 23664, 23632,
23632, 23664, 23600, 23712, 23728, 23664, 23728, 23616, 23664, 23616,
23328, 23664, 23680, 23488, 23760, 23680, 23744, 23680, 23584, 23824,
23360, 23408, 24208, 24560, 23664, 23648, 23664, 23680, 23568, 23680,
23664, 23760, 23712, 23760, 23808, 23680, 23536, 23664, 23696, 23632,
23680, 23680, 23664, 23648, 23728, 23584, 23552, 23712, 23600, 23680,
23680, 23728, 23712, 23648, 23568, 23680, 23936, 23632, 23632, 23728,
23744, 23520, 23168, 23776, 23680, 23680, 23696, 23648, 23696, 23696,
23536, 23744, 23552, 23520, 23744, 23568, 23232, 23568, 23776, 23808,
23504, 23632, 23616, 23648, 23600, 23680, 23584, 23856, 23648, 23680,
23552, 23712, 23872, 23664, 23664, 24192, 23680, 23712, 23536, 23744,
23664, 23616, 23888, 23616, 23312, 23632, 23616, 23744, 23696, 23696,
23712, 23712, 23680, 23632, 23648, 23920, 23408, 22976, 23760, 23488,
23648, 23616, 23392, 23792, 23600, 23472, 23712, 23152, 23584, 23664,
23600, 23728, 23536, 23568, 23744, 22960, 23664, 23984, 23664, 23536,
23344, 23376, 24000, 23536, 23920, 23664, 23680, 23520, 23648, 23600,
23712, 23408, 23600, 23712, 23728, 23616, 23696, 23648, 23632, 23824,
23664, 24240, 23824, 23712, 23248, 23552, 24000, 23680, 23904, 23648,
23744, 23664, 23712, 23808, 23952, 23616, 23616, 23664, 23664, 23424,
23648, 23504, 23472, 23520, 23968, 23968, 23632, 23952, 23744, 23680,
23696, 23744, 23536, 23696, 23600, 23616, 23664, 23600, 23744, 23616,
23568, 23664, 23744, 23744, 23664, 23328, 23744, 23680, 23536, 23568,
23632, 23600, 23696, 23664, 23632, 23776, 23536, 23472, 23584, 23648,
23472, 23696, 23536, 23792, 23712, 23600, 23648, 23648, 23728, 24000,
23520, 23696, 23488, 23824, 23712, 23584, 23664, 23728, 23728, 23824,
23936, 23696, 23664, 23552, 23648, 23712, 23632, 23616, 23680, 23712,
23632, 23680, 23680, 23536, 23744, 23632, 23648, 23968, 22128, 23664,
24560, 23520, 23024, 23600, 23680, 23680, 24064, 23200, 23744, 23568,
23792, 23600, 23760, 23632, 23712, 23680, 23664, 23680, 23728, 23568,
23888, 23984, 23200, 23744, 23968, 23936, 23728, 23504, 23584, 23616,
24160, 23744, 23648, 23760, 23696, 23552, 23664, 23600, 23728, 23632,
23664, 23712, 23568, 23696, 23184, 23552, 23904, 23792, 23696, 23104,
23664, 23632, 23600, 23104, 23744, 23664, 23600, 23616, 23680, 23648,
23552, 23504, 23440, 23680, 23248, 23296, 23552, 22256, 24240, 23616,
23680, 23712, 23552, 23680, 23712, 23680, 23664, 23680, 23984, 23648,
23552, 23680, 23696, 23712, 23600, 23792, 23600, 23232, 23856, 23600,
23664, 23984, 24128, 23696, 23664, 23696, 23584, 23504, 23680, 23600,
23744, 23680, 23664, 23808, 23616, 23568, 23936, 22912, 23760, 23952,
23808, 24784, 23600, 23808, 23776, 23952, 23712, 23744, 23584, 23616,
23680, 23696, 23744, 23536, 23552, 23680, 23632, 23648, 23664, 23712,
23696, 23792, 23680, 23568, 23776, 23680, 23568, 23696, 23680, 23712,
23408, 23280, 23808, 23648, 23664, 23664, 23680, 23600, 23568, 23552,
23776, 23680, 23616, 23664, 23584, 23472, 23488, 23856, 23744, 23072,
23824, 23664, 23616, 23616, 23568, 23760, 23664, 23552, 23648, 22336,
24208, 23584, 23664, 23712, 23744, 23584, 23696, 23696, 24032, 23664,
23680, 23584, 23680, 23648, 23936, 23632, 23792, 23616, 23616, 23648,
23632, 23600, 23600, 24304, 23808, 23648, 23600, 23648, 23584, 23632
)
### 1000 raw ADC readings (12 bit values stored left-shifted in a 16 bit
### unsigned int, so values step in multiples of 16) from the CLUE P1 pad
### measured across an alkaline single cell - battery sample set i.
### Readings cluster around ~28600 (~1.44V at the 3.3V reference).
batt_samples5 <- c(
29056, 28272, 28592, 28608, 28672, 28528, 28672, 28832, 28400, 29520,
28784, 28608, 28784, 28544, 28576, 28672, 28528, 28672, 28640, 28576,
28480, 28640, 28512, 28640, 28352, 27600, 28800, 29136, 28544, 29568,
29216, 28560, 28608, 28720, 28704, 28912, 28640, 29168, 28672, 28528,
28592, 29264, 28672, 28560, 28576, 28736, 28544, 28448, 28544, 28944,
28656, 28480, 28336, 28608, 28624, 28608, 28544, 28768, 28464, 27536,
28432, 28464, 28544, 28576, 29520, 28640, 28624, 28464, 28624, 28720,
28560, 28624, 28656, 28016, 28064, 28544, 28544, 28576, 28736, 28576,
28672, 28528, 28864, 28512, 28464, 28528, 28496, 28736, 28560, 28320,
28720, 27872, 28448, 28416, 28368, 28688, 28608, 28496, 27728, 28528,
28464, 28032, 28640, 28592, 28480, 28528, 28528, 28672, 28736, 28592,
28576, 28528, 28528, 28592, 28608, 28928, 28608, 27136, 28416, 28704,
28128, 27712, 28544, 28592, 28768, 28224, 28720, 28848, 28336, 27936,
28512, 28640, 28512, 28800, 31936, 28608, 28608, 28640, 28608, 28512,
28704, 28592, 28592, 28240, 28432, 28528, 28624, 28592, 28528, 28592,
28528, 28640, 28608, 28480, 28736, 28608, 28560, 28544, 28528, 28576,
28656, 28672, 28496, 27648, 28784, 29040, 28752, 28960, 28704, 28672,
28512, 28640, 28544, 28608, 28672, 28720, 28608, 28576, 28544, 28528,
28624, 28528, 28848, 28704, 28640, 28592, 28512, 28544, 28592, 28608,
28560, 28736, 28640, 28592, 28736, 28464, 29488, 28640, 28720, 28608,
28656, 27584, 28560, 28912, 28800, 28400, 28512, 28928, 28592, 28576,
28592, 28672, 28576, 28672, 28576, 28608, 27232, 28400, 28512, 28592,
28512, 28576, 28624, 28560, 28464, 28672, 28592, 28672, 28544, 28496,
28528, 28544, 28656, 28480, 28336, 28768, 28912, 29744, 28112, 28528,
28560, 28640, 28656, 28656, 28656, 28560, 29728, 28528, 28640, 28832,
29616, 28768, 28640, 28528, 28448, 28400, 28560, 28688, 28784, 28608,
28608, 28576, 28544, 28592, 28544, 28176, 29552, 28560, 28624, 28496,
28736, 28480, 28560, 28592, 28544, 28560, 28624, 28624, 28544, 28576,
28208, 28640, 28400, 28640, 28608, 28480, 28544, 28480, 28912, 28688,
28608, 28464, 28704, 28672, 28592, 28608, 28608, 28608, 28640, 28608,
28592, 28528, 28688, 28704, 28608, 28544, 28816, 28464, 28016, 28688,
28768, 28624, 28608, 28608, 28672, 28720, 28656, 28544, 28528, 28416,
28704, 28672, 28608, 29248, 28640, 27520, 28928, 28672, 28560, 28496,
28656, 28608, 28128, 29360, 28592, 28608, 28512, 28560, 28448, 28656,
28992, 28608, 28688, 28496, 28672, 28496, 28736, 28672, 28592, 28544,
28528, 29104, 28512, 28752, 29296, 29968, 29040, 28560, 28688, 28608,
28608, 28512, 27872, 28720, 28640, 28592, 29104, 27600, 28560, 28512,
28512, 28736, 28640, 28640, 28736, 28544, 28592, 28704, 28672, 28592,
28496, 28848, 28432, 28560, 28496, 28592, 28656, 28848, 28464, 28560,
28544, 28528, 28464, 28624, 28704, 28560, 28512, 28864, 28688, 28544,
28832, 28576, 28464, 28448, 29376, 28528, 28560, 28640, 28192, 28544,
28432, 28656, 28624, 27536, 28448, 28576, 28624, 28576, 28608, 28672,
28576, 28672, 28496, 28768, 28384, 28752, 28544, 28640, 28640, 28528,
28576, 28592, 28368, 28576, 28640, 28528, 28608, 28640, 28640, 28528,
28544, 28704, 28608, 28528, 28736, 28560, 28672, 28656, 28640, 28704,
28672, 28848, 28528, 28592, 28640, 28400, 27424, 28608, 28432, 28704,
28768, 28608, 28080, 28832, 28848, 28848, 28592, 29744, 28560, 28528,
27648, 28928, 29360, 28928, 28752, 28528, 28576, 28512, 28528, 29408,
28320, 28480, 28608, 28576, 28368, 28608, 28656, 28656, 28592, 28592,
28560, 28576, 28528, 28592, 28576, 28576, 28464, 28560, 29200, 28464,
28560, 28608, 28592, 28672, 29840, 28656, 28880, 27504, 29888, 28800,
28416, 28368, 28672, 28640, 29984, 28592, 30672, 28608, 28608, 28640,
28480, 28352, 28608, 28992, 28528, 28480, 28544, 28672, 28544, 28544,
25520, 28608, 28496, 28448, 28288, 28592, 28800, 28688, 28384, 28400,
28336, 28528, 28272, 28688, 28592, 28592, 28032, 28656, 28480, 28160,
28800, 28512, 28608, 28432, 28512, 28880, 28592, 28512, 28656, 28640,
28512, 28448, 28464, 28512, 28544, 28672, 27904, 28672, 28208, 28608,
28576, 28736, 28512, 28528, 28608, 28656, 28640, 28656, 28640, 28544,
28656, 28528, 28464, 28512, 28592, 28592, 28640, 28640, 28464, 28864,
28544, 28608, 28496, 28528, 28240, 28672, 29840, 28688, 27952, 28528,
28448, 28656, 28640, 28496, 28592, 28672, 28640, 28016, 28560, 28784,
28576, 28192, 28480, 28544, 28688, 28560, 28544, 28672, 28592, 28656,
28672, 28640, 28720, 28560, 28656, 28592, 28592, 29792, 28176, 28560,
27392, 28608, 28544, 28608, 28592, 29296, 28368, 28688, 29168, 28560,
28544, 27776, 28544, 28544, 28576, 28592, 28592, 28560, 28768, 28528,
28672, 28656, 29392, 28560, 28752, 28688, 28560, 28736, 28592, 28608,
28992, 27648, 28720, 28672, 28608, 28624, 28480, 28576, 28736, 28560,
28544, 28560, 27312, 28688, 28416, 28592, 28976, 28624, 28608, 28736,
28576, 28656, 28480, 28448, 28704, 28352, 28640, 28432, 28560, 28592,
28448, 28544, 28592, 28528, 28560, 28608, 28512, 28464, 28704, 28624,
28640, 28544, 28800, 28624, 28608, 28608, 27760, 28576, 28656, 28608,
28608, 28688, 28608, 28480, 28608, 28384, 28640, 28576, 28592, 28768,
28528, 28592, 28608, 28848, 28816, 29216, 28240, 28544, 28816, 28192,
28800, 28576, 28736, 28496, 28576, 28944, 28800, 29136, 26736, 28896,
28704, 28352, 28528, 28672, 28400, 28800, 28960, 28512, 28672, 28608,
28528, 28496, 28160, 28864, 28192, 28544, 28576, 28496, 28576, 28528,
28544, 28752, 28576, 28656, 28272, 28656, 28624, 29216, 27744, 28656,
28000, 28672, 28560, 28720, 28656, 28528, 27520, 28432, 28688, 28592,
28560, 28544, 28384, 28720, 28432, 28880, 28400, 28496, 28432, 28768,
28640, 28608, 28128, 28544, 28896, 28608, 28416, 28656, 27472, 28832,
27456, 28496, 28624, 28528, 28480, 28624, 28592, 28640, 28672, 28528,
28576, 28688, 28752, 28672, 28272, 28688, 28080, 28640, 28672, 28288,
28544, 28592, 28672, 28208, 28480, 28608, 28704, 28608, 28640, 28736,
28448, 29312, 27568, 28528, 28592, 28560, 28656, 28608, 28592, 28672,
28608, 28032, 28592, 28656, 28672, 28544, 28624, 28640, 29072, 28544,
27920, 28496, 28656, 28688, 28672, 28544, 28544, 28608, 28464, 28464,
28528, 28656, 28736, 28480, 29056, 28720, 28704, 28480, 28608, 28736,
28400, 28592, 28544, 28608, 28432, 28576, 28608, 28928, 28672, 28976,
28608, 28400, 28752, 28512, 28592, 28672, 28704, 28544, 28752, 28352,
28640, 28560, 28688, 28560, 29264, 28528, 28672, 28560, 28656, 28592,
28736, 28464, 28576, 28544, 28672, 28672, 27568, 27584, 28624, 28656,
29616, 28688, 28624, 28688, 28608, 28544, 28464, 28432, 28576, 28560,
28688, 28544, 28528, 28464, 28496, 28592, 29152, 28384, 28608, 28560,
28528, 28400, 28272, 28464, 28688, 28672, 28608, 28576, 27984, 28928,
28800, 28720, 29232, 28688, 28528, 28592, 28656, 28608, 28656, 28384,
28480, 28640, 28368, 28512, 28528, 28768, 28592, 28576, 28688, 28944,
28544, 28672, 29104, 28912, 28592, 28608, 28080, 28624, 28480, 28576,
28672, 28464, 28528, 28480, 28576, 28528, 28576, 28736, 28592, 28496,
28784, 28608, 28656, 28592, 28528, 28528, 28480, 28592, 29664, 28528,
28528, 28816, 28512, 28544, 28592, 28608, 28624, 28640, 28592, 28528
)
### 1000 raw ADC readings (12 bit values stored left-shifted in a 16 bit
### unsigned int, so values step in multiples of 16) from the CLUE P1 pad
### measured across an alkaline single cell - battery sample set ii.
### Same setup as batt_samples5, captured as a second independent run.
batt_samples6 <- c(
28688, 28736, 28544, 28512, 28624, 28816, 27712, 28720, 28656, 28512,
28704, 28672, 28480, 28560, 28608, 28384, 29552, 28272, 28656, 29520,
28784, 28736, 28720, 28640, 28416, 28576, 28640, 28624, 28672, 28672,
28560, 27648, 28608, 28016, 28576, 28544, 28512, 28608, 28528, 28656,
28608, 28400, 28624, 28528, 28640, 28784, 28480, 28688, 28544, 28640,
28528, 28512, 28416, 28480, 28608, 28432, 28528, 28560, 29168, 28336,
28448, 28576, 27488, 28608, 28512, 28672, 28576, 28704, 28544, 28528,
28272, 29632, 28720, 28400, 28736, 28528, 28464, 28544, 28560, 28704,
28560, 28752, 28528, 28496, 28464, 28544, 28624, 28544, 28544, 28400,
28544, 28544, 28576, 28576, 28432, 28176, 28608, 27984, 28528, 28400,
28576, 28480, 26816, 28528, 28784, 29056, 28640, 28864, 28528, 28576,
29056, 28480, 28496, 28784, 26368, 28880, 27936, 28672, 27680, 28736,
28592, 28592, 28560, 28672, 28480, 28496, 28640, 28608, 28576, 28480,
28544, 28608, 29264, 28624, 28560, 29088, 28480, 28624, 28496, 28528,
28608, 28736, 28640, 28720, 28544, 28592, 28608, 28464, 28640, 28576,
28640, 28144, 28576, 29504, 28544, 28544, 28672, 28672, 29616, 28000,
28672, 28576, 28656, 28576, 28592, 28496, 28576, 28608, 28576, 28608,
28624, 28656, 28672, 28400, 28544, 27232, 28688, 28896, 28800, 28224,
28736, 28464, 28512, 28608, 28592, 28624, 28576, 28464, 27888, 28768,
29168, 28512, 28528, 29136, 28608, 28768, 28576, 28624, 28544, 28464,
28656, 28544, 29216, 28624, 27888, 28768, 28400, 28688, 28688, 28528,
28512, 28416, 28432, 28720, 28768, 28624, 28544, 28528, 28752, 28560,
30176, 28400, 28464, 28800, 28608, 28624, 28576, 27648, 28736, 28512,
28576, 27488, 28592, 28528, 29264, 27984, 28656, 27728, 28544, 28608,
28560, 28464, 28560, 28560, 28592, 28592, 28576, 28592, 28624, 28112,
28416, 28592, 28352, 28544, 28544, 28544, 28544, 28640, 28528, 28544,
28640, 28400, 29456, 28576, 29824, 28288, 28544, 28576, 28688, 28720,
28688, 28544, 28528, 28672, 28608, 28544, 28528, 28624, 28528, 28592,
28480, 28416, 28656, 28640, 27984, 28608, 28464, 28576, 28912, 28480,
28224, 28464, 28544, 28752, 28528, 28768, 28448, 28496, 28592, 28608,
30032, 28544, 28560, 28576, 28544, 28576, 28528, 28592, 28096, 28512,
28464, 28608, 28672, 28736, 28576, 28608, 28704, 27904, 28464, 28576,
28624, 28464, 28560, 28528, 28544, 28544, 28576, 28656, 28720, 28512,
27216, 28096, 28400, 29008, 28672, 28576, 28544, 28272, 28592, 28416,
28624, 28608, 28608, 28576, 28528, 28560, 28576, 28512, 28304, 28496,
28496, 28704, 28480, 29296, 28992, 28864, 28560, 27936, 28528, 28560,
28528, 28608, 28544, 28768, 28544, 28608, 28464, 28496, 28688, 29392,
28832, 28464, 27872, 28640, 28672, 28672, 28560, 28560, 28576, 28672,
28544, 28608, 28544, 28592, 28512, 28544, 28672, 28608, 28528, 28736,
28400, 28464, 28624, 28608, 28608, 28608, 27312, 28704, 28560, 28544,
28240, 28640, 28640, 28464, 28384, 28592, 28624, 28512, 28320, 29120,
28416, 28464, 28464, 28496, 28608, 29504, 28672, 28544, 28576, 28560,
28624, 28512, 28672, 28704, 28528, 28720, 28464, 26960, 28608, 28864,
28608, 28464, 28560, 28224, 28512, 28640, 28688, 28560, 28672, 28608,
28544, 28576, 28496, 28464, 29200, 28496, 28704, 28560, 29328, 27600,
28448, 27392, 27936, 28448, 28624, 28592, 28704, 28640, 28624, 28592,
28576, 28832, 28624, 28656, 28560, 28560, 28528, 29312, 28656, 28416,
28544, 28992, 28880, 28592, 27264, 28672, 28528, 28384, 28640, 28512,
28528, 28576, 28592, 28512, 28528, 27904, 28544, 28528, 28544, 28640,
28560, 28672, 26016, 28016, 28512, 28368, 28336, 28528, 28400, 28640,
28480, 28592, 28592, 28624, 28704, 28608, 28464, 28704, 28640, 29568,
28544, 28528, 28608, 28608, 28480, 28384, 28624, 29824, 28368, 28672,
28608, 28448, 28576, 28416, 28336, 28512, 28736, 28688, 28768, 29232,
28672, 28400, 28544, 27696, 28688, 28480, 28672, 28576, 28528, 28464,
28512, 28544, 28640, 26368, 28592, 28704, 28640, 28480, 28576, 28592,
28512, 28128, 28704, 28736, 26752, 28544, 28800, 28512, 28464, 28544,
28480, 28544, 28592, 28512, 28672, 28576, 28560, 28336, 28528, 28336,
28656, 28464, 28640, 28496, 28480, 28528, 28528, 28528, 28528, 28576,
28368, 28496, 28608, 28528, 28592, 28720, 28512, 28272, 28560, 28928,
27600, 28144, 28640, 28896, 27536, 28176, 28624, 28464, 28304, 28608,
27472, 28528, 28496, 28592, 28592, 28544, 28608, 28624, 28592, 28640,
28624, 28496, 28528, 28608, 29568, 27888, 28640, 28608, 28752, 28496,
28464, 28592, 28640, 28576, 28544, 28688, 28672, 28656, 28480, 28592,
28576, 28112, 28368, 29552, 28496, 28480, 28576, 29904, 28592, 29168,
28608, 28576, 28576, 28416, 28672, 28560, 28352, 28560, 28624, 28592,
28416, 28736, 28624, 28640, 28496, 28528, 28528, 28720, 28560, 28704,
27744, 28528, 28544, 28544, 28528, 28576, 28656, 28496, 28592, 28672,
28592, 29152, 26752, 29328, 28768, 28192, 29760, 28608, 28768, 28528,
28576, 28592, 28560, 28704, 27584, 28704, 28672, 28576, 28608, 28496,
28576, 28672, 28672, 29248, 28704, 27792, 28736, 28576, 27920, 28576,
28672, 28544, 28496, 28512, 28640, 28512, 28528, 28576, 28512, 28608,
28560, 28656, 28528, 28736, 28608, 28688, 28496, 28480, 28592, 28928,
28640, 28528, 28976, 28560, 28512, 28528, 28544, 28544, 28608, 28608,
28112, 28704, 28592, 28576, 28448, 28688, 27680, 28096, 28400, 28416,
28672, 27968, 28512, 28656, 28640, 28560, 28464, 28576, 28528, 28608,
28560, 28512, 28544, 28592, 28576, 28592, 28464, 28720, 28464, 28544,
28528, 28592, 29552, 28608, 28576, 29136, 28544, 28576, 28560, 28640,
28480, 28464, 28480, 28576, 28752, 28560, 28240, 28480, 28576, 28368,
27840, 25152, 28848, 28608, 28528, 28560, 28640, 28480, 28608, 28576,
28400, 28720, 28656, 28576, 28400, 28672, 27504, 28400, 28592, 28416,
28064, 28688, 28704, 28464, 28608, 28528, 28528, 28528, 28560, 28624,
28560, 28560, 28688, 28400, 28656, 28144, 28496, 28944, 28576, 28480,
28560, 28672, 28544, 28560, 28496, 28544, 28608, 28608, 28512, 28464,
28592, 28672, 28528, 28208, 28528, 28496, 28928, 28672, 28544, 28672,
28528, 28560, 28608, 28496, 28576, 28560, 27536, 28608, 28624, 28592,
28704, 28672, 28528, 29296, 28560, 28576, 28544, 28928, 28512, 27792,
28528, 28544, 29552, 28576, 28704, 28592, 28288, 28480, 28560, 28592,
28464, 28640, 28672, 28592, 26944, 28528, 28672, 28144, 28608, 28592,
28608, 28560, 28560, 28640, 28560, 28512, 28640, 28720, 28576, 28480,
28592, 28592, 28720, 28528, 28480, 28608, 28752, 28528, 28576, 28608,
28672, 28672, 28576, 28688, 29472, 28544, 29296, 28464, 28496, 28704,
28480, 28544, 28672, 28512, 28544, 28608, 28464, 28656, 28576, 28528,
28496, 28656, 29696, 28496, 29120, 28512, 28352, 28496, 28576, 28640,
28544, 28672, 28560, 28608, 28496, 28496, 28576, 28608, 28544, 28720,
28624, 28528, 28592, 28672, 28464, 28544, 28528, 28656, 28496, 28560,
28480, 28560, 29552, 28320, 28544, 28192, 28768, 28832, 27408, 28848,
28800, 28560, 28560, 28464, 28432, 28688, 27696, 28656, 28880, 28784,
28912, 28608, 28256, 28528, 28672, 28560, 28672, 28608, 28592, 28560,
28416, 28592, 28448, 28640, 28544, 28576, 28032, 28528, 28656, 28560,
28608, 28576, 28560, 28640, 28672, 28528, 28624, 29744, 28592, 28560
)
#############################################
### Configuration shared by all graph sets below.
ref_voltage <- 3.3       ### ADC reference voltage in volts
sample_spacing <- 16     ### step between representable values: 12 bit
                         ### samples are stored left-shifted in a 16 bit
                         ### unsigned int
### One entry per data set: output filename prefix, title text for the
### plots, sd scaling factor for the "modified" normal overlay and the
### sample vector itself (defined earlier in this file).
graphsets <- list(filename=c("md100nFcap4", "batterys5", "batterys6"),
                  conditions=c("across 0.1uF capacitor",
                               paste("across alkaline single cell",
                                     c("sample i", "sample ii"))),
                  mod_sd_factor=c(0.4, 0.25, 0.25),
                  samples=list(samples4, batt_samples5, batt_samples6))
### Multiplier converting a stored 16 bit sample to volts - the 12 bit
### data occupies the full 16 bit range, hence 65535.
conv_factor <- ref_voltage / 65535.0
### For each graph set: fit an exponential (RC discharge shape) curve plus
### two straight lines (the second one down-weighting outliers) to the
### samples, then write scatter, residual, histogram, Q-Q and P-P plots
### out as PNG files named after the data set.
for (idx in seq(1, length(graphsets$filename))) {
dataname <- graphsets$filename[[idx]]
conditions <- graphsets$conditions[[idx]]
mod_sd_factor <- graphsets$mod_sd_factor[[idx]]
raw_samples <- graphsets$samples[[idx]]
### Duration in seconds spanned by the whole sample set (the time column
### below runs from 0 to this) and the x position of the annotation label.
sample_period <- 47.469 * 1e-3
#notes_pos_x <- 10 * 1e-3
notes_pos_x <- sample_period * 15/50
### Nominal RC time constant (1Mohm * 0.1uF) used as an nls start value.
#rc_value <- 1e6 * 2.2 * 1e-6
rc_value <- 1e6 * 0.1 * 1e-6
delay <- 0.005
### These are now set at top of for loop
###conditions <- "during slow capacitor discharge"
###conditions <- "across capacitor"
### Raw samples plus derived voltage and evenly-spaced time columns.
samples_df <- data.frame(samples=raw_samples,
voltage=raw_samples *conv_factor,
time=seq(0.0, sample_period,
length.out=length(raw_samples)))
### Exponential fit of the RC discharge shape, started from the first
### sampled voltage and the nominal RC value.
model_exp <- nls(voltage ~ I(a * exp((-time - delay) / b)),
data=samples_df, start=list(a=samples_df$voltage[1],
b=rc_value))
### First (unweighted) straight-line fit - its residuals drive both the
### outlier weighting and the residual plots further down.
model_poly1_pass1 <- nls(voltage ~ I(a * time + b),
data=samples_df,
start=list(a=0, b=ref_voltage/2))
samples_df$res_poly1_pass1 <- residuals(model_poly1_pass1)
### There's a handful around 50ms out
outlier_s <- 0.050
### Calculate some weights to reduce effect of outliers on nls (s for squares)
### take the distance away from the outlier_s based on first fitted line
### and cap that, scale to 4.99 then cap those values at 4.0
samples_df$w1 <- pmin((outlier_s - pmin(outlier_s,
abs(samples_df$res_poly1_pass1))) * 4.99/outlier_s,
4.0)
### Second straight-line fit using the weights to down-weight outliers.
model_poly1_pass2 <- nls(voltage ~ I(a * time + b),
data=samples_df,
weights=samples_df$w1,
start=list(a=0, b=ref_voltage/2))
### plot graph with points, curve, both lines.
### plot same graph and zoom in on vertical
### TODO - colours are terrible
### yellow points are barely visible
### TODO clean-up legend
### TODO add annotation bottom left with
### mean, mean(fitted exp()), mean(fitted line), mean(fitted line (weighted))
### Do i care about Q-Q?
###
### Means of the data and of each fitted model - shown in the g1 label.
mean_v = mean(samples_df$voltage)
mean_curve_v = mean(predict(model_exp, list(x=samples_df$time)))
mean_line_v = mean(predict(model_poly1_pass1, list(x=samples_df$time)))
mean_weighted_v = mean(predict(model_poly1_pass2, list(x=samples_df$time)))
### Hacky but I want to use geom_label
### The label columns are NA apart from row one so geom_label draws the
### annotation exactly once rather than once per data point.
max_v = max(samples_df$voltage)
min_v = min(samples_df$voltage)
NApadding = rep(NA, dim(samples_df)[1] - 1)
samples_df$label_x <- c(notes_pos_x, NApadding)
samples_df$label_y <- c(min_v + (max_v - min_v) * 0.15, NApadding)
samples_df$label <- c(sprintf("means \n samples=%.4f\ncurve=%.4f\nline=%.4f\n weighted line=%.4f",
mean_v, mean_curve_v, mean_line_v, mean_weighted_v),
NApadding)
### g1: scatter of the samples, coloured and sized by weight, overlaid
### with the exponential curve, both fitted lines and the means label.
g1 <- ggplot(samples_df, aes(x=time)) +
ggtitle(paste("P1 pad voltage", conditions)) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
scale_x_continuous(labels=function(x) { x * 1000.0 }) +
labs(x="time (ms)", y="voltage (V)") +
geom_point(aes(y=voltage,
color=cut(samples_df$w1, c(-1, 1, 2, 3, 3.8, 4)),
size=5.5 - samples_df$w1)) +
geom_line(aes(y=predict(model_exp, list(x=time)), color="curve"),
lwd=6, alpha=0.75) +
geom_line(aes(y=predict(model_poly1_pass1, list(x=time)), color="line"),
linetype="dashed", lwd=3, alpha=0.5) +
geom_line(aes(y=predict(model_poly1_pass2, list(x=time)), color="weighted"),
linetype="twodash", lwd=3, alpha=0.5) +
guides(size=FALSE, linetype=FALSE) +
scale_color_manual(values=c("(-1,1]" ="red",
"(1,2]" = "orangered",
"(2,3]" = "orange",
"(3,3.8]" = "gray80",
"(3.8,4]" = "gray30",
"curve" = "grey",
"line" = "purple",
"weighted" = "green"
),
labels=c("curve" = "curve ",
"line" = "line ",
"weighted" = "weighted line "),
breaks=c("curve", "line", "weighted")) +
geom_label(aes(y=label_y, x=label_x, label=label),
hjust=1, size=10, fill="white",
label.padding = unit(0.65, "char"),
alpha=0.75
)
# this shows the line a bit more clearly - also shows the discrete nature of samples
### g2: g1 with star markers added and the y axis zoomed to the span of
### the two fitted lines.
headroom <- 0.000 ### 0mV
g2 <- g1 +
geom_point(aes(y=voltage,
color=cut(samples_df$w1, c(-1, 1, 2, 3, 3.8, 4))),
size=2, shape=8) +
coord_cartesian(ylim=c(min(predict(model_poly1_pass1, list(x=samples_df$time)),
predict(model_poly1_pass2, list(x=samples_df$time))
) - headroom,
max(predict(model_poly1_pass1, list(x=samples_df$time)),
predict(model_poly1_pass2, list(x=samples_df$time))
) + headroom))
### Shapiro-Wilk normality test on the residuals - reported on g5 later.
st <- shapiro.test(samples_df$res_poly1_pass1)
### g3: residuals from the first line fit plotted against time.
g3 <- ggplot(samples_df, aes(x=time)) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="Residuals from first fitted line"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
scale_x_continuous(labels=function(x) { x * 1000.0 }) +
labs(x="time (ms)", y="voltage (mV)") +
geom_point(aes(y=res_poly1_pass1*1000,
color=cut(samples_df$w1, c(-1, 1, 2, 3, 3.8, 4))),
size=3.5) +
# guides(size=FALSE, linetype=FALSE) +
scale_color_manual(values=c("(-1,1]" ="red",
"(1,2]" = "orangered",
"(2,3]" = "orange",
"(3,3.8]" = "gray80",
"(3.8,4]" = "gray30",
"curve" = "grey",
"line" = "purple",
"weighted" = "green"
),
labels=c("curve" = "curve ",
"line" = "line ",
"weighted" = "weighted line "),
breaks=c("curve", "line", "weighted"))
### The joys of working around dynamic scoping for binw
### there's probably a more elegant way of doing this
### Factory returning a dnorm-based function of x scaled to overlay a
### histogram/bar chart: dat is the data, binw the histogram bin width,
### hfactor an extra height scale, sdfactor scales the sd and meanoffset
### shifts the mean. Arguments are copied into locals so each returned
### closure keeps its own values.
mkdnorm <- function(dat, binw, hfactor, sdfactor, meanoffset) {
l_dat <- dat
l_binw <- binw
l_hfactor <- hfactor
l_sdfactor <- sdfactor
l_meanoffset <- meanoffset
return (function(x) {
dnorm(x,
mean = mean(l_dat) + l_meanoffset,
sd = sd(l_dat * l_sdfactor)) * l_binw * length(l_dat) * l_hfactor })
}
### Nearest representable sample values to the mean and to mean +/- 1 sd
### (sample values only occur in multiples of sample_spacing).
mid_sample <- round(mean(samples_df$samples) / sample_spacing) * sample_spacing
sdoffset <- round((mean(samples_df$samples) + sd(samples_df$samples)) /
sample_spacing) * sample_spacing - mid_sample
mid_plus_sd_sample = mid_sample + sdoffset
mid_minus_sd_sample = mid_sample - sdoffset
### Normal overlays for the sample-value bar chart: the plain normal and
### a reduced-sd variant which hugs the central bars more closely.
lfunc <- mkdnorm(samples_df$samples, 1,
sample_spacing, 1, 0)
lfunc_s2h2 <- mkdnorm(samples_df$samples, 1,
sample_spacing, mod_sd_factor, 0) ### 0.4 or 0.3 can look better
### g4prequel: bar chart of raw sample value counts around the mean with
### the two normal overlays; mean and +/-1 sd bars are highlighted.
g4prequel <- ggplot(samples_df, aes(x=samples)) +
ggtitle(paste("P1 pad samples", conditions),
subtitle="outliers not shown") +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, "char"),
panel.grid.minor.y=element_blank()) +
theme(legend.key.width = unit(3,"char")) +
### unclear why I need to set width here to get constant width
geom_bar(aes(
fill=factor(ifelse(samples==mid_sample,
"mid",
ifelse(samples==mid_plus_sd_sample |
samples==mid_minus_sd_sample, "sds", "rest")))),
width=sample_spacing * 0.8) +
scale_x_continuous(labels=function(x) { sprintf("%d\n(%.1fmV)", x, x * conv_factor * 1000.0) } ) +
labs(x="samples") +
stat_function(aes(linetype="norm"),
fun=lfunc, color="blue3", n=301, lwd=1.25) +
stat_function(aes(linetype="modnorm"),
fun=lfunc_s2h2, color="blue", n=501, lwd=1.25) +
guides(fill = guide_legend(order = 1),
linetype = guide_legend(order = 2)) +
scale_linetype_manual(name=NULL,
values=c("norm" = "dashed",
"modnorm" = "solid"
),
labels=c("norm" = "normal distribution ",
"modnorm" = "modified sd normal dist. "),
breaks=c("norm", "modnorm")) +
scale_fill_manual(name=NULL,
values=c("mid"="orangered2",
"sds"="orange2",
"rest"="grey60",
"norm"="dashed",
"modnorm"="solid"
),
labels=c("mid"="mean ",
"sds"="+/-1 sd "),
breaks=c("mid",
"sds")
) +
### Add lines in case values aren't present, issue on the sds
geom_vline(xintercept=c(mid_minus_sd_sample, mid_plus_sd_sample),
colour="orange2", lwd=sample_spacing / 8, linetype="dotted", alpha=0.5) +
geom_vline(xintercept=mid_sample,
colour="orangered2", lwd=sample_spacing / 8, linetype="dotted", alpha=0.75) +
coord_cartesian(xlim=c(mid_sample-sample_spacing*50,
mid_sample+sample_spacing*50))
### Histogram time - do several to give option to animate them
### Fixing the x width makes a lot more sense for animation comparison
### but letting the y axis auto-range works well
### One residual histogram per bin width, collected in ghists.
binwidthds_mv = c(8, 4, 2, 1, 0.5)
ghists <- vector("list", length(binwidthds_mv))
idx <- 1
for (binw in binwidthds_mv) {
lfunc <- mkdnorm(samples_df$res_poly1_pass1*1000, binw,
1, 1, 0)
lfunc_s2h2 <- mkdnorm(samples_df$res_poly1_pass1*1000, binw,
1.0, mod_sd_factor, 0) ### 0.4 looks better on central part
gh <- ggplot(samples_df, aes(x=time)) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="Histogram of residuals from first fitted line"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, "char"),
panel.grid.minor.y=element_blank()) +
theme(legend.key.width = unit(3,"char")) +
geom_histogram(aes(res_poly1_pass1*1000, fill="line"),
color="black",
binwidth=binw) +
guides(fill=FALSE) +
scale_y_continuous(labels=function(x) { sprintf("%04d", x) } ) +
labs(x="voltage (mV)") +
### n is usually 101 which looks poor on the modified normal
stat_function(aes(linetype="norm"),
fun=lfunc, color="blue3", n=301, lwd=1.25, show.legend=TRUE) +
stat_function(aes(linetype="modnorm"),
fun=lfunc_s2h2, color="blue", n=501, lwd=1.25, show.legend=TRUE) +
scale_linetype_manual(values=c("norm" = "dashed",
"modnorm" = "solid"),
labels=c("norm" = "normal distribution ",
"modnorm" = "modified sd normal dist. "),
breaks=c("norm", "modnorm")) +
coord_cartesian(xlim=c(min(samples_df$res_poly1_pass1)*1000,
max(samples_df$res_poly1_pass1)*1000))
ghists[[idx]] <- gh
idx <- idx + 1
}
### NOTE(review): binw set here appears unused afterwards - looks like a
### leftover from earlier experimentation; confirm before removing.
binw <- 30
### Shapiro-Wilk result text as a first-row-only label (same NA-padding
### trick as the means label) for the Q-Q plot.
samples_df$wslabel_x <- c(3, NApadding)
min_res <- min(samples_df$res_poly1_pass1)
max_res <- max(samples_df$res_poly1_pass1)
samples_df$wslabel_y <- c(min_res + (max_res - min_res) * 0.15, NApadding) * 1000.0
samples_df$wslabel <- c(sprintf(" %s\n W=%.3f\n p-value=%.4e",
st$method, st$statistic, st$p.value),
NApadding)
### g5: Q-Q plot of the line-fit residuals against a normal distribution,
### annotated with the Shapiro-Wilk test result.
g5 <- ggplot(samples_df, aes(sample = res_poly1_pass1*1000, color="line")) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="Q-Q of fitted line residuals vs normal distribution"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
stat_qq(shape=21, size=4, show.legend=FALSE) +
stat_qq(shape=21, size=5, color="black", show.legend=FALSE) +
stat_qq_line(lwd=2, show.legend=FALSE) +
geom_label(aes(y=wslabel_y, x=wslabel_x, label=wslabel),
hjust=1, size=10, color="black", fill="white",
label.padding = unit(0.65, "char"),
alpha=0.8
)
### pp plot based on tips on
### https://homepage.divms.uiowa.edu/~luke/classes/STAT4580/qqpp.html
### Feels like i should clip the abline but probably needs use of segment
### g6: P-P plot - empirical cumulative probabilities of the residuals
### against the normal CDF with the fitted mean and sd.
m <- mean(samples_df$res_poly1_pass1)
s <- sd(samples_df$res_poly1_pass1)
n <- nrow(samples_df)
p <- (1 : n) / n - 0.5 / n
g6 <- ggplot(samples_df) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="P-P of fitted line residuals vs normal distribution"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
labs(x="theoretical cumulative distribution (normal)", y="cumulative distribution of samples") +
geom_point(aes(x=p, y=pnorm(sort(res_poly1_pass1), m, s))) +
geom_abline(aes(color="aline", slope=1, intercept=0), lwd=2, show.legend=FALSE)
#g1
### Write every plot out as <dataname>-<plotname>.png files.
filebase <- paste0(dataname, "-")
filename <- "samples-fitted-lines"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g1,
dpi=100, height=12, width=16, units="in")
filename <- "samples-fitted-lines-zoom"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g2,
dpi=100, height=12, width=16, units="in")
filename <- "fitted-line-residuals"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g3,
dpi=100, height=12, width=16, units="in")
filename <- "central-samples"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g4prequel,
dpi=100, height=12, width=16, units="in")
idx <- 1
for (bins in binwidthds_mv) {
filename <- sprintf("fitted-line-residuals-histogram-%03d", idx)
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
ghists[[idx]],
dpi=100, height=12, width=16, units="in")
idx <- idx + 1
}
filename <- "fitted-line-residuals-qq"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g5,
dpi=100, height=12, width=16, units="in")
filename <- "fitted-line-residuals-pp"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g6,
dpi=100, height=12, width=16, units="in")
}
### Combine the mean-centred sample sets (converted to volts) into one
### long-format data frame for distribution comparisons such as the
### violin plot sketched below. Using length() rather than a hard-coded
### 1000 keeps the id column in step with each sample vector even if a
### data set of a different size is swapped in.
combo_df <- data.frame(samples=c(#(samples1-mean(samples1)) * conv_factor,
                                 #(samples2-mean(samples2)) * conv_factor,
                                 #(samples3-mean(samples3)) * conv_factor,
                                 (samples4-mean(samples4)) * conv_factor,
                                 (batt_samples5-mean(batt_samples5)) * conv_factor,
                                 (batt_samples6-mean(batt_samples6)) * conv_factor),
                       id=c(#rep("sample1", length(samples1)),
                            #rep("sample2", length(samples2)),
                            #rep("sample3", length(samples3)),
                            rep("sample4", length(samples4)),
                            rep("sample5", length(batt_samples5)),
                            rep("sample6", length(batt_samples6)))
                       )
### Playing around with violin plots
## ggplot(combo_df, aes(x=id, y=samples*1000, fill=id)) + geom_violin() + geom_boxplot(width=0.2, fill="white") + coord_cartesian(ylim=c(-25,25))
| /clue-metal-detector/graphing/adc-sample-analysis-v6.R | permissive | kevinjwalters/mini-projects | R | false | false | 63,211 | r | library('stringr')
library('ggplot2')
library('grid')
library('gridExtra')
### Plot some ADC samples from a capacitor discharge or constant
### charge/discharge looking at curve/line fitting and distribution
### of the samples
### This could be a lot neater
### A (different) example fo what was executed
###
### >>> [pulse.send(pulses) in range(10)] ; print(mean1000samples_alt2(p1))
### [False]
### 10470.7
### >>> pulses
### array('H', [40000, 500])
### 1000 ADC values from CP 5.0.0 on a CLUE (nRF52840 12 bit) Copied in from REPL
### samples 1 2.2uF non-continuous
### samples1: 1000 raw ADC readings (2.2uF capacitor, non-continuous pulsing
### per the transcript above). All values are multiples of 16 — presumably
### 12-bit nRF52840 readings left-shifted onto CircuitPython's 16-bit
### analogio scale; confirm against the capture code.
samples1 <- c(
10592, 10624, 10544, 10624, 10656, 10592, 10544, 10560, 10576, 10576,
10544, 10544, 10640, 10640, 10592, 10480, 10560, 10608, 10592, 10352,
10592, 10336, 10544, 10528, 10608, 10592, 10544, 10432, 10608, 10496,
10528, 10640, 10656, 10544, 10656, 10528, 10560, 10624, 10592, 10576,
10560, 10448, 10112, 10624, 10640, 10624, 10560, 10512, 10496, 10624,
10512, 10512, 10624, 10752, 10480, 10544, 10544, 10560, 10784, 10480,
10528, 10640, 10528, 10496, 10704, 10592, 10480, 10560, 10592, 10512,
10576, 10384, 10624, 10624, 10496, 10512, 10480, 10528, 10608, 10528,
10576, 10496, 10512, 10480, 10336, 10736, 10544, 10480, 10496, 10416,
10624, 10544, 10544, 10576, 10576, 10560, 10528, 10528, 10512, 10560,
10560, 10528, 10560, 10496, 10144, 10496, 10832, 10528, 10560, 10496,
10560, 10592, 10496, 10432, 10480, 10560, 10480, 10544, 10480, 10544,
10496, 10512, 10544, 10512, 10528, 10560, 10512, 10544, 10544, 10496,
10512, 10496, 10368, 10480, 10528, 10480, 10496, 10560, 10496, 10544,
10496, 10432, 10512, 10544, 10528, 10544, 10560, 10560, 10544, 10592,
10448, 10400, 10512, 10560, 10528, 10528, 10480, 10496, 10512, 10464,
10464, 10480, 10464, 10480, 10480, 10496, 10560, 10496, 10464, 10448,
10448, 10528, 10496, 10496, 10464, 10480, 10560, 10496, 10496, 10640,
10560, 10512, 10480, 10512, 10496, 10480, 10480, 10544, 10464, 10480,
10624, 10272, 10528, 10496, 10464, 10800, 10496, 10528, 10448, 10560,
10512, 10496, 10464, 10528, 10480, 10576, 10480, 10560, 10480, 10576,
10480, 10512, 10608, 10448, 10496, 10480, 10512, 10752, 10480, 10480,
10480, 10480, 10480, 10464, 10480, 10544, 10464, 10432, 10608, 10384,
10368, 10512, 10512, 10480, 10528, 10272, 10336, 10448, 10480, 10528,
10416, 10464, 10448, 10416, 10480, 10576, 10512, 10496, 10512, 10528,
10464, 10432, 10464, 10512, 10432, 10544, 10432, 10448, 10480, 10448,
10432, 10464, 10352, 10432, 10480, 10464, 10528, 10480, 10416, 10480,
10480, 10480, 10496, 10416, 10480, 10288, 10512, 10448, 10448, 10464,
10304, 10384, 10480, 10448, 10480, 10624, 10496, 10448, 10368, 10496,
10560, 10464, 10528, 10336, 10416, 10496, 10416, 10416, 10496, 10432,
10352, 10416, 10496, 10496, 10432, 10560, 10416, 10528, 10320, 10464,
10512, 10496, 10496, 10400, 10416, 10496, 10544, 10320, 10304, 10496,
10352, 10400, 10512, 10352, 10464, 10384, 10400, 10496, 10480, 10416,
10464, 10512, 10528, 10352, 10448, 10416, 10416, 10480, 10496, 10560,
10416, 10400, 10416, 10448, 10480, 10320, 10400, 10720, 10416, 10416,
10464, 10448, 10432, 10464, 10448, 10384, 10416, 10432, 10416, 10432,
10352, 10416, 10352, 10224, 10416, 10464, 10464, 10496, 10416, 10128,
10432, 10720, 10480, 10416, 10448, 10416, 10464, 10368, 10432, 10480,
10432, 10464, 10496, 10480, 10464, 10432, 10352, 10400, 10496, 10416,
10464, 10448, 10368, 10400, 10496, 10224, 10400, 10480, 10464, 10432,
10448, 10416, 10416, 10496, 10496, 10464, 10416, 10400, 10368, 10384,
10432, 9968, 10192, 10416, 10400, 10480, 10512, 10480, 10432, 10432,
10480, 10352, 10432, 10416, 10512, 10384, 10176, 10752, 10528, 10192,
10352, 10432, 10384, 10480, 10448, 10368, 10160, 10368, 10448, 10528,
10448, 10352, 10320, 10416, 10400, 10384, 10336, 10432, 10368, 10384,
10432, 10432, 10448, 10352, 10384, 10416, 10496, 10496, 10496, 10256,
10080, 10624, 10448, 10384, 10336, 10448, 10400, 10432, 10400, 10432,
10416, 10352, 10368, 10416, 10224, 10352, 10352, 10432, 10336, 10432,
10288, 10464, 10432, 10240, 10432, 10352, 10400, 10432, 10416, 10464,
10288, 10432, 10464, 10352, 10416, 10384, 10416, 10432, 10448, 10448,
10448, 10656, 10480, 10400, 10432, 10432, 10416, 10432, 10480, 10464,
10352, 10432, 10464, 10384, 10528, 10432, 10480, 10496, 10416, 10496,
10416, 10496, 10368, 10416, 10800, 10400, 10352, 10576, 10384, 10432,
10448, 10448, 10448, 10432, 10496, 10416, 10496, 10320, 10384, 10480,
10416, 10336, 10416, 10336, 10352, 10400, 10368, 10288, 9792, 10320,
10368, 10352, 10400, 10320, 10432, 10160, 10384, 10432, 10432, 10368,
10432, 10352, 10304, 10416, 10432, 10368, 10352, 10432, 10352, 10496,
10352, 10384, 10464, 10432, 10400, 10400, 10416, 10336, 10432, 10224,
10592, 10368, 10432, 10352, 10384, 10368, 10432, 10432, 10416, 10352,
10272, 10416, 10384, 10448, 10368, 10352, 10304, 10240, 10400, 10528,
10416, 10352, 10368, 10416, 10448, 10464, 10368, 10256, 10400, 10400,
10352, 10368, 10432, 10352, 10432, 10448, 10368, 10336, 10352, 10432,
10448, 10544, 10368, 10496, 10384, 10240, 10368, 10256, 10352, 10288,
10320, 10304, 10384, 10352, 10400, 10432, 10384, 10384, 10352, 10352,
10368, 10384, 10352, 10368, 10752, 10368, 10368, 10416, 10336, 10224,
10400, 10400, 10432, 10416, 10496, 10432, 10432, 10384, 10304, 10336,
10336, 10368, 10384, 10384, 10368, 10496, 10304, 10384, 10384, 10384,
10352, 10304, 10432, 10352, 10272, 10352, 10416, 10368, 10512, 10336,
10432, 10304, 10336, 10336, 10368, 10368, 10368, 10320, 10368, 10352,
10400, 10304, 10352, 9968, 10352, 10288, 10272, 10368, 10320, 10464,
10368, 10320, 10336, 10432, 10656, 10368, 10368, 10336, 10384, 10336,
10336, 10352, 10336, 10224, 10368, 10384, 10336, 10336, 10336, 10320,
10384, 10352, 10368, 10320, 10272, 10224, 10352, 10368, 10320, 10320,
10352, 10304, 10352, 10352, 10224, 10352, 10288, 10288, 10416, 10352,
10432, 10368, 10304, 10112, 10288, 10352, 10400, 10320, 10352, 10352,
10320, 10208, 10512, 10288, 10208, 10320, 10272, 10304, 10368, 10352,
10368, 10304, 10352, 10336, 10288, 10272, 10336, 10368, 10288, 10560,
10240, 10320, 10464, 10432, 10304, 10288, 10304, 10256, 10304, 10320,
10368, 10272, 10368, 10256, 10224, 10352, 10304, 10256, 10320, 10288,
10336, 10288, 10320, 10224, 10320, 10320, 10400, 10368, 10304, 10384,
10368, 10320, 10320, 10304, 10288, 10368, 10336, 10416, 10320, 10304,
10336, 10288, 10304, 10352, 10352, 10304, 10256, 10352, 10240, 10288,
10304, 10256, 10304, 10400, 10288, 10384, 10272, 10320, 10336, 10224,
10368, 10256, 10368, 10304, 10272, 10288, 10432, 10352, 10288, 10336,
10288, 10368, 10304, 9552, 10320, 10336, 10288, 10384, 10320, 10272,
10320, 10368, 10256, 10368, 10336, 10368, 10288, 10352, 10352, 10288,
10512, 10320, 10320, 10352, 10336, 10272, 10160, 10336, 10288, 10272,
10336, 10288, 10416, 10208, 10304, 10272, 10320, 10288, 10336, 10400,
10304, 10368, 10352, 10336, 10208, 10288, 10352, 10240, 10336, 10352,
10288, 10288, 10288, 10288, 10352, 10272, 10272, 10224, 10176, 10224,
10224, 10224, 10272, 10384, 10368, 10208, 10320, 10320, 10320, 10336,
10304, 9696, 10304, 10400, 10304, 10304, 10256, 10256, 10288, 10272,
10304, 10272, 10240, 10320, 10304, 10240, 10208, 10272, 10208, 10336,
10320, 10320, 10224, 10208, 10224, 10320, 10288, 10256, 10208, 10320,
10240, 10224, 10224, 10352, 10384, 10336, 10336, 10304, 10304, 10256,
10304, 10304, 10256, 10368, 10288, 10288, 10288, 10304, 10368, 10240,
10272, 10304, 10240, 10320, 10160, 10320, 10240, 10304, 10208, 10160,
10320, 10256, 10224, 10288, 10224, 10224, 10256, 10304, 10208, 10240,
10304, 10288, 10368, 10336, 10256, 10176, 10304, 10288, 10240, 10224,
10272, 10224, 10288, 10256, 10320, 10224, 10352, 10272, 10288, 10224)
### samples2 is 0.1uF ceramic/mylar with continuous pulsing at 400kHz 55000 duty
###
### >>> discard = [print(" ", ", ".join(map(str,sample_store_list[x:x+10])), end=",\n") for x in range(0,1000,10)]
### samples2: 1000 raw ADC readings — 0.1uF ceramic/mylar capacitor with
### continuous pulsing at 400kHz, 55000 duty (per the comment above).
### Same 16-bit-scaled encoding as samples1.
samples2 <- c(
23808, 23712, 23680, 23696, 23696, 23712, 23696, 23712, 23696, 23648,
23472, 23712, 23520, 23616, 23744, 23568, 23680, 23200, 23680, 23744,
23712, 23760, 23152, 24208, 23984, 23456, 23280, 23664, 24256, 24528,
23664, 23600, 23696, 23616, 23712, 23712, 24128, 23680, 23744, 23584,
23936, 23600, 23552, 23936, 23648, 23840, 24448, 22928, 23664, 23360,
24304, 23568, 23744, 23616, 23632, 23664, 23632, 23664, 23664, 23664,
23776, 23776, 23744, 23424, 24128, 23872, 23552, 23600, 23728, 23696,
23712, 23520, 23536, 23872, 23584, 23664, 23936, 23632, 23744, 23344,
23680, 23712, 23664, 23744, 23664, 23600, 23600, 23680, 23648, 23856,
23664, 23616, 23840, 23248, 24000, 23696, 23712, 23632, 24352, 23664,
23664, 23776, 23616, 23584, 23600, 23712, 23664, 23632, 23888, 23856,
23472, 23568, 23600, 23744, 23664, 23728, 23664, 23728, 23584, 23600,
23872, 23696, 23440, 23568, 23504, 23712, 24336, 23744, 23712, 23760,
23600, 23712, 23696, 23616, 23776, 23680, 23600, 23712, 23664, 23728,
23680, 23712, 23488, 23616, 22864, 23696, 23680, 23584, 23680, 23632,
23808, 23776, 23552, 23664, 23680, 23744, 23680, 23664, 23488, 23760,
23744, 23808, 23536, 23728, 23712, 23408, 23648, 23744, 23712, 23728,
23536, 23680, 23520, 23680, 23600, 23600, 23712, 23760, 23520, 23664,
23664, 23616, 23744, 23744, 23600, 23712, 23488, 23744, 23728, 23088,
23616, 23600, 23664, 24400, 23664, 23936, 23776, 23472, 23872, 24112,
23472, 23680, 23728, 23664, 23536, 23664, 23568, 23664, 23680, 23552,
23648, 23712, 23584, 23552, 23584, 23696, 23728, 23680, 23616, 23664,
23600, 23824, 23504, 23680, 23360, 23232, 23680, 23904, 24320, 23744,
23664, 23664, 23728, 23648, 23648, 23680, 23648, 23664, 23616, 23696,
23696, 23536, 23760, 23648, 23648, 23776, 23696, 23792, 23536, 23504,
23904, 23552, 23664, 24272, 23456, 23472, 23168, 23680, 23712, 23696,
23568, 23552, 23632, 23664, 23648, 23504, 23712, 24016, 23888, 23824,
23744, 23632, 23664, 23696, 23680, 23744, 23664, 23600, 23744, 23456,
23552, 23600, 23664, 23808, 23664, 23664, 23376, 23728, 23440, 23728,
23424, 23680, 24320, 23616, 23824, 23520, 23536, 23584, 23520, 23680,
23680, 23600, 23840, 23680, 23744, 23312, 23168, 23536, 23264, 23712,
23712, 23648, 23120, 23664, 23584, 23664, 23600, 23520, 23696, 23696,
22432, 23680, 23728, 23456, 23648, 23664, 23776, 23600, 23664, 23632,
23712, 22592, 23568, 23344, 23632, 24064, 23616, 22880, 23968, 23568,
23008, 23792, 23488, 24240, 23648, 23680, 23504, 23712, 23808, 23552,
23776, 23600, 23456, 23408, 24352, 23040, 23744, 23552, 23760, 23680,
23600, 23664, 23632, 23264, 23680, 23648, 23296, 23520, 23840, 23344,
23632, 23680, 23664, 23712, 23728, 23488, 23632, 23648, 23696, 23664,
23616, 23696, 23744, 23616, 23632, 23664, 23600, 23680, 23584, 23744,
23504, 23824, 23600, 23632, 23536, 23680, 23712, 23376, 24096, 23600,
23456, 23728, 23504, 23744, 23632, 23712, 23664, 23600, 23664, 23808,
23504, 23648, 23760, 23968, 23616, 24000, 23840, 23680, 23696, 23808,
24208, 23600, 23600, 23680, 23888, 23312, 23488, 23728, 23456, 23744,
23648, 23696, 23648, 23680, 23728, 23552, 23680, 23536, 23808, 23632,
23664, 23648, 23680, 23920, 23648, 23152, 23712, 23280, 23632, 23712,
23664, 23664, 23664, 23728, 23808, 23664, 23712, 23600, 23744, 23664,
23664, 23760, 23664, 23552, 23680, 23680, 23680, 22768, 23680, 23808,
23600, 24272, 23600, 23344, 23680, 23968, 23584, 23664, 23536, 23552,
23872, 23664, 23824, 23968, 24064, 23632, 23632, 23712, 23568, 23728,
23744, 23600, 23632, 23888, 23232, 23664, 23632, 23744, 23648, 23344,
23616, 23632, 23712, 23744, 23600, 23664, 23552, 23712, 23680, 23664,
23664, 23680, 23696, 23760, 23760, 23552, 23744, 23600, 23488, 23376,
23600, 24096, 23712, 23584, 23616, 23744, 23664, 23584, 23600, 23840,
23760, 23744, 23664, 23568, 23728, 23616, 23648, 23680, 23280, 23744,
23632, 23536, 23760, 23616, 23664, 23648, 23696, 23680, 23616, 23616,
23488, 23648, 24112, 23200, 23648, 23840, 23520, 23712, 23600, 23552,
23664, 23664, 23728, 23664, 23712, 23680, 23712, 23520, 23584, 23680,
23696, 23680, 23632, 23680, 23664, 23536, 23584, 23680, 23568, 23472,
23680, 23680, 23664, 23680, 23648, 23616, 23568, 23712, 23408, 23600,
23696, 23712, 24080, 23552, 23152, 23728, 23680, 23824, 23696, 23664,
23488, 23760, 23808, 23472, 23552, 23616, 23680, 23760, 23696, 23408,
23904, 23792, 23360, 23408, 23664, 23808, 23648, 23408, 23760, 23568,
23488, 23632, 23728, 23680, 23632, 23664, 23536, 23760, 23744, 23328,
23744, 23552, 24448, 23536, 23488, 23840, 21808, 24176, 23696, 23424,
23696, 23104, 23504, 23728, 23696, 23744, 23728, 23680, 23648, 23632,
23872, 23520, 23520, 23824, 23648, 24000, 23680, 24416, 23680, 23520,
23536, 23648, 23680, 23680, 23680, 23680, 23680, 23536, 23552, 23744,
23600, 23632, 23536, 23792, 23680, 23680, 23664, 23744, 23472, 23648,
23664, 23680, 23712, 24128, 23680, 23488, 23552, 23664, 23664, 23696,
23664, 23232, 23456, 23344, 23968, 23504, 23584, 23664, 23664, 23968,
22976, 23632, 23648, 23504, 23536, 23664, 23680, 23744, 23664, 23728,
23664, 23520, 23536, 23792, 23792, 23680, 23600, 23392, 23616, 23664,
24112, 23472, 23456, 23808, 23680, 23696, 23664, 23664, 23680, 23520,
23536, 23248, 23664, 23648, 23552, 23392, 23680, 23840, 23648, 24000,
23680, 23600, 23664, 23664, 23680, 23680, 23648, 23136, 23584, 23616,
24000, 23936, 23648, 23728, 23728, 23536, 23536, 23600, 23712, 23728,
23664, 23680, 23712, 23760, 23616, 23472, 23520, 23600, 23680, 23856,
23664, 23888, 24608, 23552, 23552, 23600, 23664, 23696, 23680, 24208,
23616, 23472, 23600, 24288, 24272, 23680, 23696, 23680, 23632, 23136,
23696, 23616, 23568, 23696, 23680, 23648, 23616, 23712, 23744, 23216,
23552, 23296, 23616, 23856, 23680, 23712, 23664, 23536, 23536, 23568,
23536, 23728, 23712, 23664, 23680, 23648, 23632, 23568, 23536, 23664,
23696, 23776, 23600, 23664, 23648, 23232, 23552, 23616, 23616, 23648,
23552, 24336, 23632, 23680, 23328, 23536, 23344, 24144, 23648, 24096,
23680, 23536, 23648, 23744, 23552, 23760, 23664, 23616, 23632, 23632,
23680, 23552, 23616, 23696, 23616, 23712, 23648, 23680, 23440, 23632,
23840, 23488, 23584, 23344, 23664, 23552, 23632, 23664, 23824, 23488,
23584, 23712, 23680, 23648, 23664, 23616, 23712, 22336, 23616, 23632,
23760, 23680, 23712, 23312, 23728, 23680, 23744, 23200, 23536, 23856,
23296, 22944, 23632, 23792, 23632, 23520, 23504, 23664, 23632, 23648,
23616, 23664, 23680, 23584, 23616, 23504, 23168, 23616, 23680, 22608,
23664, 23632, 23632, 23552, 23568, 23680, 23616, 23680, 23680, 23664,
23248, 23536, 23616, 23424, 23728, 23600, 23744, 23728, 23808, 23472,
23264, 23488, 23088, 23664, 22912, 24000, 23408, 23680, 23616, 23472,
23472, 23696, 23568, 23664, 23696, 23792, 23712, 23856, 23536, 23888,
23536, 23920, 23568, 23792, 23600, 23680, 23648, 23664, 23536, 23664,
23744, 23648, 23616, 23600, 23664, 23536, 23536, 23728, 23648, 23664,
23808, 23616, 23408, 23920, 23712, 23376, 23552, 23808, 23648, 23696,
23600, 23328, 23536, 23552, 23280, 23728, 23776, 23536, 23712, 23568,
23600, 23264, 23488, 23680, 23664, 23600, 23712, 23680, 23616, 23648
)
### same as samples2
### samples3: a second 1000-reading capture under the same conditions as
### samples2 (0.1uF, continuous 400kHz pulsing — see "same as samples2"
### note above). Same 16-bit-scaled encoding.
samples3 <- c(
23664, 23648, 23680, 23568, 23472, 23552, 23808, 23680, 23648, 23664,
23712, 23440, 23232, 24048, 23712, 23696, 23696, 23600, 23664, 23664,
23664, 23808, 23568, 23536, 23744, 23312, 23840, 23680, 23712, 23664,
23680, 23648, 23664, 23664, 23584, 23664, 23744, 23712, 23744, 23536,
23888, 23328, 23344, 23632, 23584, 23744, 23632, 23712, 23824, 23584,
23664, 23632, 23504, 23584, 23712, 23680, 23504, 23600, 23600, 23648,
23408, 23360, 23680, 23648, 22080, 23664, 23600, 23712, 23664, 23680,
23648, 23696, 23584, 23584, 23680, 23664, 23792, 23648, 22912, 23648,
23488, 23488, 23616, 23648, 23520, 23632, 23664, 23648, 23936, 23616,
23680, 23392, 23904, 23680, 23616, 23616, 23600, 23744, 23712, 23488,
23600, 23872, 23936, 23664, 23936, 23712, 23760, 23504, 23712, 23568,
23680, 23616, 23600, 23632, 23648, 23648, 23760, 23616, 23920, 23680,
23680, 23760, 23744, 23648, 23840, 23584, 23584, 23856, 23376, 23424,
23648, 23920, 23696, 23792, 23680, 23552, 23760, 23520, 23552, 23728,
23680, 23712, 23680, 23632, 23696, 23696, 23568, 23712, 23616, 23696,
23680, 23600, 23648, 23632, 23616, 23728, 24176, 23792, 23472, 24160,
23680, 23488, 23664, 23744, 23696, 23456, 23344, 23600, 23632, 23632,
23616, 23904, 23664, 23536, 23504, 23680, 23648, 23648, 23632, 23728,
23696, 23632, 23520, 23392, 23664, 23744, 23680, 23648, 23664, 23744,
23536, 23536, 22640, 23104, 23664, 23632, 23680, 23344, 23456, 23376,
23680, 23664, 23808, 23616, 23552, 23632, 23744, 23776, 23600, 23584,
23600, 23664, 23840, 23664, 22992, 23536, 23552, 23552, 23584, 23632,
23584, 23712, 23520, 23792, 23712, 23712, 23584, 23296, 23776, 23616,
23280, 23664, 23568, 23696, 23648, 23552, 23680, 23680, 23648, 24192,
24352, 23776, 23664, 23584, 23792, 23632, 23600, 23680, 23680, 23824,
23712, 23552, 23744, 23728, 23536, 23200, 23616, 23728, 23504, 23536,
23648, 23808, 23552, 23680, 23696, 23648, 23168, 23568, 23744, 23600,
23648, 23632, 23616, 23584, 24272, 23584, 23616, 23696, 23568, 23680,
23840, 23744, 23632, 24240, 23616, 23776, 23568, 23680, 23744, 23648,
23648, 23648, 23680, 23680, 23696, 23712, 23696, 23568, 23728, 23648,
23616, 23728, 23888, 23616, 23696, 23936, 23616, 24064, 23648, 23536,
23504, 23648, 23584, 23648, 23616, 23648, 23680, 23936, 23584, 23680,
23568, 24368, 23504, 23664, 23488, 24000, 23904, 23536, 23552, 23696,
23312, 23584, 23744, 23600, 23696, 23648, 23552, 23584, 23680, 23536,
23696, 23648, 23552, 23728, 23536, 23616, 23664, 23872, 23600, 24080,
23200, 23936, 23360, 23536, 23616, 23776, 23696, 23632, 23632, 23680,
23648, 23680, 23712, 23152, 23280, 23632, 23568, 23504, 23664, 23680,
23712, 23632, 23568, 23664, 23568, 23664, 23632, 23664, 23680, 23616,
23664, 23344, 23552, 23232, 23504, 23680, 23600, 23664, 23552, 23520,
23552, 23600, 23072, 23712, 23648, 23648, 23744, 23536, 23712, 23568,
23888, 23744, 23552, 24176, 23808, 23952, 22544, 23440, 23680, 23616,
23712, 22880, 23680, 23248, 23744, 23520, 23632, 23632, 23664, 23680,
23600, 23664, 23760, 23584, 23488, 24304, 22896, 23744, 23632, 23680,
23680, 23648, 23696, 23744, 23856, 23568, 23600, 23648, 23632, 23648,
23744, 23600, 23792, 24000, 23488, 23616, 23680, 23632, 23952, 23648,
23472, 23776, 23552, 23632, 23696, 23664, 23552, 23648, 23680, 23680,
23520, 23648, 23696, 23648, 23664, 23616, 23680, 23584, 23536, 23664,
23552, 23728, 23664, 23856, 23616, 23744, 23664, 23568, 23664, 23712,
23808, 23712, 23600, 23712, 23504, 23280, 23920, 23712, 23616, 23680,
23696, 23296, 23504, 23456, 23568, 23568, 23552, 23664, 23600, 23632,
23600, 23648, 23776, 23536, 23440, 23536, 23680, 23744, 23648, 23760,
23680, 23744, 23520, 23648, 23632, 23680, 23744, 23680, 23600, 23760,
24240, 23568, 23280, 23728, 23712, 23680, 23648, 23728, 23696, 23616,
23664, 23632, 23680, 23328, 23712, 23840, 23872, 23200, 23728, 23632,
23664, 23680, 24416, 23520, 23664, 23712, 23696, 23536, 23664, 23888,
23632, 23072, 23696, 23376, 23712, 23648, 23424, 23552, 23744, 23664,
23616, 23680, 23600, 23664, 23680, 23584, 23632, 23632, 23872, 23632,
24192, 23632, 23680, 23472, 23728, 23664, 23552, 23680, 23712, 23680,
23648, 23664, 23552, 23536, 23152, 23520, 23600, 23600, 23696, 23584,
23472, 23776, 23280, 24048, 23568, 23584, 23616, 23680, 23680, 23728,
23552, 23536, 23728, 23680, 23600, 23648, 23600, 23568, 23392, 23648,
23536, 23776, 23616, 23552, 23648, 23584, 23680, 23648, 23536, 23728,
23584, 23616, 23696, 23744, 23600, 23536, 23280, 23968, 23552, 23104,
23632, 23840, 23728, 23600, 23680, 23664, 23488, 24112, 23680, 23776,
23680, 23600, 23792, 23728, 23728, 23504, 23296, 23648, 23696, 23616,
23680, 23648, 23568, 23664, 23712, 23552, 23536, 23648, 23632, 23696,
23840, 23536, 23744, 23472, 23552, 23680, 23568, 23600, 23648, 23616,
23776, 23664, 23488, 23712, 23664, 23648, 23808, 23680, 23952, 23680,
23696, 23616, 23856, 23664, 23200, 23760, 23568, 23728, 23664, 23616,
23648, 23664, 23648, 23808, 23904, 23184, 23760, 23632, 23584, 23056,
23712, 23632, 23632, 23616, 23648, 23696, 23488, 23344, 23680, 23600,
23984, 23680, 23616, 23712, 23472, 23616, 23712, 23680, 23600, 23616,
23680, 23664, 23872, 23344, 23664, 23680, 23552, 23664, 23920, 23712,
23680, 23616, 23680, 23664, 22496, 23760, 23632, 23776, 24448, 23648,
23648, 23664, 23488, 23744, 23472, 23728, 23600, 23680, 23600, 23600,
23936, 23936, 23552, 23712, 23568, 23600, 23600, 23600, 23648, 23664,
23552, 23808, 23680, 23680, 23504, 23632, 23744, 23600, 23664, 23808,
23984, 23488, 23600, 23680, 23728, 23664, 23648, 23664, 23888, 23504,
23632, 23664, 23664, 23744, 23680, 23712, 23648, 23616, 23360, 23744,
23520, 23600, 23872, 24016, 23664, 24000, 23632, 23648, 23088, 23600,
23744, 23696, 23504, 23712, 23664, 23648, 23680, 23296, 23664, 23680,
23552, 23360, 23328, 23520, 23040, 23584, 23712, 23680, 23568, 23648,
22960, 23248, 23632, 23632, 23680, 24016, 23312, 23728, 23600, 23728,
23440, 23632, 23648, 23552, 23728, 23568, 23472, 23616, 23760, 23264,
23888, 23872, 23568, 23648, 23872, 23632, 23712, 23504, 23648, 24000,
23008, 23920, 23712, 23552, 23680, 23616, 23664, 23536, 23680, 23504,
23584, 23600, 23744, 23552, 23488, 23600, 23632, 23680, 23776, 23600,
23792, 23584, 23552, 23600, 23680, 23712, 23568, 23744, 23664, 23504,
23472, 23808, 23808, 23552, 23584, 23648, 23648, 23584, 23568, 23696,
23456, 23552, 23792, 23600, 23040, 23440, 23744, 24176, 23536, 23680,
23600, 23568, 23616, 23616, 23792, 23696, 23648, 23632, 23552, 23792,
23856, 23616, 23664, 23216, 23536, 23616, 23472, 23696, 23008, 23728,
23696, 23552, 23744, 23600, 23536, 24032, 23664, 23056, 23824, 23696,
23600, 23872, 23728, 23312, 23584, 23680, 23680, 23552, 24176, 23648,
24064, 23664, 23488, 23616, 23680, 23664, 23648, 23584, 23744, 23632,
23648, 23584, 23600, 23568, 23616, 23728, 23744, 23648, 23712, 23616,
23600, 23824, 23632, 23616, 23616, 23680, 23568, 23664, 23552, 23024,
23760, 24144, 23616, 23680, 23440, 23616, 23360, 23136, 23680, 23648,
23536, 23568, 23680, 24080, 23840, 23680, 23632, 23536, 23616, 23648,
23664, 23680, 23648, 23680, 23488, 23488, 23552, 23632, 23600, 23584
)
### samples4: another 1000-reading ADC capture in the same 16-bit-scaled
### encoding. This is the first of the three vectors combined into
### combo_df further down (capture conditions not stated here —
### NOTE(review): presumably a variant of the samples2/3 setup; confirm).
samples4 <- c(
23648, 23264, 23600, 23472, 23504, 23280, 23872, 23760, 23840, 23696,
23680, 23792, 23872, 23440, 23840, 23776, 23424, 23760, 23776, 23664,
23808, 23728, 23520, 23840, 23376, 23792, 24288, 23760, 23808, 23728,
23744, 23712, 23728, 23952, 23760, 23712, 23824, 23904, 23568, 23808,
23632, 24000, 24000, 24016, 23424, 23760, 23824, 23872, 23760, 23744,
23664, 23792, 23776, 23808, 23616, 23824, 23712, 23472, 23792, 23696,
23808, 23808, 23616, 23936, 23776, 23776, 23408, 23712, 23712, 23568,
23424, 24304, 23744, 23728, 23744, 23680, 23792, 23824, 23728, 23648,
23744, 23728, 23728, 24080, 23840, 23680, 23936, 23600, 23792, 23584,
23616, 23664, 23664, 23744, 23664, 23712, 23776, 23664, 23600, 23840,
23648, 23664, 23616, 23632, 23824, 23712, 22784, 23664, 23696, 23712,
23824, 23712, 23728, 23872, 23664, 23616, 23808, 23696, 23744, 23728,
23808, 23728, 24096, 23664, 23776, 23408, 23728, 23664, 23728, 23792,
23760, 23744, 23712, 24160, 23632, 23568, 23760, 23776, 23696, 23712,
23648, 23808, 23616, 23664, 23248, 23664, 23696, 23680, 23776, 23840,
23888, 24320, 23536, 23424, 22880, 23744, 23680, 23552, 23664, 23776,
23712, 23600, 23664, 23680, 23664, 23776, 23632, 23872, 23920, 23680,
23808, 23648, 23664, 23712, 23664, 23552, 23696, 23552, 23376, 23808,
23536, 23712, 23728, 23776, 23696, 24176, 23568, 23840, 23424, 23760,
23760, 23744, 23664, 23888, 23584, 23888, 23600, 24016, 23488, 23808,
23840, 23712, 23648, 23904, 23808, 23728, 23648, 23632, 23744, 23888,
23536, 23744, 23632, 23760, 23072, 23680, 23840, 23728, 23584, 23680,
23696, 23712, 23360, 23632, 23664, 23328, 23760, 23680, 23664, 23760,
23664, 23360, 23744, 23760, 23760, 23808, 23664, 23712, 24016, 23776,
24336, 23776, 23776, 24160, 24112, 23648, 23888, 23920, 23776, 24064,
23760, 23744, 23840, 23728, 23712, 23696, 23696, 23648, 23920, 23648,
23680, 23408, 23680, 23680, 23680, 23632, 23584, 23664, 23728, 23904,
23600, 23648, 23776, 23648, 23552, 23744, 23712, 23696, 23680, 23840,
23696, 23744, 23712, 23744, 23872, 23808, 23680, 23664, 23840, 23728,
23600, 23616, 23600, 23760, 23072, 23584, 23792, 23664, 24064, 23744,
23520, 23824, 23744, 23536, 23616, 23872, 24176, 23648, 23376, 23600,
23648, 23744, 23744, 23664, 23760, 23776, 23696, 23552, 23648, 23728,
23776, 23760, 23456, 23280, 23536, 24080, 23632, 22848, 23616, 23520,
23744, 23888, 23632, 23712, 23648, 23552, 23680, 23680, 23696, 23680,
23424, 23696, 23664, 23344, 23584, 23744, 23680, 23744, 23712, 23776,
23328, 24352, 23472, 23776, 24240, 23472, 23648, 23680, 23664, 23664,
23664, 23904, 23616, 23536, 23792, 23808, 23712, 23648, 23840, 23856,
23648, 23552, 23696, 23536, 23616, 23456, 23680, 23744, 23664, 23712,
23840, 23696, 23568, 23776, 23680, 23776, 23360, 23968, 23664, 23632,
23584, 23632, 23728, 23120, 23680, 23712, 23552, 23760, 23840, 23680,
23536, 23632, 23680, 23888, 24064, 23744, 24096, 23616, 23296, 23632,
24176, 23632, 23664, 23648, 23824, 23472, 23488, 23712, 23664, 23728,
23664, 23536, 23728, 23632, 23824, 23680, 23552, 23520, 23456, 23712,
23424, 23680, 23232, 23696, 23552, 23616, 23648, 23696, 23728, 22768,
23760, 23616, 23568, 23472, 23728, 23648, 23648, 23712, 24032, 24128,
23808, 23520, 23680, 23616, 23664, 23600, 23680, 23616, 23632, 23728,
23648, 23648, 23616, 23664, 23600, 23680, 23760, 23696, 23616, 23792,
23808, 23552, 23648, 23696, 23792, 23696, 23632, 23792, 23712, 23584,
23664, 23680, 23680, 23680, 23584, 23328, 23760, 23536, 23664, 23536,
23744, 23968, 23552, 23824, 23664, 23680, 23744, 23776, 23616, 23968,
23552, 23520, 23664, 23680, 23760, 23664, 23568, 23520, 24048, 23712,
23712, 23664, 23696, 23984, 23392, 23680, 23920, 23648, 24256, 23776,
23696, 23808, 23568, 23584, 23600, 23536, 24240, 23088, 23728, 23680,
23568, 23472, 23616, 23648, 23696, 23680, 23664, 23648, 23664, 23632,
23632, 23664, 23600, 23712, 23728, 23664, 23728, 23616, 23664, 23616,
23328, 23664, 23680, 23488, 23760, 23680, 23744, 23680, 23584, 23824,
23360, 23408, 24208, 24560, 23664, 23648, 23664, 23680, 23568, 23680,
23664, 23760, 23712, 23760, 23808, 23680, 23536, 23664, 23696, 23632,
23680, 23680, 23664, 23648, 23728, 23584, 23552, 23712, 23600, 23680,
23680, 23728, 23712, 23648, 23568, 23680, 23936, 23632, 23632, 23728,
23744, 23520, 23168, 23776, 23680, 23680, 23696, 23648, 23696, 23696,
23536, 23744, 23552, 23520, 23744, 23568, 23232, 23568, 23776, 23808,
23504, 23632, 23616, 23648, 23600, 23680, 23584, 23856, 23648, 23680,
23552, 23712, 23872, 23664, 23664, 24192, 23680, 23712, 23536, 23744,
23664, 23616, 23888, 23616, 23312, 23632, 23616, 23744, 23696, 23696,
23712, 23712, 23680, 23632, 23648, 23920, 23408, 22976, 23760, 23488,
23648, 23616, 23392, 23792, 23600, 23472, 23712, 23152, 23584, 23664,
23600, 23728, 23536, 23568, 23744, 22960, 23664, 23984, 23664, 23536,
23344, 23376, 24000, 23536, 23920, 23664, 23680, 23520, 23648, 23600,
23712, 23408, 23600, 23712, 23728, 23616, 23696, 23648, 23632, 23824,
23664, 24240, 23824, 23712, 23248, 23552, 24000, 23680, 23904, 23648,
23744, 23664, 23712, 23808, 23952, 23616, 23616, 23664, 23664, 23424,
23648, 23504, 23472, 23520, 23968, 23968, 23632, 23952, 23744, 23680,
23696, 23744, 23536, 23696, 23600, 23616, 23664, 23600, 23744, 23616,
23568, 23664, 23744, 23744, 23664, 23328, 23744, 23680, 23536, 23568,
23632, 23600, 23696, 23664, 23632, 23776, 23536, 23472, 23584, 23648,
23472, 23696, 23536, 23792, 23712, 23600, 23648, 23648, 23728, 24000,
23520, 23696, 23488, 23824, 23712, 23584, 23664, 23728, 23728, 23824,
23936, 23696, 23664, 23552, 23648, 23712, 23632, 23616, 23680, 23712,
23632, 23680, 23680, 23536, 23744, 23632, 23648, 23968, 22128, 23664,
24560, 23520, 23024, 23600, 23680, 23680, 24064, 23200, 23744, 23568,
23792, 23600, 23760, 23632, 23712, 23680, 23664, 23680, 23728, 23568,
23888, 23984, 23200, 23744, 23968, 23936, 23728, 23504, 23584, 23616,
24160, 23744, 23648, 23760, 23696, 23552, 23664, 23600, 23728, 23632,
23664, 23712, 23568, 23696, 23184, 23552, 23904, 23792, 23696, 23104,
23664, 23632, 23600, 23104, 23744, 23664, 23600, 23616, 23680, 23648,
23552, 23504, 23440, 23680, 23248, 23296, 23552, 22256, 24240, 23616,
23680, 23712, 23552, 23680, 23712, 23680, 23664, 23680, 23984, 23648,
23552, 23680, 23696, 23712, 23600, 23792, 23600, 23232, 23856, 23600,
23664, 23984, 24128, 23696, 23664, 23696, 23584, 23504, 23680, 23600,
23744, 23680, 23664, 23808, 23616, 23568, 23936, 22912, 23760, 23952,
23808, 24784, 23600, 23808, 23776, 23952, 23712, 23744, 23584, 23616,
23680, 23696, 23744, 23536, 23552, 23680, 23632, 23648, 23664, 23712,
23696, 23792, 23680, 23568, 23776, 23680, 23568, 23696, 23680, 23712,
23408, 23280, 23808, 23648, 23664, 23664, 23680, 23600, 23568, 23552,
23776, 23680, 23616, 23664, 23584, 23472, 23488, 23856, 23744, 23072,
23824, 23664, 23616, 23616, 23568, 23760, 23664, 23552, 23648, 22336,
24208, 23584, 23664, 23712, 23744, 23584, 23696, 23696, 24032, 23664,
23680, 23584, 23680, 23648, 23936, 23632, 23792, 23616, 23616, 23648,
23632, 23600, 23600, 24304, 23808, 23648, 23600, 23648, 23584, 23632
)
batt_samples5 <- c(
29056, 28272, 28592, 28608, 28672, 28528, 28672, 28832, 28400, 29520,
28784, 28608, 28784, 28544, 28576, 28672, 28528, 28672, 28640, 28576,
28480, 28640, 28512, 28640, 28352, 27600, 28800, 29136, 28544, 29568,
29216, 28560, 28608, 28720, 28704, 28912, 28640, 29168, 28672, 28528,
28592, 29264, 28672, 28560, 28576, 28736, 28544, 28448, 28544, 28944,
28656, 28480, 28336, 28608, 28624, 28608, 28544, 28768, 28464, 27536,
28432, 28464, 28544, 28576, 29520, 28640, 28624, 28464, 28624, 28720,
28560, 28624, 28656, 28016, 28064, 28544, 28544, 28576, 28736, 28576,
28672, 28528, 28864, 28512, 28464, 28528, 28496, 28736, 28560, 28320,
28720, 27872, 28448, 28416, 28368, 28688, 28608, 28496, 27728, 28528,
28464, 28032, 28640, 28592, 28480, 28528, 28528, 28672, 28736, 28592,
28576, 28528, 28528, 28592, 28608, 28928, 28608, 27136, 28416, 28704,
28128, 27712, 28544, 28592, 28768, 28224, 28720, 28848, 28336, 27936,
28512, 28640, 28512, 28800, 31936, 28608, 28608, 28640, 28608, 28512,
28704, 28592, 28592, 28240, 28432, 28528, 28624, 28592, 28528, 28592,
28528, 28640, 28608, 28480, 28736, 28608, 28560, 28544, 28528, 28576,
28656, 28672, 28496, 27648, 28784, 29040, 28752, 28960, 28704, 28672,
28512, 28640, 28544, 28608, 28672, 28720, 28608, 28576, 28544, 28528,
28624, 28528, 28848, 28704, 28640, 28592, 28512, 28544, 28592, 28608,
28560, 28736, 28640, 28592, 28736, 28464, 29488, 28640, 28720, 28608,
28656, 27584, 28560, 28912, 28800, 28400, 28512, 28928, 28592, 28576,
28592, 28672, 28576, 28672, 28576, 28608, 27232, 28400, 28512, 28592,
28512, 28576, 28624, 28560, 28464, 28672, 28592, 28672, 28544, 28496,
28528, 28544, 28656, 28480, 28336, 28768, 28912, 29744, 28112, 28528,
28560, 28640, 28656, 28656, 28656, 28560, 29728, 28528, 28640, 28832,
29616, 28768, 28640, 28528, 28448, 28400, 28560, 28688, 28784, 28608,
28608, 28576, 28544, 28592, 28544, 28176, 29552, 28560, 28624, 28496,
28736, 28480, 28560, 28592, 28544, 28560, 28624, 28624, 28544, 28576,
28208, 28640, 28400, 28640, 28608, 28480, 28544, 28480, 28912, 28688,
28608, 28464, 28704, 28672, 28592, 28608, 28608, 28608, 28640, 28608,
28592, 28528, 28688, 28704, 28608, 28544, 28816, 28464, 28016, 28688,
28768, 28624, 28608, 28608, 28672, 28720, 28656, 28544, 28528, 28416,
28704, 28672, 28608, 29248, 28640, 27520, 28928, 28672, 28560, 28496,
28656, 28608, 28128, 29360, 28592, 28608, 28512, 28560, 28448, 28656,
28992, 28608, 28688, 28496, 28672, 28496, 28736, 28672, 28592, 28544,
28528, 29104, 28512, 28752, 29296, 29968, 29040, 28560, 28688, 28608,
28608, 28512, 27872, 28720, 28640, 28592, 29104, 27600, 28560, 28512,
28512, 28736, 28640, 28640, 28736, 28544, 28592, 28704, 28672, 28592,
28496, 28848, 28432, 28560, 28496, 28592, 28656, 28848, 28464, 28560,
28544, 28528, 28464, 28624, 28704, 28560, 28512, 28864, 28688, 28544,
28832, 28576, 28464, 28448, 29376, 28528, 28560, 28640, 28192, 28544,
28432, 28656, 28624, 27536, 28448, 28576, 28624, 28576, 28608, 28672,
28576, 28672, 28496, 28768, 28384, 28752, 28544, 28640, 28640, 28528,
28576, 28592, 28368, 28576, 28640, 28528, 28608, 28640, 28640, 28528,
28544, 28704, 28608, 28528, 28736, 28560, 28672, 28656, 28640, 28704,
28672, 28848, 28528, 28592, 28640, 28400, 27424, 28608, 28432, 28704,
28768, 28608, 28080, 28832, 28848, 28848, 28592, 29744, 28560, 28528,
27648, 28928, 29360, 28928, 28752, 28528, 28576, 28512, 28528, 29408,
28320, 28480, 28608, 28576, 28368, 28608, 28656, 28656, 28592, 28592,
28560, 28576, 28528, 28592, 28576, 28576, 28464, 28560, 29200, 28464,
28560, 28608, 28592, 28672, 29840, 28656, 28880, 27504, 29888, 28800,
28416, 28368, 28672, 28640, 29984, 28592, 30672, 28608, 28608, 28640,
28480, 28352, 28608, 28992, 28528, 28480, 28544, 28672, 28544, 28544,
25520, 28608, 28496, 28448, 28288, 28592, 28800, 28688, 28384, 28400,
28336, 28528, 28272, 28688, 28592, 28592, 28032, 28656, 28480, 28160,
28800, 28512, 28608, 28432, 28512, 28880, 28592, 28512, 28656, 28640,
28512, 28448, 28464, 28512, 28544, 28672, 27904, 28672, 28208, 28608,
28576, 28736, 28512, 28528, 28608, 28656, 28640, 28656, 28640, 28544,
28656, 28528, 28464, 28512, 28592, 28592, 28640, 28640, 28464, 28864,
28544, 28608, 28496, 28528, 28240, 28672, 29840, 28688, 27952, 28528,
28448, 28656, 28640, 28496, 28592, 28672, 28640, 28016, 28560, 28784,
28576, 28192, 28480, 28544, 28688, 28560, 28544, 28672, 28592, 28656,
28672, 28640, 28720, 28560, 28656, 28592, 28592, 29792, 28176, 28560,
27392, 28608, 28544, 28608, 28592, 29296, 28368, 28688, 29168, 28560,
28544, 27776, 28544, 28544, 28576, 28592, 28592, 28560, 28768, 28528,
28672, 28656, 29392, 28560, 28752, 28688, 28560, 28736, 28592, 28608,
28992, 27648, 28720, 28672, 28608, 28624, 28480, 28576, 28736, 28560,
28544, 28560, 27312, 28688, 28416, 28592, 28976, 28624, 28608, 28736,
28576, 28656, 28480, 28448, 28704, 28352, 28640, 28432, 28560, 28592,
28448, 28544, 28592, 28528, 28560, 28608, 28512, 28464, 28704, 28624,
28640, 28544, 28800, 28624, 28608, 28608, 27760, 28576, 28656, 28608,
28608, 28688, 28608, 28480, 28608, 28384, 28640, 28576, 28592, 28768,
28528, 28592, 28608, 28848, 28816, 29216, 28240, 28544, 28816, 28192,
28800, 28576, 28736, 28496, 28576, 28944, 28800, 29136, 26736, 28896,
28704, 28352, 28528, 28672, 28400, 28800, 28960, 28512, 28672, 28608,
28528, 28496, 28160, 28864, 28192, 28544, 28576, 28496, 28576, 28528,
28544, 28752, 28576, 28656, 28272, 28656, 28624, 29216, 27744, 28656,
28000, 28672, 28560, 28720, 28656, 28528, 27520, 28432, 28688, 28592,
28560, 28544, 28384, 28720, 28432, 28880, 28400, 28496, 28432, 28768,
28640, 28608, 28128, 28544, 28896, 28608, 28416, 28656, 27472, 28832,
27456, 28496, 28624, 28528, 28480, 28624, 28592, 28640, 28672, 28528,
28576, 28688, 28752, 28672, 28272, 28688, 28080, 28640, 28672, 28288,
28544, 28592, 28672, 28208, 28480, 28608, 28704, 28608, 28640, 28736,
28448, 29312, 27568, 28528, 28592, 28560, 28656, 28608, 28592, 28672,
28608, 28032, 28592, 28656, 28672, 28544, 28624, 28640, 29072, 28544,
27920, 28496, 28656, 28688, 28672, 28544, 28544, 28608, 28464, 28464,
28528, 28656, 28736, 28480, 29056, 28720, 28704, 28480, 28608, 28736,
28400, 28592, 28544, 28608, 28432, 28576, 28608, 28928, 28672, 28976,
28608, 28400, 28752, 28512, 28592, 28672, 28704, 28544, 28752, 28352,
28640, 28560, 28688, 28560, 29264, 28528, 28672, 28560, 28656, 28592,
28736, 28464, 28576, 28544, 28672, 28672, 27568, 27584, 28624, 28656,
29616, 28688, 28624, 28688, 28608, 28544, 28464, 28432, 28576, 28560,
28688, 28544, 28528, 28464, 28496, 28592, 29152, 28384, 28608, 28560,
28528, 28400, 28272, 28464, 28688, 28672, 28608, 28576, 27984, 28928,
28800, 28720, 29232, 28688, 28528, 28592, 28656, 28608, 28656, 28384,
28480, 28640, 28368, 28512, 28528, 28768, 28592, 28576, 28688, 28944,
28544, 28672, 29104, 28912, 28592, 28608, 28080, 28624, 28480, 28576,
28672, 28464, 28528, 28480, 28576, 28528, 28576, 28736, 28592, 28496,
28784, 28608, 28656, 28592, 28528, 28528, 28480, 28592, 29664, 28528,
28528, 28816, 28512, 28544, 28592, 28608, 28624, 28640, 28592, 28528
)
batt_samples6 <- c(
28688, 28736, 28544, 28512, 28624, 28816, 27712, 28720, 28656, 28512,
28704, 28672, 28480, 28560, 28608, 28384, 29552, 28272, 28656, 29520,
28784, 28736, 28720, 28640, 28416, 28576, 28640, 28624, 28672, 28672,
28560, 27648, 28608, 28016, 28576, 28544, 28512, 28608, 28528, 28656,
28608, 28400, 28624, 28528, 28640, 28784, 28480, 28688, 28544, 28640,
28528, 28512, 28416, 28480, 28608, 28432, 28528, 28560, 29168, 28336,
28448, 28576, 27488, 28608, 28512, 28672, 28576, 28704, 28544, 28528,
28272, 29632, 28720, 28400, 28736, 28528, 28464, 28544, 28560, 28704,
28560, 28752, 28528, 28496, 28464, 28544, 28624, 28544, 28544, 28400,
28544, 28544, 28576, 28576, 28432, 28176, 28608, 27984, 28528, 28400,
28576, 28480, 26816, 28528, 28784, 29056, 28640, 28864, 28528, 28576,
29056, 28480, 28496, 28784, 26368, 28880, 27936, 28672, 27680, 28736,
28592, 28592, 28560, 28672, 28480, 28496, 28640, 28608, 28576, 28480,
28544, 28608, 29264, 28624, 28560, 29088, 28480, 28624, 28496, 28528,
28608, 28736, 28640, 28720, 28544, 28592, 28608, 28464, 28640, 28576,
28640, 28144, 28576, 29504, 28544, 28544, 28672, 28672, 29616, 28000,
28672, 28576, 28656, 28576, 28592, 28496, 28576, 28608, 28576, 28608,
28624, 28656, 28672, 28400, 28544, 27232, 28688, 28896, 28800, 28224,
28736, 28464, 28512, 28608, 28592, 28624, 28576, 28464, 27888, 28768,
29168, 28512, 28528, 29136, 28608, 28768, 28576, 28624, 28544, 28464,
28656, 28544, 29216, 28624, 27888, 28768, 28400, 28688, 28688, 28528,
28512, 28416, 28432, 28720, 28768, 28624, 28544, 28528, 28752, 28560,
30176, 28400, 28464, 28800, 28608, 28624, 28576, 27648, 28736, 28512,
28576, 27488, 28592, 28528, 29264, 27984, 28656, 27728, 28544, 28608,
28560, 28464, 28560, 28560, 28592, 28592, 28576, 28592, 28624, 28112,
28416, 28592, 28352, 28544, 28544, 28544, 28544, 28640, 28528, 28544,
28640, 28400, 29456, 28576, 29824, 28288, 28544, 28576, 28688, 28720,
28688, 28544, 28528, 28672, 28608, 28544, 28528, 28624, 28528, 28592,
28480, 28416, 28656, 28640, 27984, 28608, 28464, 28576, 28912, 28480,
28224, 28464, 28544, 28752, 28528, 28768, 28448, 28496, 28592, 28608,
30032, 28544, 28560, 28576, 28544, 28576, 28528, 28592, 28096, 28512,
28464, 28608, 28672, 28736, 28576, 28608, 28704, 27904, 28464, 28576,
28624, 28464, 28560, 28528, 28544, 28544, 28576, 28656, 28720, 28512,
27216, 28096, 28400, 29008, 28672, 28576, 28544, 28272, 28592, 28416,
28624, 28608, 28608, 28576, 28528, 28560, 28576, 28512, 28304, 28496,
28496, 28704, 28480, 29296, 28992, 28864, 28560, 27936, 28528, 28560,
28528, 28608, 28544, 28768, 28544, 28608, 28464, 28496, 28688, 29392,
28832, 28464, 27872, 28640, 28672, 28672, 28560, 28560, 28576, 28672,
28544, 28608, 28544, 28592, 28512, 28544, 28672, 28608, 28528, 28736,
28400, 28464, 28624, 28608, 28608, 28608, 27312, 28704, 28560, 28544,
28240, 28640, 28640, 28464, 28384, 28592, 28624, 28512, 28320, 29120,
28416, 28464, 28464, 28496, 28608, 29504, 28672, 28544, 28576, 28560,
28624, 28512, 28672, 28704, 28528, 28720, 28464, 26960, 28608, 28864,
28608, 28464, 28560, 28224, 28512, 28640, 28688, 28560, 28672, 28608,
28544, 28576, 28496, 28464, 29200, 28496, 28704, 28560, 29328, 27600,
28448, 27392, 27936, 28448, 28624, 28592, 28704, 28640, 28624, 28592,
28576, 28832, 28624, 28656, 28560, 28560, 28528, 29312, 28656, 28416,
28544, 28992, 28880, 28592, 27264, 28672, 28528, 28384, 28640, 28512,
28528, 28576, 28592, 28512, 28528, 27904, 28544, 28528, 28544, 28640,
28560, 28672, 26016, 28016, 28512, 28368, 28336, 28528, 28400, 28640,
28480, 28592, 28592, 28624, 28704, 28608, 28464, 28704, 28640, 29568,
28544, 28528, 28608, 28608, 28480, 28384, 28624, 29824, 28368, 28672,
28608, 28448, 28576, 28416, 28336, 28512, 28736, 28688, 28768, 29232,
28672, 28400, 28544, 27696, 28688, 28480, 28672, 28576, 28528, 28464,
28512, 28544, 28640, 26368, 28592, 28704, 28640, 28480, 28576, 28592,
28512, 28128, 28704, 28736, 26752, 28544, 28800, 28512, 28464, 28544,
28480, 28544, 28592, 28512, 28672, 28576, 28560, 28336, 28528, 28336,
28656, 28464, 28640, 28496, 28480, 28528, 28528, 28528, 28528, 28576,
28368, 28496, 28608, 28528, 28592, 28720, 28512, 28272, 28560, 28928,
27600, 28144, 28640, 28896, 27536, 28176, 28624, 28464, 28304, 28608,
27472, 28528, 28496, 28592, 28592, 28544, 28608, 28624, 28592, 28640,
28624, 28496, 28528, 28608, 29568, 27888, 28640, 28608, 28752, 28496,
28464, 28592, 28640, 28576, 28544, 28688, 28672, 28656, 28480, 28592,
28576, 28112, 28368, 29552, 28496, 28480, 28576, 29904, 28592, 29168,
28608, 28576, 28576, 28416, 28672, 28560, 28352, 28560, 28624, 28592,
28416, 28736, 28624, 28640, 28496, 28528, 28528, 28720, 28560, 28704,
27744, 28528, 28544, 28544, 28528, 28576, 28656, 28496, 28592, 28672,
28592, 29152, 26752, 29328, 28768, 28192, 29760, 28608, 28768, 28528,
28576, 28592, 28560, 28704, 27584, 28704, 28672, 28576, 28608, 28496,
28576, 28672, 28672, 29248, 28704, 27792, 28736, 28576, 27920, 28576,
28672, 28544, 28496, 28512, 28640, 28512, 28528, 28576, 28512, 28608,
28560, 28656, 28528, 28736, 28608, 28688, 28496, 28480, 28592, 28928,
28640, 28528, 28976, 28560, 28512, 28528, 28544, 28544, 28608, 28608,
28112, 28704, 28592, 28576, 28448, 28688, 27680, 28096, 28400, 28416,
28672, 27968, 28512, 28656, 28640, 28560, 28464, 28576, 28528, 28608,
28560, 28512, 28544, 28592, 28576, 28592, 28464, 28720, 28464, 28544,
28528, 28592, 29552, 28608, 28576, 29136, 28544, 28576, 28560, 28640,
28480, 28464, 28480, 28576, 28752, 28560, 28240, 28480, 28576, 28368,
27840, 25152, 28848, 28608, 28528, 28560, 28640, 28480, 28608, 28576,
28400, 28720, 28656, 28576, 28400, 28672, 27504, 28400, 28592, 28416,
28064, 28688, 28704, 28464, 28608, 28528, 28528, 28528, 28560, 28624,
28560, 28560, 28688, 28400, 28656, 28144, 28496, 28944, 28576, 28480,
28560, 28672, 28544, 28560, 28496, 28544, 28608, 28608, 28512, 28464,
28592, 28672, 28528, 28208, 28528, 28496, 28928, 28672, 28544, 28672,
28528, 28560, 28608, 28496, 28576, 28560, 27536, 28608, 28624, 28592,
28704, 28672, 28528, 29296, 28560, 28576, 28544, 28928, 28512, 27792,
28528, 28544, 29552, 28576, 28704, 28592, 28288, 28480, 28560, 28592,
28464, 28640, 28672, 28592, 26944, 28528, 28672, 28144, 28608, 28592,
28608, 28560, 28560, 28640, 28560, 28512, 28640, 28720, 28576, 28480,
28592, 28592, 28720, 28528, 28480, 28608, 28752, 28528, 28576, 28608,
28672, 28672, 28576, 28688, 29472, 28544, 29296, 28464, 28496, 28704,
28480, 28544, 28672, 28512, 28544, 28608, 28464, 28656, 28576, 28528,
28496, 28656, 29696, 28496, 29120, 28512, 28352, 28496, 28576, 28640,
28544, 28672, 28560, 28608, 28496, 28496, 28576, 28608, 28544, 28720,
28624, 28528, 28592, 28672, 28464, 28544, 28528, 28656, 28496, 28560,
28480, 28560, 29552, 28320, 28544, 28192, 28768, 28832, 27408, 28848,
28800, 28560, 28560, 28464, 28432, 28688, 27696, 28656, 28880, 28784,
28912, 28608, 28256, 28528, 28672, 28560, 28672, 28608, 28592, 28560,
28416, 28592, 28448, 28640, 28544, 28576, 28032, 28528, 28656, 28560,
28608, 28576, 28560, 28640, 28672, 28528, 28624, 29744, 28592, 28560
)
#############################################
### Plot configuration shared by all graph sets.
ref_voltage <- 3.3   # ADC reference voltage in volts
sample_spacing <- 16 ### for 12bit stored in 16bit unsigned int
### One entry per dataset: output-file stem, plot-title condition text,
### sd scale factor for the "modified" normal overlay, and the raw samples.
graphsets <- list(filename=c("md100nFcap4", "batterys5", "batterys6"),
conditions=c("across 0.1uF capacitor",
paste("across alkaline single cell", c("sample i", "sample ii"))),
mod_sd_factor=c(0.4, 0.25, 0.25),
samples=list(samples4, batt_samples5, batt_samples6))
### Full-scale 16-bit count -> volts (presumably the 12-bit samples are
### shifted into the 16-bit range -- the spacing of 16 suggests a 4-bit
### shift -- hence 65535 rather than 4095; confirm).
### `<-` used for consistency with the rest of the file.
conv_factor <- ref_voltage / 65535.0
### One pass per dataset in graphsets: fit models, then write out a series
### of diagnostic plots for that dataset.
for (idx in seq(1, length(graphsets$filename))) {
### Unpack this dataset's configuration.
dataname <- graphsets$filename[[idx]]
conditions <- graphsets$conditions[[idx]]
mod_sd_factor <- graphsets$mod_sd_factor[[idx]]
raw_samples <- graphsets$samples[[idx]]
### Duration of the whole capture in seconds (47.469 ms).
sample_period <- 47.469 * 1e-3
#notes_pos_x <- 10 * 1e-3
### x position for the on-plot annotation label (15/50 of the trace).
notes_pos_x <- sample_period * 15/50
#rc_value <- 1e6 * 2.2 * 1e-6
### Starting guess for the exponential decay constant; presumably an RC
### time constant of 1 Mohm x 0.1 uF -- confirm against the hardware.
rc_value <- 1e6 * 0.1 * 1e-6
delay <- 0.005
### These are now set at top of for loop
###conditions <- "during slow capacitor discharge"
###conditions <- "across capacitor"
### Raw ADC counts, their voltage conversion, and evenly spaced timestamps
### across the capture period.
samples_df <- data.frame(samples=raw_samples,
voltage=raw_samples *conv_factor,
time=seq(0.0, sample_period,
length.out=length(raw_samples)))
### Exponential-decay fit, seeded with the first voltage and rc_value.
model_exp <- nls(voltage ~ I(a * exp((-time - delay) / b)),
data=samples_df, start=list(a=samples_df$voltage[1],
b=rc_value))
### First-pass straight-line fit; its residuals drive the outlier
### weighting computed below.
model_poly1_pass1 <- nls(voltage ~ I(a * time + b),
data=samples_df,
start=list(a=0, b=ref_voltage/2))
samples_df$res_poly1_pass1 <- residuals(model_poly1_pass1)
### There's a handful around 50ms out
### NOTE(review): the threshold below is applied to the VOLTAGE residual
### (0.050 V = 50 mV); the "50ms" above presumably refers to where the
### outliers sit in time -- confirm.
outlier_s <- 0.050
### Calculate some weights to reduce effect of outliers on nls (s for squares)
### take the distance away from the outlier_s based on first fitted line
### and cap that, scale to 4.99 then cap those values at 4.0
### Net effect: |residual| >= outlier_s -> weight 0; residual 0 -> 4.99,
### capped at 4.0 (the cap flattens the top of the ramp).
samples_df$w1 <- pmin((outlier_s - pmin(outlier_s,
abs(samples_df$res_poly1_pass1))) * 4.99/outlier_s,
4.0)
### Second straight-line fit, this time down-weighting the outliers found
### by the first pass.
model_poly1_pass2 <- nls(voltage ~ I(a * time + b),
data=samples_df,
weights=samples_df$w1,
start=list(a=0, b=ref_voltage/2))
### plot graph with points, curve, both lines.
### plot same graph and zoom in on vertical
### TODO - colours are terrible
### yellow points are barely visible
### TODO clean-up legend
### TODO add annotation bottom left with
### mean, mean(fitted exp()), mean(fitted line), mean(fitted line (weighted))
### Do i care about Q-Q?
###
### Summary means shown in the on-plot label: raw samples plus the three
### fitted models evaluated over the sample times.
### NOTE(review): predict() is given `x=` but the models' variable is
### `time`, so nls falls back to the variables captured at fit time;
### identical here because the times passed in are the fit data anyway --
### confirm before reusing this pattern with new data.
### (`<-` replaces `=` for consistency; nrow() replaces dim()[1].)
mean_v <- mean(samples_df$voltage)
mean_curve_v <- mean(predict(model_exp, list(x=samples_df$time)))
mean_line_v <- mean(predict(model_poly1_pass1, list(x=samples_df$time)))
mean_weighted_v <- mean(predict(model_poly1_pass2, list(x=samples_df$time)))
### Hacky but I want to use geom_label: geom_label wants per-row
### aesthetics, so put the label in row 1 and pad the other rows with NA.
max_v <- max(samples_df$voltage)
min_v <- min(samples_df$voltage)
NApadding <- rep(NA, nrow(samples_df) - 1)
samples_df$label_x <- c(notes_pos_x, NApadding)
### Place the label 15% of the way up the observed voltage range.
samples_df$label_y <- c(min_v + (max_v - min_v) * 0.15, NApadding)
samples_df$label <- c(sprintf("means \n samples=%.4f\ncurve=%.4f\nline=%.4f\n weighted line=%.4f",
mean_v, mean_curve_v, mean_line_v, mean_weighted_v),
NApadding)
g1 <- ggplot(samples_df, aes(x=time)) +
ggtitle(paste("P1 pad voltage", conditions)) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
scale_x_continuous(labels=function(x) { x * 1000.0 }) +
labs(x="time (ms)", y="voltage (V)") +
geom_point(aes(y=voltage,
color=cut(samples_df$w1, c(-1, 1, 2, 3, 3.8, 4)),
size=5.5 - samples_df$w1)) +
geom_line(aes(y=predict(model_exp, list(x=time)), color="curve"),
lwd=6, alpha=0.75) +
geom_line(aes(y=predict(model_poly1_pass1, list(x=time)), color="line"),
linetype="dashed", lwd=3, alpha=0.5) +
geom_line(aes(y=predict(model_poly1_pass2, list(x=time)), color="weighted"),
linetype="twodash", lwd=3, alpha=0.5) +
guides(size=FALSE, linetype=FALSE) +
scale_color_manual(values=c("(-1,1]" ="red",
"(1,2]" = "orangered",
"(2,3]" = "orange",
"(3,3.8]" = "gray80",
"(3.8,4]" = "gray30",
"curve" = "grey",
"line" = "purple",
"weighted" = "green"
),
labels=c("curve" = "curve ",
"line" = "line ",
"weighted" = "weighted line "),
breaks=c("curve", "line", "weighted")) +
geom_label(aes(y=label_y, x=label_x, label=label),
hjust=1, size=10, fill="white",
label.padding = unit(0.65, "char"),
alpha=0.75
)
# this shows the line a bit more clearly - also shows the discrete nature of samples
headroom <- 0.000 ### 0mV
g2 <- g1 +
geom_point(aes(y=voltage,
color=cut(samples_df$w1, c(-1, 1, 2, 3, 3.8, 4))),
size=2, shape=8) +
coord_cartesian(ylim=c(min(predict(model_poly1_pass1, list(x=samples_df$time)),
predict(model_poly1_pass2, list(x=samples_df$time))
) - headroom,
max(predict(model_poly1_pass1, list(x=samples_df$time)),
predict(model_poly1_pass2, list(x=samples_df$time))
) + headroom))
st <- shapiro.test(samples_df$res_poly1_pass1)
g3 <- ggplot(samples_df, aes(x=time)) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="Residuals from first fitted line"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
scale_x_continuous(labels=function(x) { x * 1000.0 }) +
labs(x="time (ms)", y="voltage (mV)") +
geom_point(aes(y=res_poly1_pass1*1000,
color=cut(samples_df$w1, c(-1, 1, 2, 3, 3.8, 4))),
size=3.5) +
# guides(size=FALSE, linetype=FALSE) +
scale_color_manual(values=c("(-1,1]" ="red",
"(1,2]" = "orangered",
"(2,3]" = "orange",
"(3,3.8]" = "gray80",
"(3.8,4]" = "gray30",
"curve" = "grey",
"line" = "purple",
"weighted" = "green"
),
labels=c("curve" = "curve ",
"line" = "line ",
"weighted" = "weighted line "),
breaks=c("curve", "line", "weighted"))
### The joys of working around dynamic scoping for binw
### there's probably a more elegant way of doing this
### Build a closure that evaluates a scaled normal density for `dat`:
### dnorm centred on mean(dat) + meanoffset, with sd(dat * sdfactor),
### scaled by binw * length(dat) * hfactor (histogram-overlay scaling).
### Arguments are forced up front so the returned function is unaffected
### by later changes to loop variables (works around lazy evaluation).
mkdnorm <- function(dat, binw, hfactor, sdfactor, meanoffset) {
  force(dat); force(binw); force(hfactor); force(sdfactor); force(meanoffset)
  centre <- mean(dat) + meanoffset
  spread <- sd(dat * sdfactor)
  scale_total <- binw * length(dat) * hfactor
  function(x) dnorm(x, mean = centre, sd = spread) * scale_total
}
### Snap the sample mean (and mean +/- 1 sd) to the nearest representable
### ADC code, i.e. the nearest multiple of sample_spacing, so the
### highlighted histogram bars land on real sample values.
mid_sample <- round(mean(samples_df$samples) / sample_spacing) * sample_spacing
sdoffset <- round((mean(samples_df$samples) + sd(samples_df$samples)) /
sample_spacing) * sample_spacing - mid_sample
### `<-` used for consistency with the rest of the file.
mid_plus_sd_sample <- mid_sample + sdoffset
mid_minus_sd_sample <- mid_sample - sdoffset
### Normal-density overlays for the sample histogram: one with the true
### sd, one with the sd scaled by mod_sd_factor (see mkdnorm above).
lfunc <- mkdnorm(samples_df$samples, 1,
sample_spacing, 1, 0)
lfunc_s2h2 <- mkdnorm(samples_df$samples, 1,
sample_spacing, mod_sd_factor, 0) ### 0.4 or 0.3 can look better
g4prequel <- ggplot(samples_df, aes(x=samples)) +
ggtitle(paste("P1 pad samples", conditions),
subtitle="outliers not shown") +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, "char"),
panel.grid.minor.y=element_blank()) +
theme(legend.key.width = unit(3,"char")) +
### unclear why I need to set width here to get constant width
geom_bar(aes(
fill=factor(ifelse(samples==mid_sample,
"mid",
ifelse(samples==mid_plus_sd_sample |
samples==mid_minus_sd_sample, "sds", "rest")))),
width=sample_spacing * 0.8) +
scale_x_continuous(labels=function(x) { sprintf("%d\n(%.1fmV)", x, x * conv_factor * 1000.0) } ) +
labs(x="samples") +
stat_function(aes(linetype="norm"),
fun=lfunc, color="blue3", n=301, lwd=1.25) +
stat_function(aes(linetype="modnorm"),
fun=lfunc_s2h2, color="blue", n=501, lwd=1.25) +
guides(fill = guide_legend(order = 1),
linetype = guide_legend(order = 2)) +
scale_linetype_manual(name=NULL,
values=c("norm" = "dashed",
"modnorm" = "solid"
),
labels=c("norm" = "normal distribution ",
"modnorm" = "modified sd normal dist. "),
breaks=c("norm", "modnorm")) +
scale_fill_manual(name=NULL,
values=c("mid"="orangered2",
"sds"="orange2",
"rest"="grey60",
"norm"="dashed",
"modnorm"="solid"
),
labels=c("mid"="mean ",
"sds"="+/-1 sd "),
breaks=c("mid",
"sds")
) +
### Add lines in case values aren't present, issue on the sds
geom_vline(xintercept=c(mid_minus_sd_sample, mid_plus_sd_sample),
colour="orange2", lwd=sample_spacing / 8, linetype="dotted", alpha=0.5) +
geom_vline(xintercept=mid_sample,
colour="orangered2", lwd=sample_spacing / 8, linetype="dotted", alpha=0.75) +
coord_cartesian(xlim=c(mid_sample-sample_spacing*50,
mid_sample+sample_spacing*50))
### Histogram time - do several to give option to animate them
### Fixing the x width makes a lot more sense for animation comparison
### but letting the y axis auto-range works well
binwidthds_mv = c(8, 4, 2, 1, 0.5)
ghists <- vector("list", length(binwidthds_mv))
idx <- 1
for (binw in binwidthds_mv) {
lfunc <- mkdnorm(samples_df$res_poly1_pass1*1000, binw,
1, 1, 0)
lfunc_s2h2 <- mkdnorm(samples_df$res_poly1_pass1*1000, binw,
1.0, mod_sd_factor, 0) ### 0.4 looks better on central part
gh <- ggplot(samples_df, aes(x=time)) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="Histogram of residuals from first fitted line"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, "char"),
panel.grid.minor.y=element_blank()) +
theme(legend.key.width = unit(3,"char")) +
geom_histogram(aes(res_poly1_pass1*1000, fill="line"),
color="black",
binwidth=binw) +
guides(fill=FALSE) +
scale_y_continuous(labels=function(x) { sprintf("%04d", x) } ) +
labs(x="voltage (mV)") +
### n is usually 101 which looks poor on the modified normal
stat_function(aes(linetype="norm"),
fun=lfunc, color="blue3", n=301, lwd=1.25, show.legend=TRUE) +
stat_function(aes(linetype="modnorm"),
fun=lfunc_s2h2, color="blue", n=501, lwd=1.25, show.legend=TRUE) +
scale_linetype_manual(values=c("norm" = "dashed",
"modnorm" = "solid"),
labels=c("norm" = "normal distribution ",
"modnorm" = "modified sd normal dist. "),
breaks=c("norm", "modnorm")) +
coord_cartesian(xlim=c(min(samples_df$res_poly1_pass1)*1000,
max(samples_df$res_poly1_pass1)*1000))
ghists[[idx]] <- gh
idx <- idx + 1
}
binw <- 30
samples_df$wslabel_x <- c(3, NApadding)
min_res <- min(samples_df$res_poly1_pass1)
max_res <- max(samples_df$res_poly1_pass1)
samples_df$wslabel_y <- c(min_res + (max_res - min_res) * 0.15, NApadding) * 1000.0
samples_df$wslabel <- c(sprintf(" %s\n W=%.3f\n p-value=%.4e",
st$method, st$statistic, st$p.value),
NApadding)
g5 <- ggplot(samples_df, aes(sample = res_poly1_pass1*1000, color="line")) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="Q-Q of fitted line residuals vs normal distribution"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
stat_qq(shape=21, size=4, show.legend=FALSE) +
stat_qq(shape=21, size=5, color="black", show.legend=FALSE) +
stat_qq_line(lwd=2, show.legend=FALSE) +
geom_label(aes(y=wslabel_y, x=wslabel_x, label=wslabel),
hjust=1, size=10, color="black", fill="white",
label.padding = unit(0.65, "char"),
alpha=0.8
)
### pp plot based on tips on
### https://homepage.divms.uiowa.edu/~luke/classes/STAT4580/qqpp.html
### Feels like i should clip the abline but probably needs use of segment
m <- mean(samples_df$res_poly1_pass1)
s <- sd(samples_df$res_poly1_pass1)
n <- nrow(samples_df)
p <- (1 : n) / n - 0.5 / n
g6 <- ggplot(samples_df) +
ggtitle(paste("P1 pad voltage", conditions),
subtitle="P-P of fitted line residuals vs normal distribution"
) +
theme_light(base_size=28) +
theme(plot.title=element_text(hjust=0.5, size=34),
plot.subtitle=element_text(hjust=0.5, size=28),
legend.position="top", legend.title=element_blank(),
legend.spacing.x = unit(0.5, 'char'),
panel.grid.minor.y=element_blank()) +
labs(x="theoretical cumulative distribution (normal)", y="cumulative distribution of samples") +
geom_point(aes(x=p, y=pnorm(sort(res_poly1_pass1), m, s))) +
geom_abline(aes(color="aline", slope=1, intercept=0), lwd=2, show.legend=FALSE)
#g1
filebase <- paste0(dataname, "-")
filename <- "samples-fitted-lines"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g1,
dpi=100, height=12, width=16, units="in")
filename <- "samples-fitted-lines-zoom"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g2,
dpi=100, height=12, width=16, units="in")
filename <- "fitted-line-residuals"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g3,
dpi=100, height=12, width=16, units="in")
filename <- "central-samples"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g4prequel,
dpi=100, height=12, width=16, units="in")
idx <- 1
for (bins in binwidthds_mv) {
filename <- sprintf("fitted-line-residuals-histogram-%03d", idx)
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
ghists[[idx]],
dpi=100, height=12, width=16, units="in")
idx <- idx + 1
}
filename <- "fitted-line-residuals-qq"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g5,
dpi=100, height=12, width=16, units="in")
filename <- "fitted-line-residuals-pp"
### Save as 1600x1200 (4:3) png
ggsave(paste(filebase, filename, ".png", sep=""),
g6,
dpi=100, height=12, width=16, units="in")
}
### Mean-centred voltages from all datasets stacked long-form for
### side-by-side comparison plots (samples1..3 currently disabled).
### NOTE(review): the hard-coded rep(..., 1000) assumes each sample vector
### has exactly 1000 entries -- confirm.
combo_df <- data.frame(samples=c(#(samples1-mean(samples1)) * conv_factor,
#(samples2-mean(samples2)) * conv_factor,
#(samples3-mean(samples3)) * conv_factor,
(samples4-mean(samples4)) * conv_factor,
(batt_samples5-mean(batt_samples5)) * conv_factor,
(batt_samples6-mean(batt_samples6)) * conv_factor),
id=c(#rep("sample1", 1000),
#rep("sample2", 1000),
#rep("sample3", 1000),
rep("sample4", 1000),
rep("sample5", 1000),
rep("sample6", 1000))
)
### Playing around with violin plots
## ggplot(combo_df, aes(x=id, y=samples*1000, fill=id)) + geom_violin() + geom_boxplot(width=0.2, fill="white") + coord_cartesian(ylim=c(-25,25))
|
#!/usr/bin/env Rscript
# via https://geoinformatics.uk/posts/r-patterns.html
#' Load an R "object file": source `path` and return the value of its last
#' expression (e.g. a file whose whole body is a single function literal).
#' (`<-` replaces `=` per R convention; redundant return() dropped.)
#'
#' @param path Path to an R source file.
#' @return The value produced by the file's final expression.
obj_file <- function(path) {
  source(path)$value
}
#' Demo driver: load the function stored in object.R, print it applied to
#' 2..6, then print the names visible in main()'s local frame.
#' (`<-` replaces `=` per R convention; behavior unchanged.)
main <- function() {
  f <- obj_file("object.R")
  for (x in 2:6) {
    print(f(x))
  }
  ### ls() with no arguments lists this function's local frame (f and x).
  print(ls())
}
### Only run main() when this file is executed as a script:
### sys.nframe() == 0 means we are at top level, not inside source() or a
### function call.
if (sys.nframe() == 0) {
main()
}
| /class/main.R | no_license | albertgoncalves/r_designs | R | false | false | 296 | r | #!/usr/bin/env Rscript
# via https://geoinformatics.uk/posts/r-patterns.html
### Source `path` and return the value of its last expression.
obj_file = function(path) {
return(source(path)$value)
}
### Demo driver: apply the function defined in object.R to 2..6 and print
### each result, then print the names in main()'s local frame.
main = function() {
f = obj_file("object.R")
for (x in 2:6) {
print(f(x))
}
print(ls())
}
### Run only when executed as a script (not when source()d).
if (sys.nframe() == 0) {
main()
}
|
# Visualise weekly cable-news clip counts per Democratic primary candidate.
library(tidyverse)
library(dplyr)
library(tidyr)
library(ggplot2)
# to create a visualization based on dataset that shows which democratic party candidate
# has the most weekly cable channel video/audio clips from CNN FOX MSNBC
# December 30th, 2018, to September 8th, 2019
# line graph to see which candidate has most views over time.
# NOTE(review): `cable_weekly` is never loaded in this script -- it is
# assumed to already exist in the session; confirm its source.
politics <- cable_weekly
politics
# Exploratory checks on the raw data (printed, not used further).
str(politics)
class(politics)
colnames(politics)
rownames(politics)
unique(politics$name)
# Coerce the date column to Date (via character in case it is a factor).
politics$date <- as.Date(as.character(politics$date))
max(politics$matched_clips) # 3393
min(politics$matched_clips) # 0
max(politics$date) # 2019-09-08
min(politics$date) # 2018-12-30
# Keep only the columns needed for the plot, reordered name/clips/date.
politics <- select(politics, date, name, matched_clips)
politics <- data.table::setcolorder(politics, c("name", "matched_clips", "date"))
politics
# Pivot wide (one column per candidate) to restrict to the six candidates
# of interest, then back to long form for ggplot.
politics1 <- pivot_wider(politics, id_cols = NULL, names_from = name, values_from = matched_clips)
politics1
politics1 <- select(politics1, 'date','Andrew Yang','Bernie Sanders','Pete Buttigieg',
'Elizabeth Warren','Joe Biden','Cory Booker')
politics1
politics2 <- pivot_longer(politics1, c('Andrew Yang','Bernie Sanders','Pete Buttigieg','Elizabeth Warren',
'Joe Biden','Cory Booker'), names_to = 'name', values_to = 'matched_clips')
politics2
str(politics2)
# One line per candidate over time.
# NOTE(review): scale_x_discrete() on a Date axis suppresses the default
# date breaks/labels -- presumably intentional (the axis title carries the
# date range), but confirm.
ggplot(politics2, aes(x = date, y = matched_clips, color = name, group = name)) +
geom_line(linetype = 1, size = 0.8) +
ggtitle('Democratic Political Candidate Exposure') +
xlab('December 30, 2018 - September 30, 2019') +
ylab('Number of Commercials on Cable TV') +
scale_x_discrete() +
scale_y_continuous(breaks = scales::pretty_breaks(15))
| /Democratic Candidates Commercials.R | no_license | artwang31/Democractic-Candidates-Exposure-Visualization | R | false | false | 1,679 | r | library(tidyverse)
library(dplyr)
library(tidyr)
library(ggplot2)
# to create a visualization based on dataset that shows which democratic party candidate
# has the most weekly cable channel video/audio clips from CNN FOX MSNBC
# December 30th, 2018, to September 8th, 2019
# line graph to see which candidate has most views over time.
# NOTE(review): `cable_weekly` is never loaded in this script -- it is
# assumed to already exist in the session; confirm its source.
politics <- cable_weekly
politics
# Exploratory checks on the raw data (printed, not used further).
str(politics)
class(politics)
colnames(politics)
rownames(politics)
unique(politics$name)
# Coerce the date column to Date (via character in case it is a factor).
politics$date <- as.Date(as.character(politics$date))
max(politics$matched_clips) # 3393
min(politics$matched_clips) # 0
max(politics$date) # 2019-09-08
min(politics$date) # 2018-12-30
# Keep only the columns needed for the plot, reordered name/clips/date.
politics <- select(politics, date, name, matched_clips)
politics <- data.table::setcolorder(politics, c("name", "matched_clips", "date"))
politics
# Pivot wide (one column per candidate) to restrict to the six candidates
# of interest, then back to long form for ggplot.
politics1 <- pivot_wider(politics, id_cols = NULL, names_from = name, values_from = matched_clips)
politics1
politics1 <- select(politics1, 'date','Andrew Yang','Bernie Sanders','Pete Buttigieg',
'Elizabeth Warren','Joe Biden','Cory Booker')
politics1
politics2 <- pivot_longer(politics1, c('Andrew Yang','Bernie Sanders','Pete Buttigieg','Elizabeth Warren',
'Joe Biden','Cory Booker'), names_to = 'name', values_to = 'matched_clips')
politics2
str(politics2)
# One line per candidate over time.
# NOTE(review): scale_x_discrete() on a Date axis suppresses the default
# date breaks/labels -- presumably intentional, but confirm.
ggplot(politics2, aes(x = date, y = matched_clips, color = name, group = name)) +
geom_line(linetype = 1, size = 0.8) +
ggtitle('Democratic Political Candidate Exposure') +
xlab('December 30, 2018 - September 30, 2019') +
ylab('Number of Commercials on Cable TV') +
scale_x_discrete() +
scale_y_continuous(breaks = scales::pretty_breaks(15))
|
#' Build per-cell names from an object's column data.
#'
#' Concatenates the first two columns of `v@colData` row-wise as
#' "<col1>-<col2>".
#'
#' @param v An S4 object with a `colData` slot; only the first two columns
#'   of `colData` are used.
#' @return A character vector with one name per row of `v@colData`
#'   (`character(0)` when there are no rows, where the previous
#'   `sapply(1:nrow(...))` form misbehaved because `1:0` is `c(1, 0)`).
#' @export
cellNames <- function(v) {
  cd <- v@colData
  ### Vectorized paste instead of a row-by-row sapply loop.
  paste0(cd[, 1], '-', cd[, 2])
}
| /R/cellNames.R | permissive | unistbig/CellEnrich | R | false | false | 144 | r | #' @export
### Concatenate the first two columns of v@colData row-wise as "<a>-<b>".
cellNames = function(v){
v = v@colData
### NOTE(review): 1:nrow(v) misbehaves when colData has zero rows (1:0 is
### c(1, 0)); a vectorized paste0(v[, 1], '-', v[, 2]) would be simpler
### and safe -- confirm before changing.
v = sapply(1:nrow(v), function(i){paste0(v[i,1], '-', v[i,2]) }) # n * 2 ?
return(v)
}
|
### Regression input for the internal mcga:::ByteCodeMutation -- presumably
### fuzzer-generated (the dataset path mentions libFuzzer/valgrind): NA and
### negative byte values plus a NaN mutation probability exercise edge-case
### handling.
testlist <- list(bytes1 = c(-1L, NA, -1L), pmutation = NaN)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612886945-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 123 | r | testlist <- list(bytes1 = c(-1L, NA, -1L), pmutation = NaN)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
### Lattice/tikz figure: Clark-West test empirical size plotted against P/T.
library(fwPackage, lib.loc = "package")
library(lattice)
library(tikzDevice)
ltheme <- canonical.theme(color = FALSE) ## in-built B&W theme
ltheme$strip.background$col <- "transparent" ## change strip bg
lattice.options(default.theme = ltheme) ## set as default
### Average rejection rates over replications from the simulations
### database; the joins attach sample sizes (nobs) and dgp coefficients
### used for the facet labels below.
dbc <- dbConnect(dbDriver("SQLite"), dbname = "data/simulations.db")
d <- dbGetQuery(dbc, "
select * from (select ntest, isim, idgp, transform, label, avg(reject) as reject
from interval
where label='size'
and ntest >= 10
and idgp in (1,2)
and transform = 'clarkwest'
and scheme = 'fix'
group by ntest, isim, idgp, transform, label) s
join nobs n join coefficients c on n.i=s.isim and c.i=s.idgp")
dbDisconnect(dbc)
### Facet labels: sample size T and coefficient norm c.
d$nlabel <- sprintf("T=%d", d$n)
d$normlabel <- sprintf("c=%d", d$norm)
### Rejection probability vs P/T, one panel per label combination; the
### light gray segment marks 0.1 (presumably the nominal size -- confirm).
### NOTE(review): altlabel is not created here; it must come from the
### joined tables via `select *`.
tikz(file = "floats/mc-clarkwestsize.tex", width = 6, height = 4.5)
xyplot(reject ~ I(ntest/n) | interaction(nlabel,altlabel,normlabel, sep = ", "),
data = d,
ylab = "Rejection Probability", xlab = "$P/T$",
panel = function(x,y,...) {
panel.lines(c(0,2/3),c(.1,.1), col = "lightgray")
panel.xyplot(x,y,...,type = "l")
},
layout = c(4,3),
par.strip.text = list(cex = .55),
index.cond = list(c(3, 6, 9, 12, 2, 5, 8, 11, 1, 4, 7, 10)))
dev.off()
| /R/mc-clarkwestsize.R | no_license | grayclhn/oos-overfit | R | false | false | 1,395 | r | library(fwPackage, lib.loc = "package")
library(lattice)
library(tikzDevice)
ltheme <- canonical.theme(color = FALSE) ## in-built B&W theme
ltheme$strip.background$col <- "transparent" ## change strip bg
lattice.options(default.theme = ltheme) ## set as default
### Average Clark-West "size" rejection rates over replications from the
### simulations database; joins attach sample sizes and dgp coefficients.
dbc <- dbConnect(dbDriver("SQLite"), dbname = "data/simulations.db")
d <- dbGetQuery(dbc, "
select * from (select ntest, isim, idgp, transform, label, avg(reject) as reject
from interval
where label='size'
and ntest >= 10
and idgp in (1,2)
and transform = 'clarkwest'
and scheme = 'fix'
group by ntest, isim, idgp, transform, label) s
join nobs n join coefficients c on n.i=s.isim and c.i=s.idgp")
dbDisconnect(dbc)
### Facet labels: sample size T and coefficient norm c.
d$nlabel <- sprintf("T=%d", d$n)
d$normlabel <- sprintf("c=%d", d$norm)
### Rejection probability vs P/T; the light gray segment marks 0.1
### (presumably the nominal size -- confirm). NOTE(review): altlabel comes
### from the joined tables via `select *`, not from this script.
tikz(file = "floats/mc-clarkwestsize.tex", width = 6, height = 4.5)
xyplot(reject ~ I(ntest/n) | interaction(nlabel,altlabel,normlabel, sep = ", "),
data = d,
ylab = "Rejection Probability", xlab = "$P/T$",
panel = function(x,y,...) {
panel.lines(c(0,2/3),c(.1,.1), col = "lightgray")
panel.xyplot(x,y,...,type = "l")
},
layout = c(4,3),
par.strip.text = list(cex = .55),
index.cond = list(c(3, 6, 9, 12, 2, 5, 8, 11, 1, 4, 7, 10)))
dev.off()
|
# Multiple imputation of the FCTC model data with Amelia II.
library(Amelia)
library(dplyr)
library(magrittr)
# Raw model data; "<NA>" strings mark missing values. Deduplicate and order
# by country (entry) and year so the time-series imputation sees panels.
dat <- readr::read_csv("data/model_data.csv", na="<NA>") %>%
  unique %>%
  arrange(entry, year)
# Columns excluded from the imputation model (identifiers, outcomes, and
# derived variables that must not be imputed).
to_skip <- c(
  "country_name",
  "iso3c",
  "no_report",
  # "pol_shift",
  "pol_shift_left",
  "pol_shift_right",
  "continent",
  "who_region",
  "signature",
  "sum_art05",
  "sum_art06",
  "sum_art08",
  "sum_art11",
  "sum_art13",
  "sum_art14",
  "year_signature",
  "year_ratification",
  "Years since Ratif.",
  "Years since Sign.",
  colnames(dat)[grepl("^art[0-9]+exp", colnames(dat))],
  "Eastern Mediterranean"
)
# Fixed seed so the m = 10 imputed datasets are reproducible.
set.seed(17778841)
# Panel imputation: linear time polynomial per country; skewed variables get
# log/sqrt transforms and proportions a logistic transform before imputation.
ans <- amelia(
  x = as.data.frame(dat),
  m = 10,
  idvars = to_skip,
  ts = "year",
  polytime = 1,
  cs = "entry",
  logs = c("gdp_percapita_ppp", "health_exp", "birth_death"),
  sqrts = c("population", "tobacco_prod"),
  lgstc = c("smoke_female", "smoke_male"),
  emburn = c(10, 1000)
)
# Interactive spot-check of the imputations: each chain's imputed smoking
# prevalence shown next to the observed (possibly missing) values.
# BUG FIX: the *_im4 columns previously read imputation imp5 (copy-paste
# slip), so chain 4 was never actually inspected; they now read imp4.
View(tibble(
  country = dat$country_name,
  year = dat$year,
  smk_fem_im1 = round(ans$imputations$imp1$smoke_female, 2),
  smk_fem_im2 = round(ans$imputations$imp2$smoke_female, 2),
  smk_fem_im3 = round(ans$imputations$imp3$smoke_female, 2),
  smk_fem_im4 = round(ans$imputations$imp4$smoke_female, 2),
  smk_fem_im5 = round(ans$imputations$imp5$smoke_female, 2),
  smk_mal_im1 = round(ans$imputations$imp1$smoke_male, 2),
  smk_mal_im2 = round(ans$imputations$imp2$smoke_male, 2),
  smk_mal_im3 = round(ans$imputations$imp3$smoke_male, 2),
  smk_mal_im4 = round(ans$imputations$imp4$smoke_male, 2),
  smk_mal_im5 = round(ans$imputations$imp5$smoke_male, 2),
  smk_fem = dat$smoke_female,
  smk_mal = dat$smoke_male
), "Imputed")
# Same spot-check for imputed health expenditure (im4 fixed as above).
View(tibble(
  country = dat$country_name,
  year = dat$year,
  health_exp_im1 = round(ans$imputations$imp1$health_exp, 2),
  health_exp_im2 = round(ans$imputations$imp2$health_exp, 2),
  health_exp_im3 = round(ans$imputations$imp3$health_exp, 2),
  health_exp_im4 = round(ans$imputations$imp4$health_exp, 2),
  health_exp_im5 = round(ans$imputations$imp5$health_exp, 2),
  health_exp = dat$health_exp
), "Imputed")
graphics.off()
pdf("data/multiple-imputation.pdf")
missmap(ans, y.cex = .25, x.cex = .5)
dev.off()
# Rescaling variables ----------------------------------------------------------
# Derive the transformed covariates used by the downstream models:
# per-capita amounts, logs, year fixed-effect dummies, and one-period lags
# of the treaty-article implementation scores.
#
# @param dat A data.frame/tibble with the raw model variables (population,
#   tobacco_prod, bloomberg_* amounts, health_exp, gdp_percapita_ppp,
#   year, entry, sum_art* scores, ...).
# @return An ungrouped tibble ordered by (entry, year) with the added columns.
rescale_data <- function(dat) {
  # Per-capita versions of the count/amount variables.
  dat$tobac_prod_pp <- with(dat, tobacco_prod/population)
  dat$bloomberg_amount_pp <- with(dat, bloomberg_amount/population)
  dat$bloomberg_fctc_amount_pp <- with(dat, bloomberg_fctc_amount/population)
  # Log transforms; log(0) produces -Inf, which is cleaned up below.
  dat$logPopulation <- log(dat$population)
  dat$logTobac_prod_pp <- log(dat$tobac_prod_pp)
  dat$logHealth_exp <- log(dat$health_exp)
  dat$logGDP_percapita_ppp <- log(dat$gdp_percapita_ppp)
  # Replace +/-Inf and NaN introduced by the transforms with NA so the
  # model-fitting code treats them as missing.
  replace_not_finite <- function(x) {
    ifelse(!is.finite(x), NA, x)
  }
  dat <- dat %>%
    mutate_if(is.numeric, replace_not_finite)
  # Year fixed effects: one dummy per year; the first (reference) level is
  # dropped when binding the columns.
  year0_1 <- model.matrix(~0+factor(year), dat)
  colnames(year0_1) <- gsub(".+([0-9]{4})$", "Year \\1", colnames(year0_1))
  dat <- cbind(dat, year0_1[,-1])
  # One-period lags of the article scores, computed within country ("entry").
  # FIX: the original returned the tibble still grouped by entry; ungroup()
  # so callers receive an ordinary (ungrouped) table.
  dat %>%
    arrange(entry, year) %>%
    group_by(entry) %>%
    mutate(
      sum_art05_lagged = lag(sum_art05),
      sum_art06_lagged = lag(sum_art06),
      sum_art08_lagged = lag(sum_art08),
      sum_art11_lagged = lag(sum_art11),
      sum_art13_lagged = lag(sum_art13),
      sum_art14_lagged = lag(sum_art14)
    ) %>%
    ungroup()
}
# Apply the variable derivation to every imputed dataset, then write the
# (listwise-deleted) original data and all imputations to disk.
ans$imputations <- ans$imputations %>% lapply(rescale_data)
# NOTE(review): readr deprecated the `path` argument in favor of `file`
# (readr >= 1.4) -- still works, but emits a warning on recent versions.
readr::write_csv(rescale_data(dat), path = "data/multiple_imputation.csv")
write.amelia(ans, file.stem = "data/multiple-imputation")
| /data/multiple-imputation.r | no_license | gvegayon/fctc | R | false | false | 3,929 | r | library(Amelia)
library(dplyr)
library(magrittr)
dat <- readr::read_csv("data/model_data.csv", na="<NA>") %>%
unique %>%
arrange(entry, year)
to_skip <- c(
"country_name",
"iso3c",
"no_report",
# "pol_shift",
"pol_shift_left",
"pol_shift_right",
"continent",
"who_region",
"signature",
"sum_art05",
"sum_art06",
"sum_art08",
"sum_art11",
"sum_art13",
"sum_art14",
"year_signature",
"year_ratification",
"Years since Ratif.",
"Years since Sign.",
colnames(dat)[grepl("^art[0-9]+exp", colnames(dat))],
"Eastern Mediterranean"
)
set.seed(17778841)
ans <- amelia(
x = as.data.frame(dat),
m = 10,
idvars = to_skip,
ts = "year",
polytime = 1,
cs = "entry",
logs = c("gdp_percapita_ppp", "health_exp", "birth_death"),
sqrts = c("population", "tobacco_prod"),
lgstc = c("smoke_female", "smoke_male"),
emburn = c(10, 1000)
)
# Interactive spot-check of the imputations: each chain's imputed smoking
# prevalence shown next to the observed (possibly missing) values.
# BUG FIX: the *_im4 columns previously read imputation imp5 (copy-paste
# slip), so chain 4 was never actually inspected; they now read imp4.
View(tibble(
  country = dat$country_name,
  year = dat$year,
  smk_fem_im1 = round(ans$imputations$imp1$smoke_female, 2),
  smk_fem_im2 = round(ans$imputations$imp2$smoke_female, 2),
  smk_fem_im3 = round(ans$imputations$imp3$smoke_female, 2),
  smk_fem_im4 = round(ans$imputations$imp4$smoke_female, 2),
  smk_fem_im5 = round(ans$imputations$imp5$smoke_female, 2),
  smk_mal_im1 = round(ans$imputations$imp1$smoke_male, 2),
  smk_mal_im2 = round(ans$imputations$imp2$smoke_male, 2),
  smk_mal_im3 = round(ans$imputations$imp3$smoke_male, 2),
  smk_mal_im4 = round(ans$imputations$imp4$smoke_male, 2),
  smk_mal_im5 = round(ans$imputations$imp5$smoke_male, 2),
  smk_fem = dat$smoke_female,
  smk_mal = dat$smoke_male
), "Imputed")
# Same spot-check for imputed health expenditure (im4 fixed as above).
View(tibble(
  country = dat$country_name,
  year = dat$year,
  health_exp_im1 = round(ans$imputations$imp1$health_exp, 2),
  health_exp_im2 = round(ans$imputations$imp2$health_exp, 2),
  health_exp_im3 = round(ans$imputations$imp3$health_exp, 2),
  health_exp_im4 = round(ans$imputations$imp4$health_exp, 2),
  health_exp_im5 = round(ans$imputations$imp5$health_exp, 2),
  health_exp = dat$health_exp
), "Imputed")
graphics.off()
pdf("data/multiple-imputation.pdf")
missmap(ans, y.cex = .25, x.cex = .5)
dev.off()
# Rescaling variables ----------------------------------------------------------
rescale_data <- function(dat) {
dat$tobac_prod_pp <- with(dat, tobacco_prod/population)
dat$bloomberg_amount_pp <- with(dat, bloomberg_amount/population)
dat$bloomberg_fctc_amount_pp <- with(dat, bloomberg_fctc_amount/population)
dat$logPopulation <- log(dat$population)
dat$logTobac_prod_pp <- log(dat$tobac_prod_pp)
dat$logHealth_exp <- log(dat$health_exp)
dat$logGDP_percapita_ppp <- log(dat$gdp_percapita_ppp)
# Replacing infinite values with NAs
replace_not_finite <- function(x) {
ifelse(!is.finite(x), NA, x)
}
dat <- dat %>%
mutate_if(is.numeric, replace_not_finite)
# Adding lags
# Year fixed effects: 2010 as reference
year0_1 <- model.matrix(~0+factor(year), dat)
colnames(year0_1) <- gsub(".+([0-9]{4})$", "Year \\1", colnames(year0_1))
dat <- cbind(dat, year0_1[,-1])
dat %>%
arrange(entry, year) %>%
group_by(entry) %>%
mutate(
sum_art05_lagged = lag(sum_art05),
sum_art06_lagged = lag(sum_art06),
sum_art08_lagged = lag(sum_art08),
sum_art11_lagged = lag(sum_art11),
sum_art13_lagged = lag(sum_art13),
sum_art14_lagged = lag(sum_art14)
)
# {
# cat(sprintf("%30s: Yes\n", v))
# } else
# cat(sprintf("%30s: No\n", v))
# # dat[[v]] <- dat[[v]]/sd(dat[[v]])
# Including interest on policy (subscribed to GL posts) ------------------------
# dat
}
ans$imputations <- ans$imputations %>% lapply(rescale_data)
readr::write_csv(rescale_data(dat), path = "data/multiple_imputation.csv")
write.amelia(ans, file.stem = "data/multiple-imputation")
|
library("dplyr")
#' Add per-generation semi-frequencies to a population table.
#'
#' Within each Generation, Frequency is the population share divided by two
#' (upstream code duplicates every row, hence the halving); Population is
#' likewise halved. Generations whose total population is zero would yield
#' NaN frequencies, which are set to 0.
#'
#' @param pop_df Data frame with at least columns Generation and Population.
#' @return pop_df with halved Population and a new Frequency column.
addSemiFrequencies <- function(pop_df) {
  # FIX: group_by_(~Generation) uses the deprecated underscore (SE) verb;
  # the plain tidy-eval form below is behaviorally identical.
  pop_df <- pop_df %>% group_by(Generation) %>%
    mutate(Frequency = (Population / sum(Population)) / 2) %>%
    ungroup()
  pop_df$Population <- pop_df$Population / 2 # because of the duplication
  pop_df$Frequency[is.nan(pop_df$Frequency)] <- 0
  return(pop_df)
}
# CLI driver: Rscript <this file> <input.tsv> <output.tsv>
args = commandArgs(trailingOnly=TRUE)
filename <- args[1]
# Tab-separated population table with a header row (Generation, Population, ...).
pop_df <- read.table(filename, sep = "\t", header = TRUE)
result <- addSemiFrequencies(pop_df)
# Write the augmented table to the second CLI argument.
write.table(result, args[2], sep = "\t")
addSemiFrequencies <- function(pop_df) {
pop_df <- pop_df %>% group_by_(~Generation) %>%
mutate(Frequency = (Population / sum(Population)) / 2) %>%
ungroup()
pop_df$Population <- pop_df$Population / 2 # because of the duplication
pop_df$Frequency[is.nan(pop_df$Frequency)] <- 0
return(pop_df)
}
args = commandArgs(trailingOnly=TRUE)
filename <- args[1]
pop_df <- read.table(filename, sep = "\t", header = TRUE)
result <- addSemiFrequencies(pop_df)
write.table(result, args[2], sep = "\t") |
# Demo: fit the custom LinearRegression model (defined in
# linear_regression.R) on synthetic data and plot coefficient trajectories.
library(ggplot2)
source("linear_regression.R")
lr_model <- LinearRegression()
set.seed(1234)
# generate N x D data samples
N <- 200 # number of all data points (test and train)
D <- 4 # number of features/attributes
data <- data.frame(matrix(runif(D * N), nrow = N, ncol = D))
# generate the labels from a known linear model (intercept + D slopes)
coeff <- matrix(c(-5, -3, 4, 5, 10), D + 1, 1) # the real coefficients to be estimated
data <- cbind(data, 'Y' = as.matrix(cbind(1, data[, 1:D])) %*% coeff)
# add Gaussian noise to the labels (just to make it a little more challenging)
data$Y <- data$Y + rnorm(N, mean = 0, sd = 1)
# 50/50 train/test split
train.len <- N / 2
train.index <- sample(1:N, train.len)
train.data <- data[train.index, 1:D]
train.label <- data[train.index, 'Y']
test.data <- data[-train.index, 1:D]
test.label <- data[-train.index, 'Y']
# fit() presumably returns the trained model with a per-epoch history -- see
# linear_regression.R for the contract.
lr_model <- lr_model$fit(train.data, train.label, epochs = 1000, learning_rate = 0.1, batch_size = 100)
dim(lr_model$history)
lr_model$b
lr_model$w
# Plot convergence of each weight and the bias over training epochs.
ggplot(data = lr_model$history, aes(x = epoch)) +
  geom_line(aes(y = w1, color = "w1")) +
  geom_line(aes(y = w2, color = "w2")) +
  geom_line(aes(y = w3, color = "w3")) +
  geom_line(aes(y = w4, color = "w4")) +
  geom_line(aes(y = b, color = "b")) +
  theme_minimal()
# NOTE(review): thresholding regression predictions at 0 looks like leftover
# classification code -- confirm this is intentional for a regression demo.
lr_model$predict(test.data) >= 0
source("linear_regression.R")
lr_model <- LinearRegression()
set.seed(1234)
# geberate N x D data samples
N <- 200 # number of all data point (test and train)
D <- 4 # number of features/attributes
data <- data.frame(matrix(runif(D * N), nrow = N, ncol = D))
# generate the labels
coeff <- matrix(c(-5, -3, 4, 5, 10), D + 1, 1) # the real coefficient to be estimated
data <- cbind(data, 'Y' = as.matrix(cbind(1, data[, 1:D])) %*% coeff)
# add gaussian noise the labels (just to make it a little more challenging)
data$Y <- data$Y + rnorm(N, mean = 0, sd = 1)
train.len <- N / 2
train.index <- sample(1:N, train.len)
train.data <- data[train.index, 1:D]
train.label <- data[train.index, 'Y']
test.data <- data[-train.index, 1:D]
test.label <- data[-train.index, 'Y']
lr_model <- lr_model$fit(train.data, train.label, epochs = 1000, learning_rate = 0.1, batch_size = 100)
dim(lr_model$history)
lr_model$b
lr_model$w
# l$history
ggplot(data = lr_model$history, aes(x = epoch)) +
geom_line(aes(y = w1, color = "w1")) +
geom_line(aes(y = w2, color = "w2")) +
geom_line(aes(y = w3, color = "w3")) +
geom_line(aes(y = w4, color = "w4")) +
geom_line(aes(y = b, color = "b")) +
theme_minimal()
lr_model$predict(test.data) >= 0 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_formals.R
\name{extract_formals}
\alias{extract_formals}
\title{Extract the formals from a \code{roxy_block} object}
\usage{
extract_formals(roxy_block)
}
\arguments{
\item{roxy_block}{A \code{roxy_block} object}
}
\value{
A \code{data.frame} with a row for each formal, its value, and the
function it belongs to
}
\description{
Extract the formals from a \code{roxy_block} object
}
| /man/extract_formals.Rd | permissive | amoeba/pkgsci | R | false | true | 469 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_formals.R
\name{extract_formals}
\alias{extract_formals}
\title{Extract the formals from a \code{roxy_block} object}
\usage{
extract_formals(roxy_block)
}
\arguments{
\item{roxy_block}{A \code{roxy_block} object}
}
\value{
A \code{data.frame} with a row for each formal, its value, and the
function it belongs to
}
\description{
Extract the formals from a \code{roxy_block} object
}
|
# Exploratory script: load Census state population estimates, keep state-level
# rows, and cluster states on rescaled numeric features via rattle + kmeans.
library(magrittr)
library(tidyverse)
library(broom)
basic_load <- read.csv(
  "file:///C:/Users/rrogers/Documents/personalcode/COVID19_comparison_graph/data/nst-est2019-alldata.csv",
  na.strings = c(".", "NA", "", "?"),
  strip.white = TRUE, encoding = "UTF-8",
  stringsAsFactors = FALSE
)
# SUMLEV 40 = state-level rows; everything else (national/regional) goes to
# summaryinfo.
summaryinfo <- basic_load[basic_load$SUMLEV != 40, ]
filtered_load <- basic_load[basic_load$SUMLEV == 40, ]
# Keep identifiers plus all 2018/2019 estimate columns.
filtered_load <- filtered_load[
  ,
  c("REGION", "NAME", grep("(2018|2019)", colnames(filtered_load),
    value = TRUE
  ))
]
# NOTE(review): REGION is "X" for Puerto Rico in this dataset's coding --
# confirm this filter drops the intended rows.
filtered_load %<>% filter(REGION!="X")
filtered_load <- filtered_load[1:18]
# rattle() opens the interactive GUI; the crs$... objects below are globals
# that rattle creates from the loaded dataset (script assumes that session
# state exists -- not reproducible non-interactively).
library(rattle)
rattle()
kdata<- sapply(na.omit(crs$dataset[, crs$numeric]), rescaler, "range")
row.names(kdata)<-crs$dataset$NAME
kmodel <-kmeans(kdata,4)
# NOTE(review): fviz_cluster comes from factoextra, which is never attached
# here -- presumably loaded elsewhere in the session.
fviz_cluster(kmodel, kdata, repel = TRUE)
look<-fix_data_frame(data.frame(kdata), newcol= "NAME")[c( "NAME", grep("MIG", colnames(kdata), value = TRUE))]
| /simple test.R | no_license | rowrowrowyourboat/COVID19_comparison_graph | R | false | false | 971 | r | library(magrittr)
library(tidyverse)
library(broom)
basic_load <- read.csv(
"file:///C:/Users/rrogers/Documents/personalcode/COVID19_comparison_graph/data/nst-est2019-alldata.csv",
na.strings = c(".", "NA", "", "?"),
strip.white = TRUE, encoding = "UTF-8",
stringsAsFactors = FALSE
)
summaryinfo <- basic_load[basic_load$SUMLEV != 40, ]
filtered_load <- basic_load[basic_load$SUMLEV == 40, ]
filtered_load <- filtered_load[
,
c("REGION", "NAME", grep("(2018|2019)", colnames(filtered_load),
value = TRUE
))
]
filtered_load %<>% filter(REGION!="X")
filtered_load <- filtered_load[1:18]
library(rattle)
rattle()
kdata<- sapply(na.omit(crs$dataset[, crs$numeric]), rescaler, "range")
row.names(kdata)<-crs$dataset$NAME
kmodel <-kmeans(kdata,4)
fviz_cluster(kmodel, kdata, repel = TRUE)
look<-fix_data_frame(data.frame(kdata), newcol= "NAME")[c( "NAME", grep("MIG", colnames(kdata), value = TRUE))]
|
#################################
#### Double Sort: 5x5 portfolios sorted first on beta (control) and then on
#### MAX return (variable of interest); equal-weighted excess returns,
#### Newey-West t-statistics, and FF3F-adjusted alphas.
load("F:/我的论文/第五篇/RData/da_all_m.RData")
#load("F:/我的论文/第五篇/RData/da_beta_5y.RData")
load("F:/我的论文/第五篇/主代码/beta anomaly/monthly data in five years/RData/da_beta_5y.RData")
da_m <- merge(da_all_m,da_beta_5y,by=c("ym","SecCode"))
####
ym_index <- sort(unique(da_m$ym))
k <- 5 # 5*5 portfolios
ret_p <- array(NA,c(length(ym_index),k,k)) # p denotes portfolio
# the first k corresponds to the number of groups of variable of interest
# the second k corresponds to the number of groups of control variable
# i,p and j corresponds to max(da_w$w), the first k and the second k below
for (i in 1:length(ym_index)) {
  da_sub <- da_m[ym==ym_index[i],]
  # Below is the control variable.
  da_sub <- da_sub[order(be),]
  # Unequal breakpoints: the middle k-2 groups get n_mid stocks each; the
  # remainder is split between the first and last groups.
  # NOTE(review): n_l is assigned but never used afterwards.
  n_mid <- floor(nrow(da_sub)/k)
  if ((nrow(da_sub)-n_mid*(k-2))%%2==0){
    n_f <- (nrow(da_sub)-n_mid*(k-2))/2 # f denotes first, l denotes last
    n_l <- n_f
  } else {
    n_f <- (nrow(da_sub)-n_mid*(k-2)-1)/2
    n_l <- n_f+1
  }
  x <- seq(from=n_f,to=nrow(da_sub),by=n_mid)[1:(k-1)]
  x <- c(x,nrow(da_sub))
  da_sub$group_n1 <- cut(1:nrow(da_sub), c(0,x),labels = 1:k)
  for (j in 1:k) {
    da_ss <- da_sub[group_n1==j,] # ss denotes the subset of the subset
    # Below is variable of interest
    da_ss <- da_ss[order(max_ret),]
    n_mid <- floor(nrow(da_ss)/k)
    if ((nrow(da_ss)-n_mid*(k-2))%%2==0){
      n_f <- (nrow(da_ss)-n_mid*(k-2))/2 # f denotes the first, l the denotes last
      n_l <- n_f
    } else {
      n_f <- (nrow(da_ss)-n_mid*(k-2)-1)/2
      n_l <- n_f+1
    }
    x <- seq(from=n_f,to=nrow(da_ss),by=n_mid)[1:(k-1)]
    x <- c(x,nrow(da_ss))
    da_ss$group_n2 <- cut(1:nrow(da_ss), c(0,x),labels = 1:k)
    for (p in 1:k) {
      # Equal-weighted mean excess return of portfolio (p, j) in month i.
      ret_p[i,p,j] <- da_ss[group_n2==p,mean(ret_e)]
      #ret_p[i,p,j] <- da_ss[group_n2==p,weighted.mean(ret_e,size)]
    }
  }
}
# Time-series averages per portfolio, plus row/column margins:
# last row = high-minus-low spread, last column = average across control groups.
ret_p_m <- matrix(NA,nrow=k+1,ncol=k+1)
colnames(ret_p_m) <- c(paste0("cv",1:k),"average") # cv denotes control variables
rownames(ret_p_m) <- c(paste0("voi",1:k),"h-l") # voi denotes variables of interest, here it's rsj
for (p in 1:k) {
  for (j in 1:k) {
    ret_p_m[p,j] <- mean(ret_p[,p,j],na.rm=T) # full sample
  }
}
ret_p_m[1:k,k+1] <- rowMeans(ret_p_m[1:k,1:k])
ret_p_m[k+1,] <- ret_p_m[k,]-ret_p_m[1,]
ret_p_m
#### Newey-West t statistic for the high-minus-low spreads
t_nm <- vector(length=k+1)
## For the first five t values (one spread per control-variable group)
ret_p_hl <- matrix(NA,nrow=length(ym_index),ncol=k)
ret_p_hl_sd <- vector(length=k)
for (j in 1:k) {
  ret_p_hl[,j] <- ret_p[,k,j]-ret_p[,1,j]
  ret_p_hl_sd[j] <- sd(ret_p_hl[,j],na.rm=T)
}
# Intercept-only regression; Newey-West HAC standard errors (lmtest/sandwich
# must be attached in the session for coeftest/NeweyWest).
for (j in 1:k) {
  model_nm <- lm(ret_p_hl[,j] ~ 1)
  t_nm[j] <- coeftest(model_nm,vcov = NeweyWest(model_nm))[1,3]
}
## For the sixth t value (spread of the averages across control groups)
ret_p_hl_average <- rowMeans((ret_p[,k,1:k]))-rowMeans((ret_p[,1,1:k]))
model_nm <- lm(ret_p_hl_average ~ 1)
t_nm[k+1] <- coeftest(model_nm,vcov = NeweyWest(model_nm))[1,3]
t_nm
#### adjusted by FF3F: alpha and its Newey-West t for each spread series
ret_p_hl <- cbind(ret_p_hl,ret_p_hl_average)
ret_p_hl <- na.omit(as.data.table(ret_p_hl))
names(ret_p_hl) <- c(paste0("p",1:k),"average")
# NOTE(review): if na.omit() dropped any rows above, this full-length ym
# vector will not align with the remaining rows -- verify.
ret_p_hl$ym <- sort(unique(da_m$ym))
ret_p_hl <- merge(ret_p_hl,FF3F_A_nm,by="ym")
## Newey-West t statistic
ret_p_hl_FF3F <- matrix(NA,nrow=2,ncol=k+1)
for (j in 1:(k+1)) { # the first column is the corresponding ym
  model_FF3F <- lm(ret_p_hl[[j+1]]~ret_p_hl[["mkt_e"]]+ret_p_hl[["smb"]]+ret_p_hl[["hml"]])
  ret_p_hl_FF3F[1,j] <- coeftest(model_FF3F,vcov=NeweyWest(model_FF3F))[1,1]
  ret_p_hl_FF3F[2,j] <- coeftest(model_FF3F,vcov=NeweyWest(model_FF3F))[1,3]
}
ret_p_hl_FF3F
| /double_sort_beta_controlled_5y.R | no_license | jaynewton/paper_5 | R | false | false | 3,573 | r | #################################
#### Double Sort
load("F:/我的论文/第五篇/RData/da_all_m.RData")
#load("F:/我的论文/第五篇/RData/da_beta_5y.RData")
load("F:/我的论文/第五篇/主代码/beta anomaly/monthly data in five years/RData/da_beta_5y.RData")
da_m <- merge(da_all_m,da_beta_5y,by=c("ym","SecCode"))
####
ym_index <- sort(unique(da_m$ym))
k <- 5 # 5*5 portfolios
ret_p <- array(NA,c(length(ym_index),k,k)) # p denotes portfolio
# the first k corresponds to the number of groups of variable of interest
# the second k corresponds to the number of groups of control variable
# i,p and j corresponds to max(da_w$w), the first k and the second k below
for (i in 1:length(ym_index)) {
da_sub <- da_m[ym==ym_index[i],]
# Below is the control variable.
da_sub <- da_sub[order(be),]
n_mid <- floor(nrow(da_sub)/k)
if ((nrow(da_sub)-n_mid*(k-2))%%2==0){
n_f <- (nrow(da_sub)-n_mid*(k-2))/2 # f denotes first, l denotes last
n_l <- n_f
} else {
n_f <- (nrow(da_sub)-n_mid*(k-2)-1)/2
n_l <- n_f+1
}
x <- seq(from=n_f,to=nrow(da_sub),by=n_mid)[1:(k-1)]
x <- c(x,nrow(da_sub))
da_sub$group_n1 <- cut(1:nrow(da_sub), c(0,x),labels = 1:k)
for (j in 1:k) {
da_ss <- da_sub[group_n1==j,] # ss denotes the subset of the subset
# Below is variable of interest
da_ss <- da_ss[order(max_ret),]
n_mid <- floor(nrow(da_ss)/k)
if ((nrow(da_ss)-n_mid*(k-2))%%2==0){
n_f <- (nrow(da_ss)-n_mid*(k-2))/2 # f denotes the first, l the denotes last
n_l <- n_f
} else {
n_f <- (nrow(da_ss)-n_mid*(k-2)-1)/2
n_l <- n_f+1
}
x <- seq(from=n_f,to=nrow(da_ss),by=n_mid)[1:(k-1)]
x <- c(x,nrow(da_ss))
da_ss$group_n2 <- cut(1:nrow(da_ss), c(0,x),labels = 1:k)
for (p in 1:k) {
ret_p[i,p,j] <- da_ss[group_n2==p,mean(ret_e)]
#ret_p[i,p,j] <- da_ss[group_n2==p,weighted.mean(ret_e,size)]
}
}
}
ret_p_m <- matrix(NA,nrow=k+1,ncol=k+1)
colnames(ret_p_m) <- c(paste0("cv",1:k),"average") # cv denotes control variables
rownames(ret_p_m) <- c(paste0("voi",1:k),"h-l") # voi denotes variables of interest, here it's rsj
for (p in 1:k) {
for (j in 1:k) {
ret_p_m[p,j] <- mean(ret_p[,p,j],na.rm=T) # full sample
}
}
ret_p_m[1:k,k+1] <- rowMeans(ret_p_m[1:k,1:k])
ret_p_m[k+1,] <- ret_p_m[k,]-ret_p_m[1,]
ret_p_m
#### Newey-West t statistic
t_nm <- vector(length=k+1)
## For the first five t values
ret_p_hl <- matrix(NA,nrow=length(ym_index),ncol=k)
ret_p_hl_sd <- vector(length=k)
for (j in 1:k) {
ret_p_hl[,j] <- ret_p[,k,j]-ret_p[,1,j]
ret_p_hl_sd[j] <- sd(ret_p_hl[,j],na.rm=T)
}
for (j in 1:k) {
model_nm <- lm(ret_p_hl[,j] ~ 1)
t_nm[j] <- coeftest(model_nm,vcov = NeweyWest(model_nm))[1,3]
}
## For the sixth t value
ret_p_hl_average <- rowMeans((ret_p[,k,1:k]))-rowMeans((ret_p[,1,1:k]))
model_nm <- lm(ret_p_hl_average ~ 1)
t_nm[k+1] <- coeftest(model_nm,vcov = NeweyWest(model_nm))[1,3]
t_nm
#### adjusted by FF3F
ret_p_hl <- cbind(ret_p_hl,ret_p_hl_average)
ret_p_hl <- na.omit(as.data.table(ret_p_hl))
names(ret_p_hl) <- c(paste0("p",1:k),"average")
ret_p_hl$ym <- sort(unique(da_m$ym))
ret_p_hl <- merge(ret_p_hl,FF3F_A_nm,by="ym")
## Newey-West t statistic
ret_p_hl_FF3F <- matrix(NA,nrow=2,ncol=k+1)
for (j in 1:(k+1)) { # the first column is the corresponding ym
model_FF3F <- lm(ret_p_hl[[j+1]]~ret_p_hl[["mkt_e"]]+ret_p_hl[["smb"]]+ret_p_hl[["hml"]])
ret_p_hl_FF3F[1,j] <- coeftest(model_FF3F,vcov=NeweyWest(model_FF3F))[1,1]
ret_p_hl_FF3F[2,j] <- coeftest(model_FF3F,vcov=NeweyWest(model_FF3F))[1,3]
}
ret_p_hl_FF3F
|
#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16 );
#' Generate test fixtures.
#'
#' @examples
#' main();
#' Generate test fixtures.
#'
#' Draws random samples, computes reference one-sample and paired t-test
#' results with R's `t.test()`, and writes each scenario (inputs plus
#' p-value, statistic, and confidence bounds) as JSON next to this script.
#' The scenarios and output filenames are unchanged from the original; the
#' eight copy-pasted build/serialize/write stanzas are factored into two
#' helpers. Note: `rnorm()` draws happen in the same order as before.
#'
#' @examples
#' main();
main <- function() {
	#' Get the script filepath.
	#'
	#' @return The absolute path of this script
	#'
	#' @examples
	#' filepath <- get_script_path();
	get_script_path <- function() {
		args <- commandArgs( trailingOnly = FALSE );
		needle <- "--file=";
		match <- grep( needle, args );
		if ( length( match ) > 0 ) {
			# Rscript:
			filepath <- sub( needle, "", args[match] );
		} else {
			ls_vars <- ls( sys.frames()[[1]] )
			if ( "fileName" %in% ls_vars ) {
				# Source'd via RStudio:
				filepath <- sys.frames()[[1]]$fileName; # nolint
			} else {
				# Source'd via R console:
				filepath <- sys.frames()[[1]]$ofile;
			}
		}
		return( normalizePath( filepath ) );
	}

	#' Convert a data structure to JSON (16 significant digits).
	#'
	#' @param x A data structure to convert
	#' @return JSON blob
	to_json <- function( x ) {
		return( jsonlite::toJSON( x, digits = 16, auto_unbox = TRUE ) );
	}

	# Get the directory of this script:
	source_dir <- dirname( get_script_path() );

	#' Run `t.test()` and bundle inputs and reference outputs as a list.
	#'
	#' @param x First sample
	#' @param y Optional second sample (for paired tests)
	#' @param ... Additional arguments forwarded to `t.test()`
	#' @return A named list: inputs, pValue, statistic, lower, upper
	make_fixture <- function( x, y = NULL, ... ) {
		if ( is.null( y ) ) {
			tt <- t.test( x, ... );
			out <- list( x = x );
		} else {
			tt <- t.test( x, y, ... );
			out <- list( x = x, y = y );
		}
		return( c( out, list( pValue = tt$p.value, statistic = tt$statistic, lower = tt$conf.int[1], upper = tt$conf.int[2] ) ) );
	}

	#' Serialize a fixture to JSON and write it next to this script.
	#'
	#' @param fixture A fixture list from `make_fixture()`
	#' @param name Output filename
	write_fixture <- function( fixture, name ) {
		write( to_json( fixture ), paste( source_dir, "/", name, sep = "" ) );
	}

	# Generate and write the fixture data (one file per test scenario):
	write_fixture( make_fixture( rnorm( 100 ) ), "twosided.json" );
	write_fixture( make_fixture( rnorm( 100, 2.0, 2.5 ), conf.level = 0.9 ), "twosided_custom_alpha.json" );
	write_fixture( make_fixture( rnorm( 100 ), alternative = "less" ), "less.json" );
	write_fixture( make_fixture( rnorm( 100 ), alternative = "greater" ), "greater.json" );
	write_fixture( make_fixture( rnorm( 100 ), alternative = "less", conf.level = 0.99 ), "less_custom_alpha.json" );
	write_fixture( make_fixture( rnorm( 100 ), alternative = "greater", conf.level = 0.9 ), "greater_custom_alpha.json" );
	write_fixture( make_fixture( rnorm( 100, 1.0, 2.0 ), rnorm( 100, 1.0, 2.0 ), paired = TRUE ), "paired.json" );
	write_fixture( make_fixture( rnorm( 100, 1.0, 2.0 ), rnorm( 100, 2.0, 2.0 ), paired = TRUE, alternative = "less" ), "paired_less.json" );
}

main();
| /lib/node_modules/@stdlib/stats/ttest/test/fixtures/r/runner.R | permissive | doc22940/stdlib | R | false | false | 5,267 | r | #!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16 );
#' Generate test fixtures.
#'
#' @examples
#' main();
main <- function() {
#' Get the script filepath.
#'
#' @return The absolute path of this script
#'
#' @examples
#' filepath <- get_script_path();
get_script_path <- function() {
args <- commandArgs( trailingOnly = FALSE );
needle <- "--file=";
match <- grep( needle, args );
if ( length( match ) > 0 ) {
# Rscript:
filepath <- sub( needle, "", args[match] );
} else {
ls_vars <- ls( sys.frames()[[1]] )
if ( "fileName" %in% ls_vars ) {
# Source'd via RStudio:
filepath <- sys.frames()[[1]]$fileName; # nolint
} else {
# Source'd via R console:
filepath <- sys.frames()[[1]]$ofile;
}
}
return( normalizePath( filepath ) );
}
#' Convert a data structure to JSON.
#'
#' @param x A data structure to convert
#' @return JSON blob
#'
#' @examples
#' x <- seq( -6.5, 25, 0.5 );
#' json <- to_json( x );
to_json <- function( x ) {
return( jsonlite::toJSON( x, digits = 16, auto_unbox = TRUE ) );
}
#' Generate an output absolute filepath based on the script directory.
#'
#' @param name An output filename
#' @return An absolute filepath
#'
#' @examples
#' filepath <- get_filepath( "data.json" );
get_filepath <- function( name ) {
return( paste( source_dir, "/", name, sep = "" ) );
}
# Get the directory of this script:
source_dir <- dirname( get_script_path() );
# Generate test fixture data:
x <- rnorm( 100 );
twosided <- t.test( x );
twosided <- list( x = x, pValue = twosided$p.value, statistic = twosided$statistic, lower = twosided$conf.int[1], upper = twosided$conf.int[2] );
x <- rnorm( 100, 2.0, 2.5 );
twosided.custom.alpha <- t.test( x, conf.level = 0.9 );
twosided.custom.alpha <- list(
x = x, pValue = twosided.custom.alpha$p.value,
statistic = twosided.custom.alpha$statistic,
lower = twosided.custom.alpha$conf.int[1],
upper = twosided.custom.alpha$conf.int[2]
);
x <- rnorm( 100 );
less <- t.test( x, alternative = "less" );
less <- list( x = x, pValue = less$p.value, statistic = less$statistic, lower = less$conf.int[1], upper = less$conf.int[2] );
x <- rnorm( 100 );
greater <- t.test( x, alternative = "greater" );
greater <- list( x = x, pValue = greater$p.value, statistic = greater$statistic, lower = greater$conf.int[1], upper = greater$conf.int[2] );
x <- rnorm( 100 );
less.custom.alpha <- t.test( x, alternative = "less", conf.level = 0.99 );
less.custom.alpha <- list( x = x, pValue = less.custom.alpha$p.value, statistic = less.custom.alpha$statistic, lower = less.custom.alpha$conf.int[1], upper = less.custom.alpha$conf.int[2] );
x <- rnorm( 100 );
greater.custom.alpha <- t.test( x, alternative = "greater", conf.level = 0.9 );
greater.custom.alpha <- list(
x = x,
pValue = greater.custom.alpha$p.value,
statistic = greater.custom.alpha$statistic,
lower = greater.custom.alpha$conf.int[1],
upper = greater.custom.alpha$conf.int[2]
);
x <- rnorm( 100, 1.0, 2.0 );
y <- rnorm( 100, 1.0, 2.0 );
paired <- t.test( x, y, paired = TRUE );
paired <- list( x = x, y = y, pValue = paired$p.value, statistic = paired$statistic, lower = paired$conf.int[1], upper = paired$conf.int[2] );
x <- rnorm( 100, 1.0, 2.0 );
y <- rnorm( 100, 2.0, 2.0 );
paired.less <- t.test( x, y, paired = TRUE, alternative = "less" );
paired.less <- list( x = x, y = y, pValue = paired.less$p.value, statistic = paired.less$statistic, lower = paired.less$conf.int[1], upper = paired.less$conf.int[2] );
# Convert fixture data to JSON:
twosided <- to_json( twosided );
twosided.custom.alpha <- to_json( twosided.custom.alpha );
greater <- to_json( greater );
less <- to_json( less );
paired <- to_json( paired );
paired.less <- to_json( paired.less );
less.custom.alpha <- to_json( less.custom.alpha );
greater.custom.alpha <- to_json( greater.custom.alpha );
# Write the data to file...
filepath <- get_filepath( "twosided.json" );
write( twosided, filepath );
filepath <- get_filepath( "twosided_custom_alpha.json" );
write( twosided.custom.alpha, filepath );
filepath <- get_filepath( "greater.json" );
write( greater, filepath );
filepath <- get_filepath( "less.json" );
write( less, filepath );
filepath <- get_filepath( "paired.json" );
write( paired, filepath );
filepath <- get_filepath( "paired_less.json" );
write( paired.less, filepath );
filepath <- get_filepath( "less_custom_alpha.json" );
write( less.custom.alpha, filepath );
filepath <- get_filepath( "greater_custom_alpha.json" );
write( greater.custom.alpha, filepath );
}
main();
|
#' Cleans source data from NOAA
#'
#' \code{eq_clean_data} cleans date, latitude and longitude, and location name
#' from the source NOAA data
#'
#' @param data A data frame with raw data obtained from NOAA website
#'
#' @return A data frame with cleaned date, latitude, longitude and location
#' columns
#'
#' @details The function adds a column DATE with cleaned date (negative, i.e.
#' BCE, years are supported), transforms LATITUDE and LONGITUDE columns as
#' numeric objects and transforms LOCATION_NAME by removing the country and
#' transforming to title case.
#'
#' @examples
#' \dontrun{
#' data <- readr::read_delim("earthquakes.tsv.gz", delim = "\t")
#' clean_data <- eq_clean_data(data)
#' }
#'
#' @importFrom dplyr %>% mutate select
#' @importFrom lubridate ymd
#' @importFrom stringr str_pad
#'
#' @export
eq_clean_data <- function(data) {
  # Zero-pad the (absolute) year so lubridate::ymd() can parse a
  # "year-month-day" string; truncated = 2 tolerates missing month/day.
  # FIX: replaced the deprecated underscore (SE) verbs mutate_()/select_()
  # with their tidy-eval equivalents, matching the @importFrom above.
  data <- data %>%
    dplyr::mutate(
      year_fix = stringr::str_pad(as.character(abs(YEAR)), width = 4,
                                  side = "left", pad = "0"),
      date_paste = paste(year_fix, MONTH, DAY, sep = "-"),
      DATE = lubridate::ymd(date_paste, truncated = 2)) %>%
    dplyr::select(-year_fix, -date_paste)
  # Restore the signed year (ymd() cannot represent BCE years directly).
  lubridate::year(data$DATE) <- data$YEAR
  data <- data %>%
    dplyr::mutate(LATITUDE = as.numeric(LATITUDE),
                  LONGITUDE = as.numeric(LONGITUDE))
  data <- eq_location_clean(data)
  data
}
#' Cleans earthquake location data
#'
#' @param data A data frame with raw data obtained from NOAA website
#'
#' @return A data frame with cleaned LOCATION_NAME column
#'
#' @details This function transforms NOAA data frame LOCATION_NAME column by
#'   trimming the country name (if applicable) and converting to title case
#'
#' @note The function is not exported
#'
#' @examples
#' \dontrun{
#' data <- readr::read_delim("earthquakes.tsv.gz", delim = "\t")
#' clean_data <- eq_location_clean(data)
#' }
#'
#' @importFrom dplyr %>% mutate
#' @importFrom stringr str_replace str_trim str_to_title
eq_location_clean <- function(data) {
  # Strip the leading "COUNTRY:" prefix (when present), trim surrounding
  # whitespace and convert the remaining location to title case.  The
  # deprecated mutate_() + formula idiom is replaced with plain mutate(),
  # matching the @importFrom declaration above.
  data <- data %>%
    dplyr::mutate(LOCATION_NAME = LOCATION_NAME %>%
                    stringr::str_replace(paste0(COUNTRY, ":"), "") %>%
                    stringr::str_trim("both") %>%
                    stringr::str_to_title())
  data
}
| /R/data_prep.R | permissive | JimMeister/capstoneJH | R | false | false | 2,376 | r |
#' Cleans source data from NOAA
#'
#' \code{eq_clean_data} cleans date, latitude and longitude, and location name
#' from the source NOAA data
#'
#' @param data A data frame with raw data obtained from NOAA website
#'
#' @return A data frame with cleaned date, latitude, longitude and location
#'   columns
#'
#' @details The function adds a column DATE with cleaned date, transforms LATITUDE and
#'   LONGITUDE columns as numeric objects and transforms LOCATION_NAME by removing
#'   the country and transforming to title case.
#'
#' @examples
#' \dontrun{
#' data <- readr::read_delim("earthquakes.tsv.gz", delim = "\t")
#' clean_data <- eq_clean_data(data)
#' }
#'
#' @importFrom dplyr %>% mutate select
#' @importFrom lubridate ymd
#' @importFrom stringr str_pad
#'
#' @export
eq_clean_data <- function(data) {
  # Build DATE from YEAR/MONTH/DAY.  YEAR is padded to four digits so that
  # lubridate::ymd() can parse it; `truncated = 2` tolerates missing MONTH
  # and/or DAY.  The deprecated underscored verbs mutate_()/select_() (removed
  # from modern dplyr) are replaced with their NSE equivalents, matching the
  # @importFrom declarations above.
  data <- data %>%
    dplyr::mutate(
      year_fix = stringr::str_pad(as.character(abs(YEAR)), width = 4,
                                  side = "left", pad = "0"),
      date_paste = paste(year_fix, MONTH, DAY, sep = "-"),
      DATE = lubridate::ymd(date_paste, truncated = 2)) %>%
    dplyr::select(-year_fix, -date_paste)
  # Restore the original (possibly negative, i.e. BCE) year, which ymd()
  # cannot represent directly.
  lubridate::year(data$DATE) <- data$YEAR
  data <- data %>%
    dplyr::mutate(LATITUDE = as.numeric(LATITUDE),
                  LONGITUDE = as.numeric(LONGITUDE))
  data <- eq_location_clean(data)
  data
}
#' Cleans earthquake location data
#'
#' @param data A data frame with raw data obtained from NOAA website
#'
#' @return A data frame with cleaned LOCATION_NAME column
#'
#' @details This function transforms NOAA data frame LOCATION_NAME column by
#'   trimming the country name (if applicable) and converting to title case
#'
#' @note The function is not exported
#'
#' @examples
#' \dontrun{
#' data <- readr::read_delim("earthquakes.tsv.gz", delim = "\t")
#' clean_data <- eq_location_clean(data)
#' }
#'
#' @importFrom dplyr %>% mutate
#' @importFrom stringr str_replace str_trim str_to_title
eq_location_clean <- function(data) {
  # Strip the leading "COUNTRY:" prefix (when present), trim surrounding
  # whitespace and convert the remaining location to title case.  The
  # deprecated mutate_() + formula idiom is replaced with plain mutate(),
  # matching the @importFrom declaration above.
  data <- data %>%
    dplyr::mutate(LOCATION_NAME = LOCATION_NAME %>%
                    stringr::str_replace(paste0(COUNTRY, ":"), "") %>%
                    stringr::str_trim("both") %>%
                    stringr::str_to_title())
  data
}
|
library(httptest)
library(here)
# First evaluate the API key if available: a valid Quick Stats key is 36
# characters long.  If the environment variable does not hold one, fall back
# to a local api-key.txt file (kept out of version control).
api_key <- Sys.getenv("NASSQS_TOKEN")
api_file <- here::here("tests/testthat/api-key.txt")
# Scalar condition: use `&&` (short-circuit, errors on vectors since R 4.3)
# rather than elementwise `&`, and reuse the already-captured api_key.
if (nchar(api_key) != 36 && file.exists(api_file)) {
  Sys.setenv(NASSQS_TOKEN = readLines(api_file))
}
# Query parameters shared by the test suite
params <- list(
  agg_level_desc = "STATE",
  commodity_desc = "CORN",
  domaincat_desc = "NOT SPECIFIED",
  state_alpha = "VA",
  statisticcat_desc = "AREA HARVESTED",
  year = "2012"
)
### Generate error response data objects if needed
#testparams <- params
## 400 error
# testparams$year <- 2102
# query <- list(key = api_key)
# query <- append(query, flatten(testparams))
# r <- httr::GET("https://quickstats.nass.usda.gov/api/api_GET", query = query)
#saveRDS(r, file = test_path("testdata", "qsresponse_400.rds"))
# r <- httr::GET("http://httpbin.org/status/400")
# saveRDS(r, file = test_path("testdata", "response_400.rds"))
# # 413 error
# query <- list(key = api_key)
# query <- append(query, flatten(list(year__GET = 2000)))
# r <- httr::GET("https://quickstats.nass.usda.gov/api/api_GET", query = query)
# saveRDS(r, file = test_path("testdata", "qsresponse_413.rds"))
#r <- httr::GET("http://httpbin.org/status/413")
#saveRDS(r, file = test_path("testdata", "response_413.rds"))
# 429 error
# r <- httr::GET("http://httpbin.org/status/429")
# saveRDS(r, file = test_path("testdata", "response_429.rds"))
with_mock_api <- function(expr) {
  # Run `expr` against httptest's recorded fixtures, with a placeholder
  # token installed for the duration of the call and the previous value
  # restored afterwards (even on error).
  previous <- Sys.getenv("NASSQS_TOKEN")
  on.exit(Sys.setenv(NASSQS_TOKEN = previous), add = TRUE)
  Sys.setenv(NASSQS_TOKEN = "API_KEY")
  httptest::with_mock_api(expr)
}
with_authentication <- function(expr) {
  # Evaluate `expr` only when a real (36-character) API key is configured;
  # otherwise do nothing, so authenticated tests are silently skipped.
  token <- Sys.getenv("NASSQS_TOKEN")
  if (nchar(token) == 36) {
    expr
  }
}
| /tests/testthat/setup.R | permissive | ropensci/rnassqs | R | false | false | 1,823 | r | library(httptest)
library(here)
# First evaluate the API key if available: a valid Quick Stats key is 36
# characters long.  If the environment variable does not hold one, fall back
# to a local api-key.txt file (kept out of version control).
api_key <- Sys.getenv("NASSQS_TOKEN")
api_file <- here::here("tests/testthat/api-key.txt")
# Scalar condition: use `&&` (short-circuit, errors on vectors since R 4.3)
# rather than elementwise `&`, and reuse the already-captured api_key.
if (nchar(api_key) != 36 && file.exists(api_file)) {
  Sys.setenv(NASSQS_TOKEN = readLines(api_file))
}
# Query parameters shared by the test suite
params <- list(
  agg_level_desc = "STATE",
  commodity_desc = "CORN",
  domaincat_desc = "NOT SPECIFIED",
  state_alpha = "VA",
  statisticcat_desc = "AREA HARVESTED",
  year = "2012"
)
### Generate error response data objects if needed
#testparams <- params
## 400 error
# testparams$year <- 2102
# query <- list(key = api_key)
# query <- append(query, flatten(testparams))
# r <- httr::GET("https://quickstats.nass.usda.gov/api/api_GET", query = query)
#saveRDS(r, file = test_path("testdata", "qsresponse_400.rds"))
# r <- httr::GET("http://httpbin.org/status/400")
# saveRDS(r, file = test_path("testdata", "response_400.rds"))
# # 413 error
# query <- list(key = api_key)
# query <- append(query, flatten(list(year__GET = 2000)))
# r <- httr::GET("https://quickstats.nass.usda.gov/api/api_GET", query = query)
# saveRDS(r, file = test_path("testdata", "qsresponse_413.rds"))
#r <- httr::GET("http://httpbin.org/status/413")
#saveRDS(r, file = test_path("testdata", "response_413.rds"))
# 429 error
# r <- httr::GET("http://httpbin.org/status/429")
# saveRDS(r, file = test_path("testdata", "response_429.rds"))
with_mock_api <- function(expr) {
  # Run `expr` against httptest's recorded fixtures, with a placeholder
  # token installed for the duration of the call and the previous value
  # restored afterwards (even on error).
  previous <- Sys.getenv("NASSQS_TOKEN")
  on.exit(Sys.setenv(NASSQS_TOKEN = previous), add = TRUE)
  Sys.setenv(NASSQS_TOKEN = "API_KEY")
  httptest::with_mock_api(expr)
}
with_authentication <- function(expr) {
  # Evaluate `expr` only when a real (36-character) API key is configured;
  # otherwise do nothing, so authenticated tests are silently skipped.
  token <- Sys.getenv("NASSQS_TOKEN")
  if (nchar(token) == 36) {
    expr
  }
}
|
train <- function(dat_train, label_train, par=NULL){
  # Train several classifiers (random forest, kNN, xgboost, GBM) on image
  # features and return the fitted models together with tuning results.
  #
  # dat_train: data frame; the first 512 columns are RGB features extracted
  #   by ourselves, the remaining 5000 columns are SIFT features.
  # label_train: must be 0 and 1 in numeric type (not character or factor!)
  # par: unused; kept so the signature matches the project harness.
  #
  # Returns list(dat_train, label_train, k, fit_xgboost, fit_gbm,
  # fit_baseline).
  #
  # FIX(review): these two assignments were commented out in the original,
  # leaving dat_train1/dat_train2 undefined inside this function even though
  # everything below uses them (the code could only run if they happened to
  # exist in the global environment).
  dat_train1 <- dat_train[, 1:512]
  dat_train2 <- dat_train[, 513:5512]
  ### load libraries
  #library(ada)
  library(gbm)
  library(randomForest)
  library(class)
  library(xgboost)
  library(caret)
  ########################### Ada boost model (disabled)
  #ada.fit = ada(as.factor(label_train)~., data = dat_train1, type = 'discrete')
  #print('adaboost done')
  ########################## Tune random forest parameter 'mtry'
  set.seed(1234)
  bestmtry <- tuneRF(y = as.factor(label_train), x = dat_train1,
                     stepFactor = 1.5, improve = 1e-5, ntree = 600)
  # Keep the mtry with the smallest OOB error
  best.mtry <- bestmtry[, 1][which.min(bestmtry[, 2])]
  ########################### Fit random forest with the tuned mtry
  rf.fit <- randomForest(as.factor(label_train) ~ ., dat_train1,
                         mtry = best.mtry, ntree = 600, importance = TRUE)
  print('randomforest done')
  ########################### Tune kNN: 5-fold CV error for k = 1..10
  # NOTE(review): assumes nrow(dat_train1) is divisible by 5 -- confirm
  knn.Tuning <- data.frame(k = 1:10, cvError = rep(NA, 10))
  for (i in seq_len(nrow(knn.Tuning))) {
    index <- sample(rep(1:5, nrow(dat_train1) / 5))
    cvError.temp <- 0
    for (j in 1:5) {
      data.train <- dat_train1[index != j, ]
      data.test <- dat_train1[index == j, ]
      knn.temp <- knn(data.train, data.test,
                      cl = as.factor(label_train[index != j]),
                      k = knn.Tuning$k[i])
      cvError.temp <- cvError.temp + (1 - mean(label_train[index == j] == knn.temp)) / 5
    }
    knn.Tuning$cvError[i] <- cvError.temp
    print(paste(i, 'done'))
  }
  ########################### Sort so the best k comes first
  knn.Tuning <- knn.Tuning[order(knn.Tuning$cvError), ]
  print('knn done')
  ########################### Tune xgboost by random parameter search
  # NOTE(review): `early.stop.round` and `mdcv[, test.error.mean]` rely on a
  # legacy xgboost API in which xgb.cv returned a data.table; recent releases
  # use `early_stopping_rounds` and `$evaluation_log` -- confirm the
  # installed xgboost version before running.
  dtrain <- xgb.DMatrix(as.matrix(dat_train1), label = label_train)
  best_param <- list()
  best_seednumber <- 1234
  best_logloss <- Inf
  best_logloss_index <- 0
  for (iter in 1:30) {
    param <- list(objective = "binary:logistic",
                  max_depth = sample(6:10, 1),
                  eta = runif(1, .01, .3),
                  gamma = runif(1, 0.0, 0.2)
    )
    cv.nround <- 50
    cv.nfold <- 5
    seed.number <- sample.int(10000, 1)[[1]]
    set.seed(seed.number)
    mdcv <- xgb.cv(data = dtrain, params = param, nthread = 6,
                   nfold = cv.nfold, nrounds = cv.nround,
                   verbose = TRUE, early.stop.round = 8, maximize = FALSE)
    min_logloss <- min(mdcv[, test.error.mean])
    min_logloss_index <- which.min(mdcv[, test.error.mean])
    if (min_logloss < best_logloss) {
      best_logloss <- min_logloss
      best_logloss_index <- min_logloss_index
      best_seednumber <- seed.number
      best_param <- param
    }
  }
  ########################### Refit xgboost with the best parameters found
  nround <- best_logloss_index
  set.seed(best_seednumber)
  xg.fit <- xgboost(data = dtrain, params = best_param, nrounds = nround, nthread = 6)
  print('xgboost done')
  ####################### Tune GBM on the RGB features via caret 5-fold CV
  gbmGrid <- expand.grid(interaction.depth = (3:5) * 2, n.trees = (8:10) * 25,
                         shrinkage = .1, n.minobsinnode = 10)
  gbmcontrol <- trainControl(method = 'cv', number = 5)
  gbmfit <- caret::train(dat_train1, label_train,
                         method = "gbm", trControl = gbmcontrol, verbose = FALSE,
                         bag.fraction = 0.5, tuneGrid = gbmGrid)
  gbm_fit <- gbm.fit(x = dat_train1, y = label_train,
                     n.trees = gbmfit$bestTune$n.trees,
                     interaction.depth = gbmfit$bestTune$interaction.depth,
                     shrinkage = gbmfit$bestTune$shrinkage,
                     n.minobsinnode = gbmfit$bestTune$n.minobsinnode,
                     distribution = 'bernoulli')
  ########### Baseline GBM on the SIFT features with default parameters
  gbm_fit2 <- gbm.fit(x = dat_train2, y = label_train, distribution = 'bernoulli')
  # NOTE(review): rf.fit is computed but (as in the original) not returned;
  # the return interface is kept unchanged for callers.
  return(list(dat_train = dat_train1, label_train = label_train,
              k = knn.Tuning$k[1], fit_xgboost = xg.fit,
              fit_gbm = gbm_fit, fit_baseline = gbm_fit2))
}
| /train 2.R | no_license | panyijia1269/spr2017-proj3-group-14-master | R | false | false | 5,080 | r | train <- function(dat_train, label_train, par=NULL){
#dat_train is a dataframe which the first 512 columns are the rgb features extracted by ourselves, the rest 5000 columns are the SIFT features
#label_train must be 0 and 1 in numeric type (not character or factor!!!)
#dat_train1= dat_train[,1:512]
#dat_train2= dat_train[,513:5512]
### Train a Gradient Boosting Model (GBM) using processed features from training images
### Input:
### - processed features from images
### - class labels for training images
### Output: training model specification
### load libraries
#library(ada)
library(gbm)
library(randomForest)
library(class)
library(xgboost)
library(caret)
########################### Ada boost model
#ada.fit= ada(label_train~.,data=dat_train1,type="discrete")
#ada.fit = ada(as.factor(label_train)~., data = dat_train1, type = 'discrete')
#print('adaboost done')
########################## Tune random forest model
# Tune parameter 'mtry'
set.seed(1234)
bestmtry <- tuneRF(y=as.factor(label_train), x=dat_train1, stepFactor=1.5, improve=1e-5, ntree=600)
best.mtry <- bestmtry[,1][which.min(bestmtry[,2])]
########################### Get random forest model
rf.fit=randomForest(as.factor(label_train)~., dat_train1, mtry=best.mtry, ntree=600, importance=T)
print('randomforest done')
########################### Tune Knn model
knn.Tuning<-data.frame(k=1:10,cvError=rep(NA,10))
for(i in 1:nrow(knn.Tuning)){
index= sample(rep(1:5,nrow(dat_train1)/5))
cvError.temp=0
for(j in 1:5){
data.train= dat_train1[index != j,]
data.test= dat_train1[index==j,]
knn.temp= knn(data.train, data.test, cl=as.factor(label_train[index != j]) , k = knn.Tuning$k[i])
cvError.temp=cvError.temp+(1- mean(label_train[index == j]==knn.temp))/5
}
knn.Tuning$cvError[i]= cvError.temp
print(paste(i, 'done'))
}
########################### Get k for Knn model
knn.Tuning<-knn.Tuning[order(knn.Tuning$cvError),]
print('knn done')
########################### Tune XG boost
dtrain <- xgb.DMatrix(as.matrix(dat_train1),label = label_train)
best_param = list()
best_seednumber = 1234
best_logloss = Inf
best_logloss_index = 0
for (iter in 1:30) {
param <- list(objective = "binary:logistic",
max_depth = sample(6:10, 1),
eta = runif(1, .01, .3),
gamma = runif(1, 0.0, 0.2)
)
cv.nround = 50
cv.nfold = 5
seed.number = sample.int(10000, 1)[[1]]
set.seed(seed.number)
mdcv <- xgb.cv(data=dtrain, params = param, nthread=6,
nfold=cv.nfold, nrounds=cv.nround,
verbose = T, early.stop.round=8, maximize=FALSE)
min_logloss = min(mdcv[, test.error.mean])
min_logloss_index = which.min(mdcv[, test.error.mean])
if (min_logloss < best_logloss) {
best_logloss = min_logloss
best_logloss_index = min_logloss_index
best_seednumber = seed.number
best_param = param
}
}
########################### get XG boost model
nround = best_logloss_index
set.seed(best_seednumber)
xg.fit <- xgboost(data=dtrain, params=best_param, nrounds=nround, nthread=6)
print('xgboost done')
#######################
#######gbm
gbmGrid <- expand.grid(interaction.depth = (3:5) * 2,n.trees = (8:10)*25,shrinkage = .1,
n.minobsinnode = 10)
gbmcontrol <- trainControl(method = 'cv', number = 5)
gbmfit <- caret::train(dat_train1, label_train,
method = "gbm", trControl = gbmcontrol, verbose = FALSE,
bag.fraction = 0.5, tuneGrid = gbmGrid)
gbm_fit <- gbm.fit(x = dat_train1, y = label_train, n.trees = gbmfit$bestTune$n.trees, interaction.depth = gbmfit$bestTune$interaction.depth,
shrinkage = gbmfit$bestTune$shrinkage, n.minobsinnode = gbmfit$bestTune$n.minobsinnode, distribution = 'bernoulli')
###########This is the baseline GBM, we use the default parameter values
#gbmGrid2 <- expand.grid(interaction.depth = (3:5)*2 ,n.trees = (8:10)*25,shrinkage = .1,
# n.minobsinnode = 10)
#gbmcontrol2 <- trainControl(method = 'cv', number = 5)
#gbmfit2 <- caret::train(dat_train2, label_train,
# method = "gbm", trControl = gbmcontrol2, verbose = FALSE,
# bag.fraction = 0.5, tuneGrid = gbmGrid2)
gbm_fit2 <- gbm.fit(x = dat_train2, y = label_train, distribution = 'bernoulli')
#n.trees = gbmfit2$bestTune$n.trees,
#interaction.depth = gbmfit2$bestTune$interaction.depth,
#shrinkage = gbmfit2$bestTune$shrinkage,
#n.minobsinnode = gbmfit2$bestTune$n.minobsinnode,
return(list(#fit_ada=ada.fit,fit_rf=rf.fit, #fit_svm= svm.fit, kernel= kernel,
dat_train= dat_train1, label_train= label_train, k=knn.Tuning$k[1], fit_xgboost=xg.fit,
fit_gbm = gbm_fit, fit_baseline= gbm_fit2))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_CHO.R
\name{create_capture_CHO}
\alias{create_capture_CHO}
\title{Create capture data table for Choupal, Portugal.}
\usage{
create_capture_CHO(data)
}
\arguments{
\item{data}{Data frame. Primary data from Choupal.}
}
\value{
A data frame.
}
\description{
Create capture data table in standard format for data from Choupal,
Portugal.
}
| /man/create_capture_CHO.Rd | no_license | SPI-Birds/pipelines | R | false | true | 420 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_CHO.R
\name{create_capture_CHO}
\alias{create_capture_CHO}
\title{Create capture data table for Choupal, Portugal.}
\usage{
create_capture_CHO(data)
}
\arguments{
\item{data}{Data frame. Primary data from Choupal.}
}
\value{
A data frame.
}
\description{
Create capture data table in standard format for data from Choupal,
Portugal.
}
|
# Two-locus haplotype probabilities on the autosome and the X chromosome
# (female and male) across generations, plotted to an EPS figure.
source("../Calculations/TwolociA4/R/hap.R")
source("../Calculations/TwolociX4/R/female_hap.R")
source("../Calculations/TwolociX4/R/male_hap.R")
r <- seq(0, 0.5, len=251)   # recombination fractions
#k <- c(0:19, 500)
k <- c(0:6, 500)            # generations; 500 approximates F_infinity
hapA <- array(dim=c(length(r), 3, length(k)))
hapXm <- hapXf <- array(dim=c(length(r), 4, length(k)))
dimnames(hapA) <- list(r, c("AA","AB", "AC"), k)
dimnames(hapXf) <- dimnames(hapXm) <- list(r, c("AA","AB", "AC", "CC"), k)
# Fill the probability arrays; seq_along() replaces the old seq(along=)
# idiom (identical behavior, preferred modern form).
for(i in seq_along(r)) {
  for(j in seq_along(k)) {
    hapA[i,,j] <- hap(r[i], k[j])
    hapXf[i,,j] <- femalehap(r[i], k[j])
    hapXm[i,,j] <- malehap(r[i], k[j])
  }
}
# Clamp tiny numerical noise to exactly zero
hapA[hapA < 1e-12] <- 0
hapXf[hapXf < 1e-12] <- 0
hapXm[hapXm < 1e-12] <- 0
postscript("../Figs/happrob_fig.eps", height=7.4, width=6.5, pointsize=10, onefile=TRUE, horizontal=FALSE)
par(oma=c(0, 0, 1, 0), mar=c(4.1, 5.1, 3.1, 1.1))
layout(cbind(c(1:3,12), 4:7, 8:11))   # 11 plot panels + 1 legend cell
par(las=1)
ymx <- c(0.5, 0.25, 1/8, 1)           # per-row y-axis limits
thecol <- c("red", "orange", "yellow3", "green", "cyan", "blue", "purple", "black")
# Autosome panels (3 haplotype classes)
for(i in 1:3) {
  plot(r, hapA[,i,1], type="l", xlab="recombination fraction", ylim=c(0,ymx[i]),
       ylab="Haplotype probability",
       xaxs="i", col=thecol[1])
  title(main=colnames(hapA)[i], line=0.5, xpd=TRUE)
  if(i==1) title(main="Autosome", line=2.2, xpd=TRUE)
  for(j in 2:7)
    lines(r, hapA[,i,j], col=thecol[j], lty=ifelse(j==2, 2, 1))
  lines(r, hapA[,i,length(k)], col=thecol[length(thecol)])
}
# Female X chromosome panels (4 haplotype classes)
for(i in 1:4) {
  plot(r, hapXf[,i,1], type="l", xlab="recombination fraction", ylim=c(0,ymx[i]),
       ylab="Haplotype probability",
       xaxs="i", col=thecol[1])
  title(main=colnames(hapXf)[i], line=0.5, xpd=TRUE)
  if(i==1) title(main="Female X chromosome", line=2.2, xpd=TRUE)
  for(j in 2:7)
    lines(r, hapXf[,i,j], col=thecol[j], lty=ifelse(j==2, 2, 1))
  lines(r, hapXf[,i,length(k)], col=thecol[length(thecol)])
}
# Male X chromosome panels (4 haplotype classes)
for(i in 1:4) {
  plot(r, hapXm[,i,1], type="l", xlab="recombination fraction", ylim=c(0,ymx[i]),
       ylab="Haplotype probability",
       xaxs="i", col=thecol[1])
  title(main=colnames(hapXm)[i], line=0.5, xpd=TRUE)
  if(i==1) title(main="Male X chromosome", line=2.2, xpd=TRUE)
  for(j in 2:7)
    lines(r, hapXm[,i,j], col=thecol[j], lty=ifelse(j==2, 2, 1))
  lines(r, hapXm[,i,length(k)], col=thecol[length(thecol)])
}
# Legend in the spare layout cell
par(mar=rep(0.1, 4))
plot(0,0,bty="n", xlab="", ylab="", xaxt="n", yaxt="n", type="n")
legend("center", lwd=2, col=thecol, lty=c(1,2,1,1,1,1,1,1), seg.len=5,
       c(expression(F[0]),
         expression(F[1]),
         expression(F[2]),
         expression(F[3]),
         expression(F[4]),
         expression(F[5]),
         expression(F[6]),
         expression(F[infinity])))
dev.off()
| /R/happrob_fig.R | permissive | kbroman/preCCProbPaper | R | false | false | 2,681 | r | source("../Calculations/TwolociA4/R/hap.R")
source("../Calculations/TwolociX4/R/female_hap.R")
source("../Calculations/TwolociX4/R/male_hap.R")
r <- seq(0, 0.5, len=251)   # recombination fractions
#k <- c(0:19, 500)
k <- c(0:6, 500)            # generations; 500 approximates F_infinity
hapA <- array(dim=c(length(r), 3, length(k)))
hapXm <- hapXf <- array(dim=c(length(r), 4, length(k)))
dimnames(hapA) <- list(r, c("AA","AB", "AC"), k)
dimnames(hapXf) <- dimnames(hapXm) <- list(r, c("AA","AB", "AC", "CC"), k)
# Fill the probability arrays; seq_along() replaces the old seq(along=)
# idiom (identical behavior, preferred modern form).
for(i in seq_along(r)) {
  for(j in seq_along(k)) {
    hapA[i,,j] <- hap(r[i], k[j])
    hapXf[i,,j] <- femalehap(r[i], k[j])
    hapXm[i,,j] <- malehap(r[i], k[j])
  }
}
# Clamp tiny numerical noise to exactly zero
hapA[hapA < 1e-12] <- 0
hapXf[hapXf < 1e-12] <- 0
hapXm[hapXm < 1e-12] <- 0
postscript("../Figs/happrob_fig.eps", height=7.4, width=6.5, pointsize=10, onefile=TRUE, horizontal=FALSE)
par(oma=c(0, 0, 1, 0), mar=c(4.1, 5.1, 3.1, 1.1))
layout(cbind(c(1:3,12), 4:7, 8:11))   # 11 plot panels + 1 legend cell
par(las=1)
ymx <- c(0.5, 0.25, 1/8, 1)           # per-row y-axis limits
thecol <- c("red", "orange", "yellow3", "green", "cyan", "blue", "purple", "black")
# Autosome panels (3 haplotype classes)
for(i in 1:3) {
  plot(r, hapA[,i,1], type="l", xlab="recombination fraction", ylim=c(0,ymx[i]),
       ylab="Haplotype probability",
       xaxs="i", col=thecol[1])
  title(main=colnames(hapA)[i], line=0.5, xpd=TRUE)
  if(i==1) title(main="Autosome", line=2.2, xpd=TRUE)
  for(j in 2:7)
    lines(r, hapA[,i,j], col=thecol[j], lty=ifelse(j==2, 2, 1))
  lines(r, hapA[,i,length(k)], col=thecol[length(thecol)])
}
# Female X chromosome panels (4 haplotype classes)
for(i in 1:4) {
  plot(r, hapXf[,i,1], type="l", xlab="recombination fraction", ylim=c(0,ymx[i]),
       ylab="Haplotype probability",
       xaxs="i", col=thecol[1])
  title(main=colnames(hapXf)[i], line=0.5, xpd=TRUE)
  if(i==1) title(main="Female X chromosome", line=2.2, xpd=TRUE)
  for(j in 2:7)
    lines(r, hapXf[,i,j], col=thecol[j], lty=ifelse(j==2, 2, 1))
  lines(r, hapXf[,i,length(k)], col=thecol[length(thecol)])
}
# Male X chromosome panels (4 haplotype classes)
for(i in 1:4) {
  plot(r, hapXm[,i,1], type="l", xlab="recombination fraction", ylim=c(0,ymx[i]),
       ylab="Haplotype probability",
       xaxs="i", col=thecol[1])
  title(main=colnames(hapXm)[i], line=0.5, xpd=TRUE)
  if(i==1) title(main="Male X chromosome", line=2.2, xpd=TRUE)
  for(j in 2:7)
    lines(r, hapXm[,i,j], col=thecol[j], lty=ifelse(j==2, 2, 1))
  lines(r, hapXm[,i,length(k)], col=thecol[length(thecol)])
}
# Legend in the spare layout cell
par(mar=rep(0.1, 4))
plot(0,0,bty="n", xlab="", ylab="", xaxt="n", yaxt="n", type="n")
legend("center", lwd=2, col=thecol, lty=c(1,2,1,1,1,1,1,1), seg.len=5,
       c(expression(F[0]),
         expression(F[1]),
         expression(F[2]),
         expression(F[3]),
         expression(F[4]),
         expression(F[5]),
         expression(F[6]),
         expression(F[infinity])))
dev.off()
# Process nofib data
#library(dplyr)
#library(rlang)
gm_mean <- function(x, na.rm = TRUE) {
  # Geometric mean of the strictly positive values in `x`.
  #
  # FIX(review): the original summed log() over the positive values but
  # divided by length(x), so zeros and (with na.rm = TRUE) NA entries still
  # counted in the denominator, biasing the mean toward zero.  Divide by the
  # number of values actually included instead.  With no positive values the
  # result is NaN (the original silently returned 1).
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  pos <- x[x > 0]
  exp(mean(log(pos)))
}
resultPath <- "ben_1/"
compiler <- "allCalls"
# Get the list of result files (one CSV per compiler.variant.benchmark)
files <- list.files(resultPath, pattern="*.csv", all.files=FALSE,
                    full.names=FALSE, ignore.case = TRUE)
files
splitFun <- function(x) strsplit(x, "\\.")
splitNames <- lapply(X = files, FUN = splitFun)
splitNames
# Get list of benchmarks by looking at third component of csv file name
# (<compiler>.<variant>.<benchmark>.csv)
benchmarks <- c()
for(sn in splitNames) {
  benchmarks <- unique(rbind(benchmarks, sn[[1]][3]))
}
#benchmarks = c("set-operations-set")
benchmarks
variants <- c("all", "vanilla", "some", "none", "head")
vanillaVariant <- match(x = c("vanilla"), table = variants)
# Read one file up front to learn the benchmark (row) names
csv <- read.csv(paste(resultPath, compiler, ".", variants[[1]], ".", benchmarks[[1]], ".csv", sep=""), header = TRUE)
benchNames <- csv[,1]
resultNames <- list(variants = variants, executables = benchmarks, benchmarks = benchNames)
csvresults <- array(dim = c(length(variants), length(benchmarks), length(benchNames)), dimnames = resultNames)
# seq_along() replaces 1:length(x), which misbehaves on empty vectors
for(vi in seq_along(variants)) {
  for (bi in seq_along(benchmarks)) {
    variant <- variants[vi]
    benchmark <- benchmarks[bi]
    csv <- read.csv(paste(resultPath, compiler, ".", variant, ".", benchmark, ".csv", sep=""), header = TRUE)
    csvresults[vi,bi,] <- csv[,2]
  }
}
csvresults[,1,]
speedupDim <- dim(csvresults)
speedups <- array(dim = dim(csvresults))
vanillaVariant[[1]]
# Per-benchmark speedup of every variant relative to the vanilla build
for(vi in seq_along(variants)) {
  for (bi in seq_along(benchmarks)) {
    variant <- variants[vi]
    benchmark <- benchmarks[bi]
    speedup <- csvresults[vanillaVariant[[1]],bi,]/csvresults[vi,bi,]
    print(speedup)
    speedups[vi,bi,] <- speedup
  }
}
speedups[1,1,]
# Geometric-mean speedup per (benchmark, variant)
meanSpeedups <- matrix(nrow = length(benchmarks), ncol = length(variants), dimnames = list(bench = benchmarks, algo=variants))
for(vi in seq_along(variants)) {
  variant <- variants[vi]
  for(bi in seq_along(benchmarks)) {
    benchmark <- benchmarks[bi]
    speedup <- csvresults[vanillaVariant,bi,]/csvresults[vi,bi,]
    x <- gm_mean(speedup)
    meanSpeedups[bi, vi] <- x
  }
}
# Append the overall geometric mean per variant as an extra row
geoMean_overall <- apply(FUN = gm_mean, X = meanSpeedups, MARGIN = c(2))
meanSpeedups <- rbind(meanSpeedups, geoMean_overall)
meanSpeedups
heatmap(meanSpeedups)
# Percentage speedup per variant, sorted ascending
(sort(apply(FUN = gm_mean, X = meanSpeedups, MARGIN = c(2))) * 100) - 100
| /libs/xeno/xeno_eval.R | permissive | AndreasPK/bench_hc_libs | R | false | false | 2,428 | r | # Process nofib data
#library(dplyr)
#library(rlang)
gm_mean <- function(x, na.rm = TRUE) {
  # Geometric mean of the strictly positive values in `x`.
  #
  # FIX(review): the original summed log() over the positive values but
  # divided by length(x), so zeros and (with na.rm = TRUE) NA entries still
  # counted in the denominator, biasing the mean toward zero.  Divide by the
  # number of values actually included instead.  With no positive values the
  # result is NaN (the original silently returned 1).
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  pos <- x[x > 0]
  exp(mean(log(pos)))
}
## Location and compiler tag of the benchmark CSV files to process.
resultPath <- "ben_1/"
compiler <- "allCalls"
## All result files, named <compiler>.<variant>.<benchmark>.csv
files <- list.files(resultPath, pattern="*.csv", all.files=FALSE,
                    full.names=FALSE, ignore.case = TRUE)
files
splitFun <- function(x) strsplit(x, "\\.")   # split a file name on dots
splitNames <- lapply(X = files, FUN = splitFun)
splitNames
## Benchmark names are the third dot-separated component of each file name.
## vapply + unique replaces the previous pattern of growing a matrix with
## rbind() inside a loop (quadratic and returns a matrix, not a vector).
benchmarks <- unique(vapply(splitNames, function(sn) sn[[1]][3], character(1)))
#benchmarks = c("set-operations-set")
benchmarks
variants <- c("all", "vanilla", "some", "none", "head")
vanillaVariant <- match(x = c("vanilla"), table = variants)   # index of the baseline
## Read one file up front to learn the per-benchmark row names.
csv <- read.csv(paste0(resultPath, compiler, ".", variants[[1]], ".", benchmarks[[1]], ".csv"), header = TRUE)
benchNames <- csv[,1]
resultNames <- list(variants = variants, executables = benchmarks, benchmarks = benchNames)
## Load one CSV per (variant, benchmark) pair into a 3-d results array
## indexed [variant, benchmark executable, individual benchmark name].
csvresults <- array(dim = c(length(variants), length(benchmarks), length(benchNames)), dimnames = resultNames)
for (vi in seq_along(variants)) {          # seq_along() is safe for empty vectors
  for (bi in seq_along(benchmarks)) {
    ## File layout: <resultPath><compiler>.<variant>.<benchmark>.csv
    csv <- read.csv(paste0(resultPath, compiler, ".", variants[vi], ".", benchmarks[bi], ".csv"), header = TRUE)
    csvresults[vi, bi, ] <- csv[, 2]       # column 2 holds the measurements
  }
}
csvresults[, 1, ]   # interactive sanity check: all variants of the first executable
speedupDim <- dim(csvresults)
speedups <- array(dim = dim(csvresults))
vanillaVariant[[1]]
## Per-benchmark speedup of each variant relative to the vanilla baseline.
for (vi in seq_along(variants)) {
  for (bi in seq_along(benchmarks)) {
    speedup <- csvresults[vanillaVariant[[1]], bi, ] / csvresults[vi, bi, ]
    print(speedup)
    speedups[vi, bi, ] <- speedup
  }
}
speedups[1, 1, ]    # sanity check
## Geometric-mean speedup per (executable, variant); an overall row is
## appended afterwards.
meanSpeedups <- matrix(nrow = length(benchmarks), ncol = length(variants), dimnames = list(bench = benchmarks, algo = variants))
for (vi in seq_along(variants)) {
  for (bi in seq_along(benchmarks)) {
    meanSpeedups[bi, vi] <- gm_mean(csvresults[vanillaVariant, bi, ] / csvresults[vi, bi, ])
  }
}
geoMean_overall <- apply(FUN = gm_mean, X = meanSpeedups, MARGIN = c(2))
meanSpeedups <- rbind(meanSpeedups, geoMean_overall)
meanSpeedups
heatmap(meanSpeedups)
## Percentage improvement per variant, sorted ascending.
(sort(apply(FUN = gm_mean, X = meanSpeedups, MARGIN = c(2))) * 100) - 100
|
## Extract Banquereau survey records (regular + ice banks) from the saved
## 2019 all-banks survey image and write them out for a 2022 supporting task.
## NOTE(review): arrange() is dplyr, but no library(dplyr) call is visible in
## this script -- it relies on dplyr already being attached in the session.
load("Y:/Offshore/Assessment/Data/Survey_data/2019/Survey_summary_output/Survey_all_results.RData")
Ban <- all.surv.dat[all.surv.dat$bank %in% c("Ban", "BanIce"),]   # Banquereau + Banquereau Ice
Ban <- arrange(Ban, year, cruise, date, tow)                      # chronological order within cruise
write.csv(Ban, "Y:/Offshore/Assessment/2022/Supporting_tasks/Banquereau_survey_data.csv")
| /2022/banquereau_data_extraction.R | no_license | Mar-scal/Supporting_task_code | R | false | false | 302 | r | load("Y:/Offshore/Assessment/Data/Survey_data/2019/Survey_summary_output/Survey_all_results.RData")
## Subset the Banquereau records (regular + ice banks) and write them out,
## sorted by year, cruise, date and tow.
## NOTE(review): arrange() is dplyr; no library(dplyr) call is visible here.
Ban <- all.surv.dat[all.surv.dat$bank %in% c("Ban", "BanIce"),]
Ban <- arrange(Ban, year, cruise, date, tow)
write.csv(Ban, "Y:/Offshore/Assessment/2022/Supporting_tasks/Banquereau_survey_data.csv")
|
library(dplyr)
library(ggplot2)
## Moneyball exercise: find affordable replacements for the three players
## the 2001 Oakland A's lost, using Lahman-style batting and salary CSVs.
batting <- read.csv('Batting.csv')
print(head(batting))
## Derived rate stats: batting average, on-base percentage, singles, slugging.
batting$BA <- batting$H/batting$AB
batting$OBP <- (batting$H + batting$BB + batting$HBP)/(batting$AB + batting$BB + batting$HBP + batting$SF)
batting$X1B <- batting$H - batting$X2B - batting$X3B - batting$HR
batting$SLG <- ((1 * batting$X1B) + (2 * batting$X2B) + (3 * batting$X3B) + (4 * batting$HR) ) / batting$AB
str(batting)
sal <- read.csv('Salaries.csv')
## Salary data starts in 1985, so drop earlier batting seasons before merging.
batting <- subset(batting,yearID >= 1985)
print(summary(batting))
sal_batting <- merge(batting,sal,by=c('playerID','yearID'))
print(summary(sal_batting))
## 2001 stat lines of the three departed players (Giambi, Damon, Saenz).
lost_players <- subset(sal_batting,playerID %in% c('giambja01','damonjo01','saenzol01') )
lost_players <- subset(lost_players,yearID == 2001)
lost_players <- lost_players[,c('playerID','H','X2B','X3B','HR','OBP','SLG','BA','AB')]
print(head(lost_players))
## Candidate pool: 2001 players under $8M with a positive OBP and >= 500 ABs.
avail.players <- filter(sal_batting,yearID==2001)
plt <- ggplot(avail.players,aes(x=OBP,y=salary)) + geom_point()
print(plt)
avail.players <- filter(avail.players,salary<8000000,OBP>0)
avail.players <- filter(avail.players,AB >= 500)
## Top 10 by OBP; rows 2-4 are the suggested replacement trio.
possible <- head(arrange(avail.players,desc(OBP)),10)
possible <- possible[,c('playerID','OBP','AB','salary')]
print(possible[2:4,])
| /moneyBall.R | no_license | G-M-C/MoneyBall | R | false | false | 1,240 | r | library(dplyr)
library(ggplot2)
batting <- read.csv('Batting.csv')
print(head(batting))
batting$BA <- batting$H/batting$AB
batting$OBP <- (batting$H + batting$BB + batting$HBP)/(batting$AB + batting$BB + batting$HBP + batting$SF)
batting$X1B <- batting$H - batting$X2B - batting$X3B - batting$HR
batting$SLG <- ((1 * batting$X1B) + (2 * batting$X2B) + (3 * batting$X3B) + (4 * batting$HR) ) / batting$AB
str(batting)
sal <- read.csv('Salaries.csv')
batting <- subset(batting,yearID >= 1985)
print(summary(batting))
sal_batting <- merge(batting,sal,by=c('playerID','yearID'))
print(summary(sal_batting))
lost_players <- subset(sal_batting,playerID %in% c('giambja01','damonjo01','saenzol01') )
lost_players <- subset(lost_players,yearID == 2001)
lost_players <- lost_players[,c('playerID','H','X2B','X3B','HR','OBP','SLG','BA','AB')]
print(head(lost_players))
avail.players <- filter(sal_batting,yearID==2001)
plt <- ggplot(avail.players,aes(x=OBP,y=salary)) + geom_point()
print(plt)
avail.players <- filter(avail.players,salary<8000000,OBP>0)
avail.players <- filter(avail.players,AB >= 500)
possible <- head(arrange(avail.players,desc(OBP)),10)
possible <- possible[,c('playerID','OBP','AB','salary')]
print(possible[2:4,])
|
## Setup: compare VariMed disease-risk likelihood ratios (LR) between
## ICGC/TCGA cancer patients and the 1000 Genomes phase 3 reference.
## NOTE(review): setwd() and the absolute user paths make this script
## machine-specific; consider project-relative paths.
setwd("/Users/jiemingchen/Documents/varimed/pcawg/dz_risk_var_varimed_staging_LR_final_ext_sex_eth_spop_zm")
## my own library
source("/Users/jiemingchen/R_codes/jmRlib.R")
library(ggplot2)
library(RColorBrewer)
library(plyr)
library(dplyr)
library(reshape2)
library(magrittr)
## input combined risks for 1KGp3
## note that LR = exp(sum(log(LR))) of all SNPs with given dz from same sample; LR_max = SNP with max abs logLR
LR.1kg = read.delim("combined_dz_risk_1000GP_spop_zm.txt", header = T, sep = "\t", stringsAsFactors = FALSE, na.strings = "") ## curr one
## Work on log10 scale: LLR = log10(LR), LLR_max = log10(LR of strongest SNP).
LR.1kg.final = LR.1kg %>% mutate(LLR = log10(LR), LLR_max = log10(LR_max))
LR.1kg.final$dataset = "1KGP3"
## input combined risks for ICGC_TCGA
LR.cancer = read.delim("combined_dz_risk_ICGC_TCGA_spop_zm_histology.txt", header = T, sep = "\t", stringsAsFactors = FALSE, na.strings = "")
LR.cancer.final = LR.cancer %>% mutate(LLR = log10(LR), LLR_max = log10(LR_max))
LR.cancer.final$dataset = "ICGC_TCGA"
### some stats
## number of cancer types in ICGC_TCGA patients
cancertypes = sort(unique(LR.cancer.final$histology_abbreviation))
## number of broad_phenotypes
dz.1kg = sort(unique(LR.1kg.final$broad_phenotype))
dz.tcga = sort(unique(LR.cancer.final$broad_phenotype))
# Preprocessing datasets and calculate Mann Whitney test p values (unadj) and BH (adj)
## Preprocessing datasets and calculate Mann Whitney test p values (unadj) and BH (adj) ####
## this function preprocesses the datasets into 3 matrices FOR ONE POPULATION
## Loop Mann-Whitney test for TCGA vs 1KGp3 (will take a while... about 6 min)
## 1: num(dz)-by-num(cancertypes) matrix of MW p values (unadj)
## 2: num(dz)-by-num(cancertypes) matrix of size of subsets (note subsets<10 of size are NA)
## 3: melted down version of 1 with 2 columns of dz and cancertypes as primary keys for p values (unadj)
## 4: melted down version subsetted by cancertypes to match dz_cancers
## input requires columns LLR and histology_abbreviation and broad_phenotype
## tflags 1: MW unadjusted p values
## 2: size of datasets
## 3: numSNPs (mean num over all individuals in each dataset, risk + protective)
## Compare disease-risk LLR distributions between a cancer cohort and the
## 1KGP3 reference, one cell per (disease, cancer type), for one population.
##
## pop         filter expression as a string, e.g. "population == \"EUR\""
## dzes        broad_phenotype values (rows of the output matrices)
## cancertypes histology_abbreviation values (columns of the output matrices)
## cancerdata  ICGC/TCGA per-sample LLR table
## ref1kgdata  1KGP3 per-sample LLR table
## LLRcol      name of the LLR column to test.  BUGFIX: this argument was
##             previously ignored -- the sapply calls below hard-coded "LLR";
##             they now pass LLRcol through.
## tflag       0 = list(p-value matrix, melted matrix with BH-adjusted
##                 p values + ranks, cancer-matched subset of the melt)
##             1 = cohort sizes per cell (troubleshooting)
##             2 = mean per-individual SNP counts (risk + protective) per cell
preprocess_mat_by_pop <- function(pop, dzes, cancertypes, cancerdata, ref1kgdata, LLRcol, tflag=0)
{
  ## Per-cell worker: one (disease, cancer type) pair.
  ## NOTE(review): eval(parse(text = pop)) executes arbitrary caller-supplied
  ## code inside subset(); keep `pop` under trusted control only.
  mwp <- function(cancerdata, ref1kgdata, pop, LLRcol, cancertype, dz, tflag=0)
  {
    tcga = subset(cancerdata, eval(parse(text=pop)) & broad_phenotype == dz & histology_abbreviation == cancertype)
    onekg = subset(ref1kgdata, eval(parse(text=pop)) & broad_phenotype == dz)
    if (tflag == 0)
    {
      ## Two-sided unadjusted Mann-Whitney; arbitrary minimum of >10 samples
      ## in each group, otherwise NA.
      return(ifelse(nrow(tcga) > 10 & nrow(onekg) > 10, wilcox.test(tcga[,LLRcol], onekg[,LLRcol])$p.value, NA))
    }
    else if (tflag == 1)
    {
      ## Cohort sizes, for troubleshooting.
      return(paste("1KGP3:", nrow(onekg),"|TCGA:", nrow(tcga)))
    }
    else
    {
      ## Mean (over individuals) number of SNPs = risk + protective alleles.
      numSNPs.tcga = mean(tcga[,"SNP_risk"] + tcga[,"SNP_protective"])
      numSNPs.1kgp3 = mean(onekg[,"SNP_risk"] + onekg[,"SNP_protective"])
      return(paste("1KGP3:", numSNPs.1kgp3,"|TCGA:", numSNPs.tcga))
    }
  }
  if(tflag == 0)
  {
    ## (1) dz-by-cancertype matrix of unadjusted MW p values.
    mat.pval = as.data.frame(sapply(cancertypes, function(i) sapply(dzes, function(j) mwp(cancerdata, ref1kgdata, pop, LLRcol, i, j, tflag))))
    ## (3) melt pval matrix for heatmap plotting, by histology,
    ## + unadj pval + BH-adj p val
    mat.pval2 = cbind(rownames(mat.pval), mat.pval)
    colnames(mat.pval2)[1] = "broad_phenotype"
    mat.pval.m = melt(mat.pval2, variable.name = "histology_abbreviation", value.name = "LLR.p", id.vars = "broad_phenotype")
    ## BH-adjusted p values and the rank of the raw p values.
    mat.pval.m$LLR.p.adj = p.adjust(mat.pval.m$LLR.p, method = "BH")
    mat.pval.m$rank = rank(mat.pval.m$LLR.p)
    ## (4) subset to VariMed diseases that directly match a studied cancer type.
    mat.ss1.cancer.match = mat.pval.m %>% subset(broad_phenotype == "Breast_cancer" |
                                                   broad_phenotype == "Colorectal_cancer" |
                                                   broad_phenotype == "Esophageal_cancer" |
                                                   broad_phenotype == "Renal_cell_cancer" |
                                                   broad_phenotype == "Renal_cell_carcinoma" |
                                                   broad_phenotype == "HCV-induced_hepatocellular_carcinoma" |
                                                   broad_phenotype == "HBV-induced_hepatocellular_carcinoma" |
                                                   broad_phenotype == "Hepatocellular_carcinoma" |
                                                   broad_phenotype == "Lung_adenocarcinoma" |
                                                   broad_phenotype == "Lung_cancer" |
                                                   broad_phenotype == "Non-Small_cell_lung_cancer" |
                                                   broad_phenotype == "Squamous_cell_carcinoma_of_lungs" |
                                                   broad_phenotype == "Follicular_lymphoma" |
                                                   broad_phenotype == "Chronic_lymphocytic_leukemia" |
                                                   broad_phenotype == "Myeloproliferative_disorders" |
                                                   broad_phenotype == "Ovarian_cancer" |
                                                   broad_phenotype == "Pancreatic_cancer" |
                                                   broad_phenotype == "Prostate_cancer" |
                                                   broad_phenotype == "Melanoma" |
                                                   broad_phenotype == "Gastric_cancer" |
                                                   broad_phenotype == "Papillary_thyroid_cancer" |
                                                   broad_phenotype == "Thyroid_cancer")
    return(list(mat.pval, mat.pval.m, mat.ss1.cancer.match))
  }
  else if(tflag == 1)
  {
    ## (2) cohort sizes per cell.
    mat.nums = as.data.frame(sapply(cancertypes, function(i) sapply(dzes, function(j) mwp(cancerdata, ref1kgdata, pop, LLRcol, i, j, tflag))))
    return(mat.nums)
  }
  else
  {
    ## mean per-individual SNP counts per cell (LLRcol is unused on this path).
    mat.numSNPs = as.data.frame(sapply(cancertypes, function(i) sapply(dzes, function(j) mwp(cancerdata, ref1kgdata, pop, LLRcol, i, j, tflag))))
    return(mat.numSNPs)
  }
}
# ## EUR only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# EUR.procdata = preprocess_mat_by_pop("population == \"EUR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# EUR.mat.pval = as.data.frame(EUR.procdata[1])
# EUR.mat.pval.m = as.data.frame(EUR.procdata[2])
# EUR.cancer.match.ss1 = as.data.frame(EUR.procdata[3])
# EUR.mat.nums = preprocess_mat_by_pop("population == \"EUR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# EUR.mat.numSNPs = preprocess_mat_by_pop("population == \"EUR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## EAS only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# EAS.procdata = preprocess_mat_by_pop("population == \"EAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# EAS.mat.pval = as.data.frame(EAS.procdata[1])
# EAS.mat.pval.m = as.data.frame(EAS.procdata[2])
# EAS.cancer.match.ss1 = as.data.frame(EAS.procdata[3])
# EAS.mat.nums = preprocess_mat_by_pop("population == \"EAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# EAS.mat.numSNPs = preprocess_mat_by_pop("population == \"EAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## AMR only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# AMR.procdata = preprocess_mat_by_pop("population == \"AMR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# AMR.mat.pval = as.data.frame(AMR.procdata[1])
# AMR.mat.pval.m = as.data.frame(AMR.procdata[2])
# AMR.cancer.match.ss1 = as.data.frame(AMR.procdata[3])
# AMR.mat.nums = preprocess_mat_by_pop("population == \"AMR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# AMR.mat.numSNPs = preprocess_mat_by_pop("population == \"AMR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## AFR only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# AFR.procdata = preprocess_mat_by_pop("population == \"AFR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# AFR.mat.pval = as.data.frame(AFR.procdata[1])
# AFR.mat.pval.m = as.data.frame(AFR.procdata[2])
# AFR.cancer.match.ss1 = as.data.frame(AFR.procdata[3])
# AFR.mat.nums = preprocess_mat_by_pop("population == \"AFR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# AFR.mat.numSNPs = preprocess_mat_by_pop("population == \"AFR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## SAS only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# SAS.procdata = preprocess_mat_by_pop("population == \"SAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# SAS.mat.pval = as.data.frame(SAS.procdata[1])
# SAS.mat.pval.m = as.data.frame(SAS.procdata[2])
# SAS.cancer.match.ss1 = as.data.frame(SAS.procdata[3])
# SAS.mat.nums = preprocess_mat_by_pop("population == \"SAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# SAS.mat.numSNPs = preprocess_mat_by_pop("population == \"SAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
# Plot matrices
#################################################################
# Heatmap of p.values (adj) of cancertypes vs diseases in VariMed
## ggplot cant do heatmap v well
## ggplot cant border cells v well
library(data.table)
## Tiled heatmap of binned p values: one tile per (cancer type, disease) pair.
##
## mat        melted matrix with broad_phenotype / histology_abbreviation
##            columns plus the p-value column named by `colp`
## colp       name of the p-value column to bin and colour by
## xfontsize, yfontsize  axis tick-label sizes
## colors     fill colours for the four p-value bins
## pop        population label used in the y-axis title
## Returns (and auto-prints) the ggplot object.
plotmatrix <- function(mat, colp, xfontsize, yfontsize, colors, pop)
{
  p <- mat[, colp]
  ## Bin p values into the four categories driving the fill scale.
  ## NA p values stay NA and are drawn white via na.value.
  mat$p.cat <- ifelse(p <= 0.01, "0-0.01",
                      ifelse(p <= 0.05, "0.01-0.05",
                             ifelse(p <= 0.1, "0.05-0.1", "0.1-")))
  heat <- ggplot(mat, aes(histology_abbreviation, broad_phenotype)) +
    geom_tile(aes(fill = p.cat), colour = "white") +
    theme(legend.position = "none",
          axis.text.x = element_text(size = xfontsize, angle = 330,
                                     hjust = 0, color = "black"),
          axis.text.y = element_text(size = yfontsize)) +
    theme(legend.position = "right") +   # overrides the "none" set just above
    scale_fill_manual(values = colors, na.value = "white") +
    labs(y = paste(pop, "_broad_phenotype"), x = "histology_abbreviation")
  heat
}
## EUR
## NOTE(review): EUR.mat.pval.m, EUR.cancer.match.ss1 and EUR.mat.numSNPs are
## produced by the commented-out preprocess_mat_by_pop() calls earlier in the
## file -- that section must be run first or these calls fail.
x11(type="cairo")
plotmatrix(EUR.mat.pval.m, colp="LLR.p.adj", xfontsize=12, yfontsize=8, colors=c("black","#3794bf","#df8640","gray90"), "EUR")
# ggsave("EUR_alldz_allcancertypes_zm.pdf", device = "pdf")
x11(type="cairo")
plotmatrix(EUR.cancer.match.ss1, colp="LLR.p.adj", xfontsize=18, yfontsize=18, colors=c("black","gray90", "#3794bf","#df8640"), "EUR")
# ggsave("EUR_matched_allcancertypes_zm.pdf", device = "pdf")
## plot only complete and non-NA cases #####==============
x11(type="cairo")
EUR.mat.pval.m.new = EUR.mat.pval.m
## Blank out adjusted p values above 0.1 so complete.cases() drops them below.
EUR.mat.pval.m.new[,"LLR.p.adj"][EUR.mat.pval.m.new$LLR.p.adj>0.1] = NA
plotmatrix(EUR.mat.pval.m.new[complete.cases(EUR.mat.pval.m.new),], colp="LLR.p.adj", xfontsize=18, yfontsize=18, colors=c("black","#3794bf","#df8640","gray90"), "EUR")
# ggsave("EUR_complete_cases_zm.pdf", device = "pdf")
####### bubble plot for complete cases EUR #######
cc.eur = EUR.mat.pval.m.new[complete.cases(EUR.mat.pval.m.new),]
## Discretize adjusted p values into the same four bins used by plotmatrix().
cc.eur$p.cat = ifelse(cc.eur[,"LLR.p.adj"] <= 0.01, "0-0.01",
                      ifelse(cc.eur[,"LLR.p.adj"] > 0.01 & cc.eur[,"LLR.p.adj"] <=0.05, "0.01-0.05",
                             ifelse(cc.eur[,"LLR.p.adj"] > 0.05 & cc.eur[,"LLR.p.adj"] <=0.1, "0.05-0.1", "0.1-")))
colors=c("black","#3794bf","#df8640","gray90")
## Look up the "1KGP3: n |TCGA: m" mean-SNP-count strings for each cell, then
## parse out the two numbers and average them for the bubble sizes.
cc.eur$numSNPs = unlist(mapply(function(x,y) return(data.frame(EUR.mat.numSNPs[x,y])), cc.eur$broad_phenotype, cc.eur$histology_abbreviation))
cc.eur$numSNPs = as.character(cc.eur$numSNPs)
cc.eur$onekgSNPs =
  as.numeric(cc.eur$numSNPs %>% { gsub("1KGP3\\: ", "", .) } %>% { gsub(" \\|TCGA: .*", "", .) })
cc.eur$tcgaSNPs =
  as.numeric(cc.eur$numSNPs %>% { gsub(".*TCGA: ", "", .) })
cc.eur$numSNPsMean = (cc.eur$onekgSNPs + cc.eur$tcgaSNPs) / 2
cc.eur$p.cat = factor(cc.eur$p.cat)
# order of cancer type [MANUAL]
cc.eur$histology_abbreviation = factor(cc.eur$histology_abbreviation,
                                       levels=c("Prost-AdenoCA","Skin-Melanoma","Eso-AdenoCA","Stomach-AdenoCA","Lymph-CLL"))
# cc.eur$broad_phenotype = factor(cc.eur$broad_phenotype,
#                                 levels=rev(c("")))
write.table(cc.eur, "cc.eur.txt", sep="\t", quote=F)
## Mann-Whitney p values (two-sided / greater / less) comparing the LLR
## distribution of one cancer cohort against the 1KGP3 reference for one
## disease.  Returns a one-row data frame (dz, cancertype, twosided,
## greater, less); all three p values are NA unless both groups contain
## more than 10 individuals.
## NOTE(review): eval(parse(text = pop)) runs caller-supplied code inside
## subset() -- keep `pop` trusted.
mwp <- function(cancerdata, ref1kgdata, pop, LLRcol, cancertype, dz)
{
  tumour <- subset(cancerdata, eval(parse(text=pop)) & broad_phenotype == dz & histology_abbreviation == cancertype)
  reference <- subset(ref1kgdata, eval(parse(text=pop)) & broad_phenotype == dz)
  ## Arbitrary minimum of >10 data points in each group.
  if (nrow(tumour) > 10 && nrow(reference) > 10) {
    twosided <- wilcox.test(tumour[, LLRcol], reference[, LLRcol])$p.value
    greater <- wilcox.test(tumour[, LLRcol], reference[, LLRcol], alternative = "greater")$p.value
    less <- wilcox.test(tumour[, LLRcol], reference[, LLRcol], alternative = "less")$p.value
  } else {
    twosided <- NA
    greater <- NA
    less <- NA
  }
  data.frame(dz, cancertype, twosided, greater, less, stringsAsFactors = FALSE)
}
## manual ## note that the prostate-prostate here contains females + males in 1KGP3
## Hand-picked (disease, cancer type) pairs for directional MW tests; the two
## vectors below are positionally matched.
cc.eur.bp = c("Hair_color", "Melanoma", "Hair_color", "Prostate_cancer", "Melanoma", "Behcet's_disease")
cc.eur.ha = c("Eso-AdenoCA", "Eso-AdenoCA", "Lymph-CLL", "Prost-AdenoCA", "Skin-Melanoma", "Stomach-AdenoCA")
## mwp(cancerdata, ref, pop, LLRcol, cancertype, dz): x = histology, y = disease.
mwpp.eur = as.data.frame(t(mapply(function(x,y) mwp(LR.cancer.final, LR.1kg.final, "population==\"EUR\"", "LLR", x,y), cc.eur.ha, cc.eur.bp)))
colnames(mwpp.eur) = c("broad_phenotype","histology_abbreviation","twosided","greater","less")
## Direction of the effect: which one-sided test is more significant.
mwpp.eur$risk = ifelse(as.numeric(mwpp.eur$greater) < as.numeric(mwpp.eur$less), "greater", "less")
write.table(as.matrix(mwpp.eur), "cc.eur.mwp.txt", sep="\t", quote=F)
# plot
x11(type="cairo")
## Bubble plot: size = mean SNP count per cell, colour = p-value bin.
ggplot(cc.eur, aes(x=histology_abbreviation, y=broad_phenotype, size=numSNPsMean, fill=p.cat, color=p.cat)) +
  geom_point(shape = 21) +
  theme(legend.position = "none",
        axis.text.x = element_text(size = 18, angle = 330,
                                   hjust = 0, color = "black"),
        axis.text.y = element_text(size = 18)) +
  theme(legend.position="right") +
  scale_fill_manual(values = colors, na.value = "white") +
  scale_color_manual(values = colors, na.value = "white") +
  labs(y=paste("EUR","_broad_phenotype"),x="histology_abbreviation") +
  scale_size_area(max_size = 20) +
  theme(legend.title = element_text(size=15, face="bold"), legend.text=element_text(size=15))
## this add label for numbers
# + geom_text(aes(label=round(numSNPsMean)), size=5, nudge_x=0.0, nudge_y=0.6)
# ggsave("EUR_complete_cases_bubble_zm.pdf", device = "pdf", useDingbats=FALSE, width = 10.9, height = 9.36)
#####==============
## EAS
## NOTE(review): EAS.mat.pval.m, EAS.cancer.match.ss1 and EAS.mat.numSNPs come
## from the commented-out preprocess_mat_by_pop() section earlier in the file.
x11(type="cairo")
plotmatrix(EAS.mat.pval.m, colp="LLR.p.adj", xfontsize=12, yfontsize=8, colors=c("black","#3794bf","#df8640","gray90"), "EAS")
# ggsave("EAS_alldz_allcancertypes_zm.pdf", device = "pdf")
x11(type="cairo")
plotmatrix(EAS.cancer.match.ss1, colp="LLR.p.adj", xfontsize=18, yfontsize=18, colors=c("black","#df8640","gray90", "#3794bf"), "EAS")
# ggsave("EAS_matched_allcancertypes_zm.pdf", device = "pdf")
## plot only complete and non-NA cases
x11(type="cairo")
EAS.mat.pval.m.new = EAS.mat.pval.m
## Blank out adjusted p values above 0.1 so complete.cases() drops them below.
EAS.mat.pval.m.new[,"LLR.p.adj"][EAS.mat.pval.m.new$LLR.p.adj>0.1] = NA
plotmatrix(EAS.mat.pval.m.new[complete.cases(EAS.mat.pval.m.new),], colp="LLR.p.adj", xfontsize=10, yfontsize=10, colors=c("black","#3794bf","#df8640","gray90"), "EAS")
# ggsave("EAS_complete_cases_zm.pdf", device = "pdf")
####### bubble plot for complete cases EAS #######
cc.eas = EAS.mat.pval.m.new[complete.cases(EAS.mat.pval.m.new),]
## Discretize adjusted p values into the same four bins used by plotmatrix().
cc.eas$p.cat = ifelse(cc.eas[,"LLR.p.adj"] <= 0.01, "0-0.01",
                      ifelse(cc.eas[,"LLR.p.adj"] > 0.01 & cc.eas[,"LLR.p.adj"] <=0.05, "0.01-0.05",
                             ifelse(cc.eas[,"LLR.p.adj"] > 0.05 & cc.eas[,"LLR.p.adj"] <=0.1, "0.05-0.1", "0.1-")))
colors=c("black","#3794bf","#df8640","gray90")
## Parse the "1KGP3: n |TCGA: m" mean-SNP-count strings into bubble sizes.
cc.eas$numSNPs = unlist(mapply(function(x,y) return(data.frame(EAS.mat.numSNPs[x,y])), cc.eas$broad_phenotype, cc.eas$histology_abbreviation))
cc.eas$numSNPs = as.character(cc.eas$numSNPs)
cc.eas$onekgSNPs =
  as.numeric(cc.eas$numSNPs %>% { gsub("1KGP3\\: ", "", .) } %>% { gsub(" \\|TCGA: .*", "", .) })
cc.eas$tcgaSNPs =
  as.numeric(cc.eas$numSNPs %>% { gsub(".*TCGA: ", "", .) })
cc.eas$numSNPsMean = (cc.eas$onekgSNPs + cc.eas$tcgaSNPs) / 2
cc.eas$p.cat = factor(cc.eas$p.cat)
# order of cancer type
cc.eas$broad_phenotype = factor(cc.eas$broad_phenotype,
                                levels=c("Alcohol_dependence","Atopic_eczema","Celiac_disease","Coronary_artery_disease","Duodenal_ulcer","Glaucoma","Graves'_disease","Kawasaki_disease","Multiple_sclerosis","Myocardial_infarction","Narcolepsy","Polycystic_ovary_syndrome","Primary_biliary_cirrhosis","Rheumatoid_arthritis","Schizophrenia","Systemic_lupus_erythematosus","Systemic_sclerosis","Type_1_diabetes","Type_2_diabetes","Uterine_leiomyoma","Vitiligo","Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis","Inflammatory_bowel_disease","Nasopharyngeal_carcinoma","Lung_cancer","Endometrial_cancer","Esophageal_cancer","Thyroid_cancer","HBV-induced_hepatocellular_carcinoma"))
write.table(cc.eas, "cc.eas.txt", sep="\t", quote=F)
## cc.eas.bp and cc.eas.ha have to be in order and matched
cc.eas.bp = c("Alcohol_dependence","Atopic_eczema","Celiac_disease","Coronary_artery_disease","Duodenal_ulcer","Glaucoma","Graves'_disease","Kawasaki_disease","Multiple_sclerosis","Myocardial_infarction","Narcolepsy","Polycystic_ovary_syndrome","Primary_biliary_cirrhosis","Rheumatoid_arthritis","Schizophrenia","Systemic_lupus_erythematosus","Systemic_sclerosis","Type_1_diabetes","Type_2_diabetes","Uterine_leiomyoma","Vitiligo","Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis","Inflammatory_bowel_disease","Nasopharyngeal_carcinoma","Lung_cancer","Endometrial_cancer","Esophageal_cancer","Thyroid_cancer","HBV-induced_hepatocellular_carcinoma")
cc.eas.ha = c("Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Stomach-AdenoCA","Stomach-AdenoCA","Stomach-AdenoCA","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC")
## Directional MW tests for each matched pair (x = histology, y = disease).
mwpp.eas = as.data.frame(t(mapply(function(x,y) mwp(LR.cancer.final, LR.1kg.final, "population==\"EAS\"", "LLR", x,y), cc.eas.ha, cc.eas.bp)))
colnames(mwpp.eas) = c("broad_phenotype","histology_abbreviation","twosided","greater","less")
mwpp.eas$risk = ifelse(as.numeric(mwpp.eas$greater) < as.numeric(mwpp.eas$less), "greater", "less")
write.table(as.matrix(mwpp.eas), "cc.eas.mwp.txt", sep="\t", quote=F)
# plot
ggplot(cc.eas, aes(x=histology_abbreviation, y=broad_phenotype, size=numSNPsMean, fill=p.cat, color=p.cat)) +
  geom_point(shape = 21) +
  theme(legend.position = "none",
        axis.text.x = element_text(size = 18, angle = 330,
                                   hjust = 0, color = "black"),
        axis.text.y = element_text(size = 18)) +
  theme(legend.position="right") +
  scale_fill_manual(values = colors, na.value = "white") +
  scale_color_manual(values = rep("white",3), na.value = "white") +
  labs(y=paste("EAS","_broad_phenotype"),x="histology_abbreviation") +
  scale_size_area(max_size = 20) +
  theme(legend.title = element_text(size=15, face="bold"), legend.text=element_text(size=15))
## this add label for numbers
# + geom_text(aes(label=round(numSNPsMean)), size=5, nudge_x=0.0, nudge_y=0.6)
# ggsave("EAS_complete_cases_bubble_zm.pdf", device = "pdf", useDingbats=FALSE)
## 10.4 x 14.4
################################################################################
# ## AFR
## NOTE(review): AFR.mat.pval.m / AFR.cancer.match.ss1 come from the
## commented-out preprocess_mat_by_pop() section earlier in the file.
x11(type="cairo")
plotmatrix(AFR.mat.pval.m, colp="LLR.p.adj", xfontsize=8, yfontsize=5, colors=c("#3794bf","gray90","#df8640","black"), "AFR")
# ggsave("AFR_alldz_allcancertypes_zm.pdf", device = "pdf")
x11(type="cairo")
plotmatrix(AFR.cancer.match.ss1, colp="LLR.p.adj", xfontsize=5, yfontsize=7, colors=c("gray90", "#3794bf","black","#df8640"), "AFR")
# ggsave("AFR_matched_allcancertypes_zm.pdf", device = "pdf")
## plot only complete and non-NA cases
x11(type="cairo")
AFR.mat.pval.m.new = AFR.mat.pval.m
## Blank out adjusted p values above 0.1 so complete.cases() drops them below.
AFR.mat.pval.m.new[,"LLR.p.adj"][AFR.mat.pval.m.new$LLR.p.adj>0.1] = NA
plotmatrix(AFR.mat.pval.m.new[complete.cases(AFR.mat.pval.m.new),], colp="LLR.p.adj", xfontsize=10, yfontsize=10, colors=c("#3794bf","black","#df8640","gray90"), "AFR")
# ggsave("AFR_complete_cases_zm.pdf", device = "pdf")
#
# ## AMR
# x11(type="cairo")
# plotmatrix(AMR.mat.pval.m, colp="LLR.p.adj", xfontsize=8, yfontsize=5, colors=c("black","#3794bf","#df8640","gray90"), "AMR")
# plotmatrix(AMR.cancer.match.ss1, colp="LLR.p.adj", xfontsize=5, yfontsize=7, colors=c("black","#df8640","gray90", "#3794bf"), "AMR")
#
# ## SAS
# x11(type="cairo")
# plotmatrix(SAS.mat.pval.m, colp="LLR.p.adj", xfontsize=8, yfontsize=5, colors=c("gray90", "black","#3794bf","#df8640"), "SAS")
# plotmatrix(SAS.cancer.match.ss1, colp="LLR.p.adj", xfontsize=5, yfontsize=7, colors=c("black","#df8640","gray90", "#3794bf"), "SAS")
# violin plots
## Violin + boxplot of the LLR distribution for one disease, comparing a
## cancer cohort (selected by `cancertypeparse`) against the 1KGP3 reference,
## both restricted to the population selected by `popparse`.
##
## cancerdata      per-sample LLR table for ICGC/TCGA
## refdata         per-sample LLR table for 1KGP3
## popparse        filter expression as a string, e.g. "population==\"EUR\""
## cancertypeparse filter expression as a string selecting the histology
## dz              broad_phenotype to test and plot
##
## Prints the plot, then returns list(two-sided, less, greater) Mann-Whitney
## p values (1KGP3 vs cohort; "less" tests 1KGP3 LLR < cohort LLR).
## NOTE(review): the print(paste(...)) calls only emit labels to the console;
## the p values themselves are returned, not printed.
plotviolin <- function(cancerdata, refdata, popparse, cancertypeparse, dz)
{
  ## eval(parse(text = ...)) runs caller-supplied filter expressions.
  tcga = subset(cancerdata, eval(parse(text=popparse)) & eval(parse(text=cancertypeparse)),
                select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
  kgp3 = subset(refdata, eval(parse(text=popparse)),
                select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
  merged = rbind(tcga, kgp3)
  ## mann whitney test for melanoma
  mm.1kgp3 = kgp3[kgp3$broad_phenotype==dz,]
  mm.tcga = tcga[tcga$broad_phenotype==dz,]
  mm.merged = merged[merged$broad_phenotype == dz,]
  print(paste(popparse,"_", cancertypeparse, "_", dz, " p.val, 2-sided"))
  jm1 = wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR)$p.value
  print(paste(popparse,"_", cancertypeparse, "_", dz, " p.val, less"))
  jm2 = wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "less")$p.value ## x < y
  print(paste(popparse,"_", cancertypeparse, "_", dz, " p.val, greater"))
  jm3 = wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "greater")$p.value ## x > y
  ## plot violin and boxplot for melanoma
  pd <- position_dodge(0.9)
  pmain2 = ggplot(mm.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
  phisto2 = geom_violin()
  phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
  phisto4 = stat_summary(fun.y=median)   # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use fun=)
  ptitle = ggtitle(paste(gsub("population==","",popparse), " ", gsub("histology_abbreviation==", "", cancertypeparse)))
  plabels = labs(x=dz,y="LLR distribution")
  jm4 = pmain2 + phisto2 + phisto3 + phisto4 +
    ptitle + plabels + theme(legend.position="none") +
    scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
  # +
  # geom_dotplot(binaxis='y', stackdir='center', dotsize=0.1, colour = "black", fill = "black")
  print(jm4)
  return(list(jm1,jm2,jm3))
}
## item1: 2 sided unadj pvalue, 1sided x<y, 1sided x>y
# Melanoma-Melanoma
x11(type="cairo")
## Melanoma risk LLR in EUR melanoma patients vs EUR 1KGP3.
p.melanoma.melanoma = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                                               "population==\"EUR\"",
                                               "histology_abbreviation==\"Skin-Melanoma\"",
                                               dz = "Melanoma"))
names(p.melanoma.melanoma) = c("2sided","1sided_less","1sided_greater")
## Attach the BH-adjusted p value and cohort-size string from the (commented
## out) preprocessing section for reference.
p.melanoma.melanoma$twosided_adj.p = EUR.mat.pval.m[EUR.mat.pval.m$broad_phenotype=="Melanoma" & EUR.mat.pval.m$histology_abbreviation=="Skin-Melanoma",]$LLR.p.adj
p.melanoma.melanoma$size = EUR.mat.numSNPs["Melanoma","Skin-Melanoma"]
# ggsave("violin-melanoma.melanoma2_zm.pdf", device = "pdf", useDingbats=FALSE)
# melanoma-obesity
x11(type="cairo")
## Negative control: obesity risk in melanoma patients.
p.melanoma.obesity = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                                              "population==\"EUR\"",
                                              "histology_abbreviation==\"Skin-Melanoma\"",
                                              dz = "Obesity"))
names(p.melanoma.obesity) = c("2sided","1sided_less","1sided_greater")
p.melanoma.obesity$twosided_adj.p = EUR.mat.pval.m[EUR.mat.pval.m$broad_phenotype=="Obesity" & EUR.mat.pval.m$histology_abbreviation=="Skin-Melanoma",]$LLR.p.adj
p.melanoma.obesity$size = EUR.mat.numSNPs["Obesity","Skin-Melanoma"]
# ggsave("violin-melanoma-obesity_zm.pdf", device = "pdf", useDingbats=FALSE)
# Prost-AdenoCA and Prostate_cancer
## Attach 1KGP3 sample sex so the reference can be restricted to males.
gender.1kg = subset(read.delim("1kg-sampleinfo.txt", header=T, stringsAsFactors=FALSE, sep="\t"), select=c(sampleID, Gender))
LR.1kg.final.gender = merge(LR.1kg.final, gender.1kg, by.x="sample.id", by.y="sampleID", all.x=TRUE)
LR.1kg.final.male = subset(LR.1kg.final.gender, Gender == "male")
x11(type="cairo")
## NOTE(review): this reference includes both sexes (see comment above the
## cc.eur.bp vectors); the male-only version follows below.
p.prostate.prostate = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                                               "population==\"EUR\"",
                                               "histology_abbreviation==\"Prost-AdenoCA\"",
                                               dz = "Prostate_cancer"))
# ggsave("violin-prostate-prostate_zm.pdf", device = "pdf", useDingbats=FALSE)
x11(type="cairo")
## male only
p.prostate.prostate.m = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final.male,
                                                 "population==\"EUR\"",
                                                 "histology_abbreviation==\"Prost-AdenoCA\"",
                                                 dz = "Prostate_cancer"))
names(p.prostate.prostate.m) = c("twosided","onesided_less","onesided_greater")
## Rescale the raw two-sided p by the BH adjustment factor (adj/raw * rank)
## derived from the cc.eur table for this cell.
p.prostate.prostate.m$twosided.adj = p.prostate.prostate.m$twosided *
  (cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p.adj"] /
     cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p"] *
     cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","rank"])
## male + female
names(p.prostate.prostate) = c("twosided","onesided_less","onesided_greater")
p.prostate.prostate$twosided.adj = p.prostate.prostate$twosided *
  (cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p.adj"] /
     cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p"] *
     cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","rank"])
# ggsave("violin-prostate-prostate-male-only_zm.pdf", device = "pdf", useDingbats=FALSE)
## to check increase or decrease in risk
as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
"population==\"EUR\"",
"histology_abbreviation==\"Stomach-AdenoCA\"",
dz = "Behcet's_disease"))
dzs = c("Alcohol_dependence","Atopic_eczema","Celiac_disease","Coronary_artery_disease","Duodenal_ulcer","Glaucoma","Graves'_disease","Kawasaki_disease","Multiple_sclerosis","Myocardial_infarction","Narcolepsy","Polycystic_ovary_syndrome","Primary_biliary_cirrhosis","Rheumatoid_arthritis","Schizophrenia","Systemic_lupus_erythematosus","Systemic_sclerosis","Type_1_diabetes","Type_2_diabetes","Uterine_leiomyoma","Vitiligo","Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis","Inflammatory_bowel_disease","Nasopharyngeal_carcinoma","Lung_cancer","Endometrial_cancer","Esophageal_cancer","Thyroid_cancer","HBV-induced_hepatocellular_carcinoma")
aa = sapply(dzs, function(x) plotviolin(LR.cancer.final, LR.1kg.final,
"population==\"EAS\"",
"histology_abbreviation==\"Liver-HCC\"",
dz = x))
row.names(aa) = c("2sided","1sided_less","1sided_greater_decrease")
dzs = c("Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis")
ab = sapply(dzs, function(x) plotviolin(LR.cancer.final, LR.1kg.final,
"population==\"EAS\"",
"histology_abbreviation==\"Stomach-AdenoCA\"",
dz = x))
row.names(ab) = c("2sided","1sided_less","1sided_greater_decrease")
# breast_cancer and Breast-AdenoCA
## Violin + Mann-Whitney for Breast_cancer risk in EUR Breast-AdenoCA patients:
## first against all 1KGP3 EUR, then against 1KGP3 EUR females only.
## (LR.1kg.final.gender is created earlier in the script by the 1KG gender
## merge -- run order matters.)
x11(type="cairo")
p.breast.breast = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                                           "population==\"EUR\"",
                                           "histology_abbreviation==\"Breast-AdenoCA\"",
                                           dz = "Breast_cancer"))
LR.1kg.final.female = subset(LR.1kg.final.gender, Gender == "female")
x11(type="cairo")
p.breast.breast.f = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final.female,
                                             "population==\"EUR\"",
                                             "histology_abbreviation==\"Breast-AdenoCA\"",
                                             dz = "Breast_cancer"))
########################
# histograms: compare (1KGp3 EUR) vs (ICGC_TCGA EUR melanoma patients) LRs for ALL VariMed diseases
## keep only the columns needed for plotting, then stack the two cohorts
tcga.EUR.melanoma = subset(LR.cancer.final, population == "EUR" & histology_abbreviation == "Skin-Melanoma",
                           select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
kgp3.EUR.melanoma = subset(LR.1kg.final, population == "EUR",
                           select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
merged.EUR.melanoma = rbind(tcga.EUR.melanoma, kgp3.EUR.melanoma)
## plotting
## boxplot of the first 10 (alphabetically sorted) phenotypes, TCGA melanoma
## cohort only; ptitle/plabels defined here are reused by the paged loop below
x11(type="cairo")
pmain = ggplot(tcga.EUR.melanoma[tcga.EUR.melanoma$broad_phenotype %in% dz.tcga[1:10],], aes(x=broad_phenotype, y=LLR))
phisto = geom_boxplot()
ptitle = ggtitle("Skin-Melanoma")
# pfacet = facet_wrap( ~ broad_phenotype, scales="free", ncol=1)
plabels = labs(x="broad phenotype",y="LLR distribution")
# paxes = theme(axis.title.x = element_text(face = "bold",colour = "black", size = 20),
#               axis.title.y = element_text(face = "bold",colour = "black", size = 20),
#               axis.text.x = element_text(size = 15), axis.text.y = element_text(size = 15))
pmain + phisto + ptitle + plabels + scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip() + geom_jitter(height = 0, width = 0.1)
## new
## Paged boxplots: compare 1KGP3 vs TCGA LLR distributions for ALL VariMed
## phenotypes, `by` phenotypes per page (reuses ptitle/plabels from above).
by=10
for (i in seq(1,length(dz.tcga),by=by))
{
  ## clamp the slice end so the last page does not index past the vector --
  ## dz.tcga[i:(i+by-1)] would otherwise yield NA entries, and `%in%` against
  ## an NA-containing set silently pulls in rows with NA broad_phenotype
  page.dzs = dz.tcga[i:min(i + by - 1, length(dz.tcga))]
  pmain2 = ggplot(merged.EUR.melanoma[merged.EUR.melanoma$broad_phenotype %in% page.dzs,],
                  aes(x=broad_phenotype, y=LLR, fill = factor(dataset)))
  phisto2 = geom_boxplot(width=0.7, outlier.shape=3) ## shape 3 = '+'
  j = pmain2 + phisto2 + ptitle + plabels +
    scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
  print(j)  ## explicit print needed inside a for loop for ggplot objects
}
# Mann-Whitney tests & violin plots for TCGA 'Skin-Melanoma' EUR patients for LRs for 'Melanoma' (pos) and 'Obesity' (neg) and 'Renal_cell_cancer'
## mann whitney test for melanoma
## x = 1KGP3 reference, y = TCGA patients throughout: "less" small => patients
## carry higher Melanoma LLRs than the reference
mm.1kgp3 = kgp3.EUR.melanoma[kgp3.EUR.melanoma$broad_phenotype=="Melanoma",]
mm.tcga = tcga.EUR.melanoma[tcga.EUR.melanoma$broad_phenotype=="Melanoma",]
mm.merged = merged.EUR.melanoma[merged.EUR.melanoma$broad_phenotype == "Melanoma",]
print("Melanoma-Melanoma p.val, 2-sided")
wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR)$p.value
print("Melanoma-Melanoma p.val, less")
wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "less")$p.value ## x < y
print("Melanoma-Melanoma p.val, greater")
wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "greater")$p.value ## x > y
## plot violin and boxplot for melanoma
## NOTE(review): stat_summary(fun.y=...) is deprecated in ggplot2 >= 3.3
## (renamed `fun`); also `ptitle` here is still "Skin-Melanoma" from above
pd <- position_dodge(0.9)
pmain2 = ggplot(mm.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
phisto2 = geom_violin()
phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
phisto4 = stat_summary(fun.y=median)
plabels = labs(x="Melanoma",y="LLR distribution")
pmain2 + phisto2 + phisto3 + phisto4 +
  ptitle + plabels + theme(legend.position="none") +
  scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
#---------
## mann whitney test for prostate-prostate (male-only)
## reference cohort restricted to 1KGP3 males (LR.1kg.final.male, built earlier)
tcga.EUR.prostate = subset(LR.cancer.final, population == "EUR" & histology_abbreviation == "Prost-AdenoCA",
                           select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
kgp3.EUR.prostate = subset(LR.1kg.final.male, population == "EUR",
                           select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
pp.1kgp3 = kgp3.EUR.prostate[kgp3.EUR.prostate$broad_phenotype=="Prostate_cancer",]
pp.tcga = tcga.EUR.prostate[tcga.EUR.prostate$broad_phenotype=="Prostate_cancer",]
print("Prost-AdenoCA-ProstateCancer p.val, 2-sided")
wilcox.test(pp.1kgp3$LLR, pp.tcga$LLR)$p.value
print("Prost-AdenoCA-ProstateCancer p.val, less")
wilcox.test(pp.1kgp3$LLR, pp.tcga$LLR, alternative = "less")$p.value ## x < y
print("Prost-AdenoCA-ProstateCancer p.val, greater")
wilcox.test(pp.1kgp3$LLR, pp.tcga$LLR, alternative = "greater")$p.value ## x > y
# ## plot violin and boxplot for Renal_cell_cancer
# pd <- position_dodge(0.9)
# pmain2 = ggplot(mr.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
# phisto2 = geom_violin()
# phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
# phisto4 = stat_summary(fun.y=median)
# plabels = labs(x="Renal_cell_cancer",y="LLR distribution")
# pmain2 + phisto2 + phisto3 + phisto4 +
# ptitle + plabels + theme(legend.position="none") +
# scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
# ## mann whitney test for Obesity
# mo.1kgp3 = kgp3.EUR.melanoma[kgp3.EUR.melanoma$broad_phenotype=="Obesity",]
# mo.tcga = tcga.EUR.melanoma[tcga.EUR.melanoma$broad_phenotype=="Obesity",]
# mo.merged = merged.EUR.melanoma[merged.EUR.melanoma$broad_phenotype == "Obesity",]
#
#
# print("Melanoma-Obesity p.val, 2-sided")
# wilcox.test(mo.1kgp3$LLR, mo.tcga$LLR)$p.value
# print("Melanoma-Obesity p.val, less")
# wilcox.test(mo.1kgp3$LLR, mo.tcga$LLR, alternative = "less")$p.value ## x < y
# print("Melanoma-Obesity p.val, greater")
# wilcox.test(mo.1kgp3$LLR, mo.tcga$LLR, alternative = "greater")$p.value ## x > y
#
# ## plot violin and boxplots for obesity
# pmain2 = ggplot(mo.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
# phisto2 = geom_violin()
# phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
# phisto4 = stat_summary(fun.y=median)
# plabels = labs(x="Obesity",y="LLR distribution")
# pmain2 + phisto2 + phisto3 + phisto4 +
# ptitle + plabels + theme(legend.position="none") +
# scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
| /varimed_1KG_TCGA_plots.R | no_license | cjieming/R_codes | R | false | false | 37,177 | r | setwd("/Users/jiemingchen/Documents/varimed/pcawg/dz_risk_var_varimed_staging_LR_final_ext_sex_eth_spop_zm")
## my own library
source("/Users/jiemingchen/R_codes/jmRlib.R")
library(ggplot2)
library(RColorBrewer)
library(plyr)
library(dplyr)
library(reshape2)
library(magrittr)
## input combined risks for 1KGp3
## note that LR = exp(sum(log(LR))) of all SNPs with given dz from same sample; LR_max = SNP with max abs logLR
LR.1kg = read.delim("combined_dz_risk_1000GP_spop_zm.txt", header = T, sep = "\t", stringsAsFactors = FALSE, na.strings = "") ## curr one
LR.1kg.final = LR.1kg %>% mutate(LLR = log10(LR), LLR_max = log10(LR_max))
LR.1kg.final$dataset = "1KGP3"
## input combined risks for ICGC_TCGA
LR.cancer = read.delim("combined_dz_risk_ICGC_TCGA_spop_zm_histology.txt", header = T, sep = "\t", stringsAsFactors = FALSE, na.strings = "")
LR.cancer.final = LR.cancer %>% mutate(LLR = log10(LR), LLR_max = log10(LR_max))
LR.cancer.final$dataset = "ICGC_TCGA"
### some stats
## number of cancer types in ICGC_TCGA patients
cancertypes = sort(unique(LR.cancer.final$histology_abbreviation))
## number of broad_phenotypes
dz.1kg = sort(unique(LR.1kg.final$broad_phenotype))
dz.tcga = sort(unique(LR.cancer.final$broad_phenotype))
# Preprocessing datasets and calculate Mann Whitney test p values (unadj) and BH (adj)
## Preprocessing datasets and calculate Mann Whitney test p values (unadj) and BH (adj) ####
## this function preprocesses the datasets into 3 matrices FOR ONE POPULATION
## Loop Mann-Whitney test for TCGA vs 1KGp3 (will take a while... about 6 min)
## 1: num(dz)-by-num(cancertypes) matrix of MW p values (unadj)
## 2: num(dz)-by-num(cancertypes) matrix of size of subsets (note subsets<10 of size are NA)
## 3: melted down version of 1 with 2 columns of dz and cancertypes as primary keys for p values (unadj)
## 4: melted down version subsetted by cancertypes to match dz_cancers
## input requires columns LLR and histology_abbreviation and broad_phenotype
## tflags 1: MW unadjusted p values
## 2: size of datasets
## 3: numSNPs (mean num over all individuals in each dataset, risk + protective)
## Build the disease x cancer-type summary matrices for ONE population.
##   pop         : population filter as an R expression string (eval(parse())-ed
##                 inside subset()), e.g. "population == \"EUR\""
##   dzes        : VariMed broad_phenotypes (rows of each output matrix)
##   cancertypes : histology_abbreviations (columns of each output matrix)
##   tflag == 0  : list(pval matrix, melted pval table with BH-adjusted p and
##                 rank, cancer-matched subset of the melted table)
##   tflag == 1  : matrix of cohort sizes "1KGP3: n |TCGA: m" (troubleshooting)
##   tflag else  : matrix of mean per-individual SNP counts (risk + protective)
## NOTE(review): for tflag==2 the caller passes LLRcol="c(SNP_risk,
## SNP_protective)", but the nested helper hard-codes those columns and never
## reads LLRcol in that branch -- the argument is effectively ignored there.
preprocess_mat_by_pop <- function(pop, dzes, cancertypes, cancerdata, ref1kgdata, LLRcol, tflag=0)
{
## nested function to make an array of dz by cancer for apply
## (one cell: compare one disease's LLRs between one cancer cohort and 1KGP3)
mwp <- function(cancerdata, ref1kgdata, pop, LLRcol, cancertype, dz, tflag=0)
{
tcga = subset(cancerdata, eval(parse(text=pop)) & broad_phenotype == dz & histology_abbreviation == cancertype)
onekg = subset(ref1kgdata, eval(parse(text=pop)) & broad_phenotype == dz)
# if tflag == 0, unadjusted MW pvalues
# arbitrary min datapoint of 10 in either dataset to compute MW test
# 2.sided unadjusted
# tflag = troubleshooting flag
if (tflag == 0)
{
## NA when either cohort has < 11 samples (guard requires strictly > 10)
return(ifelse(nrow(tcga) > 10 & nrow(onekg) > 10, wilcox.test(tcga[,LLRcol], onekg[,LLRcol])$p.value, NA))
}
else if (tflag == 1)
{
## tflag == 1, number of individuals in each dataset
return(paste("1KGP3:", nrow(onekg),"|TCGA:", nrow(tcga)))
}
else
{
## tflag == 2, mean over individuals of number of SNPs = numRiskAlleles + numProtectiveAlleles
numSNPs.tcga = mean(tcga[,"SNP_risk"] + tcga[,"SNP_protective"])
numSNPs.1kgp3 = mean(onekg[,"SNP_risk"] + onekg[,"SNP_protective"])
return(paste("1KGP3:", numSNPs.1kgp3,"|TCGA:", numSNPs.tcga))
}
}
# produce 2 cancertypes-by-dz matrices:
# if tflag == 0, unadjusted MW pvalues
# if tflag == 1, size of datasets (for troubleshooting)
if(tflag == 0)
{
mat.pval = as.data.frame(sapply(cancertypes, function(i) sapply(dzes, function(j) mwp(cancerdata, ref1kgdata, pop, "LLR", i, j, tflag))))
## (3) melt pval matrix for heatmap plotting, by histology,
## + unadj pval + BH-adj p val
mat.pval2 = cbind(rownames(mat.pval), mat.pval)
colnames(mat.pval2)[1] = "broad_phenotype"
mat.pval.m = melt(mat.pval2, variable.name = "histology_abbreviation", value.name = "LLR.p", id.vars = "broad_phenotype")
## BH-adj p values
## (adjusted across ALL dz x cancer cells of this population at once)
mat.pval.m$LLR.p.adj = p.adjust(mat.pval.m$LLR.p, method = "BH") ## n = 4014 (excluding NAs)
mat.pval.m$rank = rank(mat.pval.m$LLR.p)
## subset1: cancer match subset
## (hard-coded list of VariMed phenotypes that map onto PCAWG histologies)
mat.ss1.cancer.match = mat.pval.m %>% subset(broad_phenotype == "Breast_cancer" |
                                             broad_phenotype == "Colorectal_cancer" |
                                             broad_phenotype == "Esophageal_cancer" |
                                             broad_phenotype == "Renal_cell_cancer" |
                                             broad_phenotype == "Renal_cell_carcinoma" |
                                             broad_phenotype == "HCV-induced_hepatocellular_carcinoma" |
                                             broad_phenotype == "HBV-induced_hepatocellular_carcinoma" |
                                             broad_phenotype == "Hepatocellular_carcinoma" |
                                             broad_phenotype == "Lung_adenocarcinoma" |
                                             broad_phenotype == "Lung_cancer" |
                                             broad_phenotype == "Non-Small_cell_lung_cancer" |
                                             broad_phenotype == "Lung_cancer" |
                                             broad_phenotype == "Squamous_cell_carcinoma_of_lungs" |
                                             broad_phenotype == "Follicular_lymphoma" |
                                             broad_phenotype == "Chronic_lymphocytic_leukemia" |
                                             broad_phenotype == "Myeloproliferative_disorders" |
                                             broad_phenotype == "Ovarian_cancer" |
                                             broad_phenotype == "Pancreatic_cancer" |
                                             broad_phenotype == "Prostate_cancer" |
                                             broad_phenotype == "Melanoma" |
                                             broad_phenotype == "Gastric_cancer" |
                                             broad_phenotype == "Papillary_thyroid_cancer" |
                                             broad_phenotype == "Thyroid_cancer")
## return
return(list(mat.pval, mat.pval.m, mat.ss1.cancer.match))
}
else if(tflag == 1)
{
## tflag == 1, number of individuals in each dataset
mat.nums = as.data.frame(sapply(cancertypes, function(i) sapply(dzes, function(j) mwp(cancerdata, ref1kgdata, pop, "LLR", i, j, tflag)))) ## debug
return(mat.nums)
}
else
{
## tflag == 2, mean over individuals of number of SNPs = numRiskAlleles + numProtectiveAlleles
mat.numSNPs = as.data.frame(sapply(cancertypes, function(i) sapply(dzes, function(j) mwp(cancerdata, ref1kgdata, pop, "c(SNP_risk, SNP_protective)", i, j, tflag))))
return(mat.numSNPs)
}
}
# ## EUR only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# EUR.procdata = preprocess_mat_by_pop("population == \"EUR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# EUR.mat.pval = as.data.frame(EUR.procdata[1])
# EUR.mat.pval.m = as.data.frame(EUR.procdata[2])
# EUR.cancer.match.ss1 = as.data.frame(EUR.procdata[3])
# EUR.mat.nums = preprocess_mat_by_pop("population == \"EUR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# EUR.mat.numSNPs = preprocess_mat_by_pop("population == \"EUR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## EAS only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# EAS.procdata = preprocess_mat_by_pop("population == \"EAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# EAS.mat.pval = as.data.frame(EAS.procdata[1])
# EAS.mat.pval.m = as.data.frame(EAS.procdata[2])
# EAS.cancer.match.ss1 = as.data.frame(EAS.procdata[3])
# EAS.mat.nums = preprocess_mat_by_pop("population == \"EAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# EAS.mat.numSNPs = preprocess_mat_by_pop("population == \"EAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## AMR only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# AMR.procdata = preprocess_mat_by_pop("population == \"AMR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# AMR.mat.pval = as.data.frame(AMR.procdata[1])
# AMR.mat.pval.m = as.data.frame(AMR.procdata[2])
# AMR.cancer.match.ss1 = as.data.frame(AMR.procdata[3])
# AMR.mat.nums = preprocess_mat_by_pop("population == \"AMR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# AMR.mat.numSNPs = preprocess_mat_by_pop("population == \"AMR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## AFR only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# AFR.procdata = preprocess_mat_by_pop("population == \"AFR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# AFR.mat.pval = as.data.frame(AFR.procdata[1])
# AFR.mat.pval.m = as.data.frame(AFR.procdata[2])
# AFR.cancer.match.ss1 = as.data.frame(AFR.procdata[3])
# AFR.mat.nums = preprocess_mat_by_pop("population == \"AFR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# AFR.mat.numSNPs = preprocess_mat_by_pop("population == \"AFR\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
#
# ## SAS only ~~~~~~~~~~~~~~~~~~~~~~~~
# ## loop
# ## user system elapsed
# ## 321.887 40.770 364.277
# system.time({
# SAS.procdata = preprocess_mat_by_pop("population == \"SAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=0)
# })
# SAS.mat.pval = as.data.frame(SAS.procdata[1])
# SAS.mat.pval.m = as.data.frame(SAS.procdata[2])
# SAS.cancer.match.ss1 = as.data.frame(SAS.procdata[3])
# SAS.mat.nums = preprocess_mat_by_pop("population == \"SAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "LLR", tflag=1)
# SAS.mat.numSNPs = preprocess_mat_by_pop("population == \"SAS\"", dz.tcga, cancertypes, LR.cancer.final, LR.1kg.final, "c(SNP_risk, SNP_protective)", tflag=2)
# Plot matrices
#################################################################
# Heatmap of p.values (adj) of cancertypes vs diseases in VariMed
## ggplot cant do heatmap v well
## ggplot cant border cells v well
library(data.table)  ## NOTE(review): not referenced in plotmatrix() below -- confirm it is needed elsewhere
## Heatmap of (adjusted) p-values: histology_abbreviation x broad_phenotype.
##   mat    : melted p-value table (needs broad_phenotype, histology_abbreviation
##            and the column named by colp)
##   colp   : name of the p-value column to categorize, e.g. "LLR.p.adj"
##   colors : fill colors assigned to the SORTED levels of the derived p.cat
##            factor -- NOTE(review): the category-to-color pairing depends on
##            which categories are present; callers pass population-specific
##            color orders to compensate. Fragile; verify per plot.
##   pop    : population label, prefixed onto the y-axis title
plotmatrix <- function(mat, colp, xfontsize, yfontsize, colors, pop)
{
## discretize/categorize p values into 3 categories with new column p.cat
## (values > 0.1 fall into "0.1-"; NA p-values stay NA)
mat$p.cat = ifelse(mat[,colp] <= 0.01, "0-0.01",
                   ifelse(mat[,colp] > 0.01 & mat[,colp] <=0.05, "0.01-0.05",
                          ifelse(mat[,colp] > 0.05 & mat[,colp] <=0.1, "0.05-0.1", "0.1-")))
## change >0.1 to NA
# mat[,colp] = ifelse(mat[,colp] > 0.1, NA, mat[,colp])
# mat = mat[complete.cases(mat),]
## heatmap 1 -- everything
## (the first legend.position="none" is overridden by the later "right")
pl = ggplot(mat, aes(histology_abbreviation, broad_phenotype)) +
geom_tile(aes(fill = p.cat), colour = "white") +
theme(legend.position = "none",
      axis.text.x = element_text(size = xfontsize, angle = 330,
                                 hjust = 0, color = "black"),
      axis.text.y = element_text(size = yfontsize)) +
theme(legend.position="right") +
scale_fill_manual(values = colors, na.value = "white") +
labs(y=paste(pop,"_broad_phenotype"),x="histology_abbreviation")
# +
# theme(panel.border=element_rect(fill = NA, colour=alpha('black', 0.5), size=5))
pl
}
## EUR
## NOTE(review): EUR.mat.pval.m / EUR.cancer.match.ss1 / EUR.mat.numSNPs are
## produced by the commented-out preprocess_mat_by_pop() calls above -- those
## must be re-enabled (or the objects loaded) before this section runs.
x11(type="cairo")
plotmatrix(EUR.mat.pval.m, colp="LLR.p.adj", xfontsize=12, yfontsize=8, colors=c("black","#3794bf","#df8640","gray90"), "EUR")
# ggsave("EUR_alldz_allcancertypes_zm.pdf", device = "pdf")
x11(type="cairo")
plotmatrix(EUR.cancer.match.ss1, colp="LLR.p.adj", xfontsize=18, yfontsize=18, colors=c("black","gray90", "#3794bf","#df8640"), "EUR")
# ggsave("EUR_matched_allcancertypes_zm.pdf", device = "pdf")
## plot only complete and non-NA cases #####==============
## (cells with adjusted p > 0.1 are blanked to NA, then dropped)
x11(type="cairo")
EUR.mat.pval.m.new = EUR.mat.pval.m
EUR.mat.pval.m.new[,"LLR.p.adj"][EUR.mat.pval.m.new$LLR.p.adj>0.1] = NA
plotmatrix(EUR.mat.pval.m.new[complete.cases(EUR.mat.pval.m.new),], colp="LLR.p.adj", xfontsize=18, yfontsize=18, colors=c("black","#3794bf","#df8640","gray90"), "EUR")
# ggsave("EUR_complete_cases_zm.pdf", device = "pdf")
####### bubble plot for complete cases EUR #######
cc.eur = EUR.mat.pval.m.new[complete.cases(EUR.mat.pval.m.new),]
cc.eur$p.cat = ifelse(cc.eur[,"LLR.p.adj"] <= 0.01, "0-0.01",
                      ifelse(cc.eur[,"LLR.p.adj"] > 0.01 & cc.eur[,"LLR.p.adj"] <=0.05, "0.01-0.05",
                             ifelse(cc.eur[,"LLR.p.adj"] > 0.05 & cc.eur[,"LLR.p.adj"] <=0.1, "0.05-0.1", "0.1-")))
colors=c("black","#3794bf","#df8640","gray90")  ## global; reused by the bubble plot below
## parse the "1KGP3: <n> |TCGA: <m>" strings from the numSNPs matrix into the
## two numeric halves, then average them for the bubble size
cc.eur$numSNPs = unlist(mapply(function(x,y) return(data.frame(EUR.mat.numSNPs[x,y])), cc.eur$broad_phenotype, cc.eur$histology_abbreviation))
cc.eur$numSNPs = as.character(cc.eur$numSNPs)
cc.eur$onekgSNPs =
as.numeric(cc.eur$numSNPs %>% { gsub("1KGP3\\: ", "", .) } %>% { gsub(" \\|TCGA: .*", "", .) })
cc.eur$tcgaSNPs =
as.numeric(cc.eur$numSNPs %>% { gsub(".*TCGA: ", "", .) })
cc.eur$numSNPsMean = (cc.eur$onekgSNPs + cc.eur$tcgaSNPs) / 2
cc.eur$p.cat = factor(cc.eur$p.cat)
# order of cancer type [MANUAL]
cc.eur$histology_abbreviation = factor(cc.eur$histology_abbreviation,
                                       levels=c("Prost-AdenoCA","Skin-Melanoma","Eso-AdenoCA","Stomach-AdenoCA","Lymph-CLL"))
# cc.eur$broad_phenotype = factor(cc.eur$broad_phenotype,
#                                 levels=rev(c("")))
write.table(cc.eur, "cc.eur.txt", sep="\t", quote=F)
## p values for more, less and 2 sided
#' Mann-Whitney (Wilcoxon rank-sum) comparison of one VariMed disease's LLR
#' distribution between a cancer cohort of one histology and the 1KGP3
#' reference, within one population.
#'
#' @param cancerdata  per-sample risk table for cancer patients; needs the
#'   columns referenced by `pop`, plus broad_phenotype,
#'   histology_abbreviation and the column named by `LLRcol`.
#' @param ref1kgdata  per-sample risk table for the 1KGP3 reference.
#' @param pop         population filter as an R expression string,
#'   e.g. "population==\"EUR\"" (kept as eval(parse(...)) inside subset()
#'   for interface compatibility with existing callers).
#' @param LLRcol      name of the log-likelihood-ratio column, usually "LLR".
#' @param cancertype  histology_abbreviation to subset the cancer cohort to.
#' @param dz          VariMed broad_phenotype to compare.
#' @return one-row data.frame: dz, cancertype, and unadjusted two-sided /
#'   greater / less Mann-Whitney p-values (x = cancer cohort, y = reference);
#'   all three are NA when either cohort has fewer than 11 samples.
mwp <- function(cancerdata, ref1kgdata, pop, LLRcol, cancertype, dz)
{
tcga = subset(cancerdata, eval(parse(text=pop)) & broad_phenotype == dz & histology_abbreviation == cancertype)
onekg = subset(ref1kgdata, eval(parse(text=pop)) & broad_phenotype == dz)
## arbitrary minimum of > 10 data points in BOTH cohorts before testing;
## scalar if/else (not vectorized ifelse) so the guard and the column
## extraction each run once instead of three times
if (nrow(tcga) > 10 && nrow(onekg) > 10) {
  x = tcga[, LLRcol]
  y = onekg[, LLRcol]
  twosided = wilcox.test(x, y)$p.value
  greater = wilcox.test(x, y, alternative = "greater")$p.value
  less = wilcox.test(x, y, alternative = "less")$p.value
} else {
  twosided = greater = less = NA_real_
}
return(data.frame(dz, cancertype, twosided, greater, less, stringsAsFactors = FALSE))
}
## manual ## note that the prostate-prostate here contains females + males in 1KGP3
## cc.eur.bp / cc.eur.ha must stay index-aligned (pair i is tested together);
## mwp() returns (dz, cancertype, ...) so the colnames below map dz ->
## broad_phenotype and cancertype -> histology_abbreviation in that order
cc.eur.bp = c("Hair_color", "Melanoma", "Hair_color", "Prostate_cancer", "Melanoma", "Behcet's_disease")
cc.eur.ha = c("Eso-AdenoCA", "Eso-AdenoCA", "Lymph-CLL", "Prost-AdenoCA", "Skin-Melanoma", "Stomach-AdenoCA")
mwpp.eur = as.data.frame(t(mapply(function(x,y) mwp(LR.cancer.final, LR.1kg.final, "population==\"EUR\"", "LLR", x,y), cc.eur.ha, cc.eur.bp)))
colnames(mwpp.eur) = c("broad_phenotype","histology_abbreviation","twosided","greater","less")
## direction of effect: which one-sided alternative is better supported
mwpp.eur$risk = ifelse(as.numeric(mwpp.eur$greater) < as.numeric(mwpp.eur$less), "greater", "less")
write.table(as.matrix(mwpp.eur), "cc.eur.mwp.txt", sep="\t", quote=F)
# plot
## bubble plot: size = mean SNP count per cell, fill/outline = p-value category
## (uses the global `colors` vector defined with cc.eur above)
x11(type="cairo")
ggplot(cc.eur, aes(x=histology_abbreviation, y=broad_phenotype, size=numSNPsMean, fill=p.cat, color=p.cat)) +
geom_point(shape = 21) +
theme(legend.position = "none",
      axis.text.x = element_text(size = 18, angle = 330,
                                 hjust = 0, color = "black"),
      axis.text.y = element_text(size = 18)) +
theme(legend.position="right") +
scale_fill_manual(values = colors, na.value = "white") +
scale_color_manual(values = colors, na.value = "white") +
labs(y=paste("EUR","_broad_phenotype"),x="histology_abbreviation") +
scale_size_area(max_size = 20) +
theme(legend.title = element_text(size=15, face="bold"), legend.text=element_text(size=15))
## this add label for numbers
# + geom_text(aes(label=round(numSNPsMean)), size=5, nudge_x=0.0, nudge_y=0.6)
# ggsave("EUR_complete_cases_bubble_zm.pdf", device = "pdf", useDingbats=FALSE, width = 10.9, height = 9.36)
#####==============
## EAS
## Same pipeline as the EUR section, for the EAS population.
## NOTE(review): EAS.mat.pval.m etc. come from the commented-out
## preprocess_mat_by_pop() calls above -- re-enable before running.
x11(type="cairo")
plotmatrix(EAS.mat.pval.m, colp="LLR.p.adj", xfontsize=12, yfontsize=8, colors=c("black","#3794bf","#df8640","gray90"), "EAS")
# ggsave("EAS_alldz_allcancertypes_zm.pdf", device = "pdf")
x11(type="cairo")
plotmatrix(EAS.cancer.match.ss1, colp="LLR.p.adj", xfontsize=18, yfontsize=18, colors=c("black","#df8640","gray90", "#3794bf"), "EAS")
# ggsave("EAS_matched_allcancertypes_zm.pdf", device = "pdf")
## plot only complete and non-NA cases
x11(type="cairo")
EAS.mat.pval.m.new = EAS.mat.pval.m
EAS.mat.pval.m.new[,"LLR.p.adj"][EAS.mat.pval.m.new$LLR.p.adj>0.1] = NA
plotmatrix(EAS.mat.pval.m.new[complete.cases(EAS.mat.pval.m.new),], colp="LLR.p.adj", xfontsize=10, yfontsize=10, colors=c("black","#3794bf","#df8640","gray90"), "EAS")
# ggsave("EAS_complete_cases_zm.pdf", device = "pdf")
####### bubble plot for complete cases EAS #######
cc.eas = EAS.mat.pval.m.new[complete.cases(EAS.mat.pval.m.new),]
cc.eas$p.cat = ifelse(cc.eas[,"LLR.p.adj"] <= 0.01, "0-0.01",
                      ifelse(cc.eas[,"LLR.p.adj"] > 0.01 & cc.eas[,"LLR.p.adj"] <=0.05, "0.01-0.05",
                             ifelse(cc.eas[,"LLR.p.adj"] > 0.05 & cc.eas[,"LLR.p.adj"] <=0.1, "0.05-0.1", "0.1-")))
colors=c("black","#3794bf","#df8640","gray90")
## parse "1KGP3: <n> |TCGA: <m>" into numeric halves and average for bubble size
cc.eas$numSNPs = unlist(mapply(function(x,y) return(data.frame(EAS.mat.numSNPs[x,y])), cc.eas$broad_phenotype, cc.eas$histology_abbreviation))
cc.eas$numSNPs = as.character(cc.eas$numSNPs)
cc.eas$onekgSNPs =
as.numeric(cc.eas$numSNPs %>% { gsub("1KGP3\\: ", "", .) } %>% { gsub(" \\|TCGA: .*", "", .) })
cc.eas$tcgaSNPs =
as.numeric(cc.eas$numSNPs %>% { gsub(".*TCGA: ", "", .) })
cc.eas$numSNPsMean = (cc.eas$onekgSNPs + cc.eas$tcgaSNPs) / 2
cc.eas$p.cat = factor(cc.eas$p.cat)
# order of cancer type
cc.eas$broad_phenotype = factor(cc.eas$broad_phenotype,
                                levels=c("Alcohol_dependence","Atopic_eczema","Celiac_disease","Coronary_artery_disease","Duodenal_ulcer","Glaucoma","Graves'_disease","Kawasaki_disease","Multiple_sclerosis","Myocardial_infarction","Narcolepsy","Polycystic_ovary_syndrome","Primary_biliary_cirrhosis","Rheumatoid_arthritis","Schizophrenia","Systemic_lupus_erythematosus","Systemic_sclerosis","Type_1_diabetes","Type_2_diabetes","Uterine_leiomyoma","Vitiligo","Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis","Inflammatory_bowel_disease","Nasopharyngeal_carcinoma","Lung_cancer","Endometrial_cancer","Esophageal_cancer","Thyroid_cancer","HBV-induced_hepatocellular_carcinoma"))
write.table(cc.eas, "cc.eas.txt", sep="\t", quote=F)
## cc.eas.bp and cc.eas.ha have to be in order and matched
cc.eas.bp = c("Alcohol_dependence","Atopic_eczema","Celiac_disease","Coronary_artery_disease","Duodenal_ulcer","Glaucoma","Graves'_disease","Kawasaki_disease","Multiple_sclerosis","Myocardial_infarction","Narcolepsy","Polycystic_ovary_syndrome","Primary_biliary_cirrhosis","Rheumatoid_arthritis","Schizophrenia","Systemic_lupus_erythematosus","Systemic_sclerosis","Type_1_diabetes","Type_2_diabetes","Uterine_leiomyoma","Vitiligo","Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis","Inflammatory_bowel_disease","Nasopharyngeal_carcinoma","Lung_cancer","Endometrial_cancer","Esophageal_cancer","Thyroid_cancer","HBV-induced_hepatocellular_carcinoma")
cc.eas.ha = c("Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Stomach-AdenoCA","Stomach-AdenoCA","Stomach-AdenoCA","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC","Liver-HCC")
mwpp.eas = as.data.frame(t(mapply(function(x,y) mwp(LR.cancer.final, LR.1kg.final, "population==\"EAS\"", "LLR", x,y), cc.eas.ha, cc.eas.bp)))
colnames(mwpp.eas) = c("broad_phenotype","histology_abbreviation","twosided","greater","less")
mwpp.eas$risk = ifelse(as.numeric(mwpp.eas$greater) < as.numeric(mwpp.eas$less), "greater", "less")
write.table(as.matrix(mwpp.eas), "cc.eas.mwp.txt", sep="\t", quote=F)
# plot
## NOTE(review): the outline scale supplies only rep("white",3) values -- if
## all four p.cat levels occur in cc.eas this scale will be one value short;
## confirm against the data before reuse.
ggplot(cc.eas, aes(x=histology_abbreviation, y=broad_phenotype, size=numSNPsMean, fill=p.cat, color=p.cat)) +
geom_point(shape = 21) +
theme(legend.position = "none",
      axis.text.x = element_text(size = 18, angle = 330,
                                 hjust = 0, color = "black"),
      axis.text.y = element_text(size = 18)) +
theme(legend.position="right") +
scale_fill_manual(values = colors, na.value = "white") +
scale_color_manual(values = rep("white",3), na.value = "white") +
labs(y=paste("EAS","_broad_phenotype"),x="histology_abbreviation") +
scale_size_area(max_size = 20) +
theme(legend.title = element_text(size=15, face="bold"), legend.text=element_text(size=15))
## this add label for numbers
# + geom_text(aes(label=round(numSNPsMean)), size=5, nudge_x=0.0, nudge_y=0.6)
# ggsave("EAS_complete_cases_bubble_zm.pdf", device = "pdf", useDingbats=FALSE)
## 10.4 x 14.4
################################################################################
# ## AFR
## AFR heatmaps (same pipeline as EUR/EAS; note the per-population color order
## passed to plotmatrix -- it compensates for which p.cat levels are present)
x11(type="cairo")
plotmatrix(AFR.mat.pval.m, colp="LLR.p.adj", xfontsize=8, yfontsize=5, colors=c("#3794bf","gray90","#df8640","black"), "AFR")
# ggsave("AFR_alldz_allcancertypes_zm.pdf", device = "pdf")
x11(type="cairo")
plotmatrix(AFR.cancer.match.ss1, colp="LLR.p.adj", xfontsize=5, yfontsize=7, colors=c("gray90", "#3794bf","black","#df8640"), "AFR")
# ggsave("AFR_matched_allcancertypes_zm.pdf", device = "pdf")
## plot only complete and non-NA cases
x11(type="cairo")
AFR.mat.pval.m.new = AFR.mat.pval.m
AFR.mat.pval.m.new[,"LLR.p.adj"][AFR.mat.pval.m.new$LLR.p.adj>0.1] = NA
plotmatrix(AFR.mat.pval.m.new[complete.cases(AFR.mat.pval.m.new),], colp="LLR.p.adj", xfontsize=10, yfontsize=10, colors=c("#3794bf","black","#df8640","gray90"), "AFR")
# ggsave("AFR_complete_cases_zm.pdf", device = "pdf")
#
# ## AMR
# x11(type="cairo")
# plotmatrix(AMR.mat.pval.m, colp="LLR.p.adj", xfontsize=8, yfontsize=5, colors=c("black","#3794bf","#df8640","gray90"), "AMR")
# plotmatrix(AMR.cancer.match.ss1, colp="LLR.p.adj", xfontsize=5, yfontsize=7, colors=c("black","#df8640","gray90", "#3794bf"), "AMR")
#
# ## SAS
# x11(type="cairo")
# plotmatrix(SAS.mat.pval.m, colp="LLR.p.adj", xfontsize=8, yfontsize=5, colors=c("gray90", "black","#3794bf","#df8640"), "SAS")
# plotmatrix(SAS.cancer.match.ss1, colp="LLR.p.adj", xfontsize=5, yfontsize=7, colors=c("black","#df8640","gray90", "#3794bf"), "SAS")
# violin plots
## Violin + boxplot of LLRs for one disease (dz), comparing a cancer cohort
## (rows matching both popparse and cancertypeparse, which are R-expression
## strings eval(parse())-ed inside subset()) against a reference cohort
## (popparse only). Prints the labelled p-values and the plot as side effects.
## Returns list(two-sided, less, greater) unadjusted Mann-Whitney p-values
## with x = reference (1KGP3) and y = cancer cohort, i.e. a small "less"
## p-value means patients carry higher LLRs than the reference.
## NOTE(review): stat_summary(fun.y=...) is deprecated in ggplot2 >= 3.3
## (renamed `fun`); left as-is for compatibility with the pinned version.
plotviolin <- function(cancerdata, refdata, popparse, cancertypeparse, dz)
{
tcga = subset(cancerdata, eval(parse(text=popparse)) & eval(parse(text=cancertypeparse)),
              select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
kgp3 = subset(refdata, eval(parse(text=popparse)),
              select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
merged = rbind(tcga, kgp3)
## mann whitney test for the requested disease
mm.1kgp3 = kgp3[kgp3$broad_phenotype==dz,]
mm.tcga = tcga[tcga$broad_phenotype==dz,]
mm.merged = merged[merged$broad_phenotype == dz,]
print(paste(popparse,"_", cancertypeparse, "_", dz, " p.val, 2-sided"))
jm1 = wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR)$p.value
print(paste(popparse,"_", cancertypeparse, "_", dz, " p.val, less"))
jm2 = wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "less")$p.value ## x < y
print(paste(popparse,"_", cancertypeparse, "_", dz, " p.val, greater"))
jm3 = wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "greater")$p.value ## x > y
## plot violin and boxplot (violin + narrow grey boxplot + median marker)
pd <- position_dodge(0.9)
pmain2 = ggplot(mm.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
phisto2 = geom_violin()
phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
phisto4 = stat_summary(fun.y=median)
ptitle = ggtitle(paste(gsub("population==","",popparse), " ", gsub("histology_abbreviation==", "", cancertypeparse)))
plabels = labs(x=dz,y="LLR distribution")
jm4 = pmain2 + phisto2 + phisto3 + phisto4 +
    ptitle + plabels + theme(legend.position="none") +
    scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
# +
# geom_dotplot(binaxis='y', stackdir='center', dotsize=0.1, colour = "black", fill = "black")
print(jm4)
return(list(jm1,jm2,jm3))
}
## item1: 2 sided unadj pvalue, 1sided x<y, 1sided x>y
# Melanoma-Melanoma
## plotviolin() returns list(2-sided, less, greater); the adjusted p and SNP
## counts are then pulled from the EUR heatmap/numSNPs tables for the same cell
x11(type="cairo")
p.melanoma.melanoma = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                                               "population==\"EUR\"",
                                               "histology_abbreviation==\"Skin-Melanoma\"",
                                               dz = "Melanoma"))
names(p.melanoma.melanoma) = c("2sided","1sided_less","1sided_greater")
p.melanoma.melanoma$twosided_adj.p = EUR.mat.pval.m[EUR.mat.pval.m$broad_phenotype=="Melanoma" & EUR.mat.pval.m$histology_abbreviation=="Skin-Melanoma",]$LLR.p.adj
p.melanoma.melanoma$size = EUR.mat.numSNPs["Melanoma","Skin-Melanoma"]
# ggsave("violin-melanoma.melanoma2_zm.pdf", device = "pdf", useDingbats=FALSE)
# melanoma-obesity
x11(type="cairo")
p.melanoma.obesity = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                                              "population==\"EUR\"",
                                              "histology_abbreviation==\"Skin-Melanoma\"",
                                              dz = "Obesity"))
names(p.melanoma.obesity) = c("2sided","1sided_less","1sided_greater")
p.melanoma.obesity$twosided_adj.p = EUR.mat.pval.m[EUR.mat.pval.m$broad_phenotype=="Obesity" & EUR.mat.pval.m$histology_abbreviation=="Skin-Melanoma",]$LLR.p.adj
p.melanoba.obesity.size.placeholder <- NULL
p.melanoma.obesity$size = EUR.mat.numSNPs["Obesity","Skin-Melanoma"]
# ggsave("violin-melanoma-obesity_zm.pdf", device = "pdf", useDingbats=FALSE)
# Prost-AdenoCA and Prostate_cancer
## attach 1KG sample genders so the reference can be restricted to males
gender.1kg = subset(read.delim("1kg-sampleinfo.txt", header=T, stringsAsFactors=FALSE, sep="\t"), select=c(sampleID, Gender))
LR.1kg.final.gender = merge(LR.1kg.final, gender.1kg, by.x="sample.id", by.y="sampleID", all.x=TRUE)
LR.1kg.final.male = subset(LR.1kg.final.gender, Gender == "male")
x11(type="cairo")
p.prostate.prostate = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                                               "population==\"EUR\"",
                                               "histology_abbreviation==\"Prost-AdenoCA\"",
                                               dz = "Prostate_cancer"))
# ggsave("violin-prostate-prostate_zm.pdf", device = "pdf", useDingbats=FALSE)
x11(type="cairo")
## male only
p.prostate.prostate.m = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final.male,
                                                 "population==\"EUR\"",
                                                 "histology_abbreviation==\"Prost-AdenoCA\"",
                                                 dz = "Prostate_cancer"))
names(p.prostate.prostate.m) = c("twosided","onesided_less","onesided_greater")
## NOTE(review): the scaling factor (LLR.p.adj / LLR.p * rank) reduces to the
## effective number of tests for an unclamped BH value -- confirm intended
p.prostate.prostate.m$twosided.adj = p.prostate.prostate.m$twosided *
(cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p.adj"] /
cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p"] *
cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","rank"])
## male + female
names(p.prostate.prostate) = c("twosided","onesided_less","onesided_greater")
p.prostate.prostate$twosided.adj = p.prostate.prostate$twosided *
(cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p.adj"] /
cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","LLR.p"] *
cc.eur[cc.eur$broad_phenotype == "Prostate_cancer" & cc.eur$histology_abbreviation == "Prost-AdenoCA","rank"])
# ggsave("violin-prostate-prostate-male-only_zm.pdf", device = "pdf", useDingbats=FALSE)
## to check increase or decrease in risk
as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
                         "population==\"EUR\"",
                         "histology_abbreviation==\"Stomach-AdenoCA\"",
                         dz = "Behcet's_disease"))
## sweep the EAS phenotype hits against Liver-HCC, then Stomach-AdenoCA
dzs = c("Alcohol_dependence","Atopic_eczema","Celiac_disease","Coronary_artery_disease","Duodenal_ulcer","Glaucoma","Graves'_disease","Kawasaki_disease","Multiple_sclerosis","Myocardial_infarction","Narcolepsy","Polycystic_ovary_syndrome","Primary_biliary_cirrhosis","Rheumatoid_arthritis","Schizophrenia","Systemic_lupus_erythematosus","Systemic_sclerosis","Type_1_diabetes","Type_2_diabetes","Uterine_leiomyoma","Vitiligo","Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis","Inflammatory_bowel_disease","Nasopharyngeal_carcinoma","Lung_cancer","Endometrial_cancer","Esophageal_cancer","Thyroid_cancer","HBV-induced_hepatocellular_carcinoma")
aa = sapply(dzs, function(x) plotviolin(LR.cancer.final, LR.1kg.final,
                                        "population==\"EAS\"",
                                        "histology_abbreviation==\"Liver-HCC\"",
                                        dz = x))
row.names(aa) = c("2sided","1sided_less","1sided_greater_decrease")
dzs = c("Intracranial_aneurysm","Osteoporosis","Ulcerative_colitis")
ab = sapply(dzs, function(x) plotviolin(LR.cancer.final, LR.1kg.final,
                                        "population==\"EAS\"",
                                        "histology_abbreviation==\"Stomach-AdenoCA\"",
                                        dz = x))
row.names(ab) = c("2sided","1sided_less","1sided_greater_decrease")
# breast_cancer and Breast-AdenoCA
x11(type="cairo")
p.breast.breast = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final,
"population==\"EUR\"",
"histology_abbreviation==\"Breast-AdenoCA\"",
dz = "Breast_cancer"))
LR.1kg.final.female = subset(LR.1kg.final.gender, Gender == "female")
x11(type="cairo")
p.breast.breast.f = as.data.frame(plotviolin(LR.cancer.final, LR.1kg.final.female,
"population==\"EUR\"",
"histology_abbreviation==\"Breast-AdenoCA\"",
dz = "Breast_cancer"))
########################
# histograms: compare (1KGp3 EUR) vs (ICGC_TCGA EUR melanoma patients) LRs for ALL VariMed diseases
tcga.EUR.melanoma = subset(LR.cancer.final, population == "EUR" & histology_abbreviation == "Skin-Melanoma",
select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
kgp3.EUR.melanoma = subset(LR.1kg.final, population == "EUR",
select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
merged.EUR.melanoma = rbind(tcga.EUR.melanoma, kgp3.EUR.melanoma)
## plotting
x11(type="cairo")
pmain = ggplot(tcga.EUR.melanoma[tcga.EUR.melanoma$broad_phenotype %in% dz.tcga[1:10],], aes(x=broad_phenotype, y=LLR))
phisto = geom_boxplot()
ptitle = ggtitle("Skin-Melanoma")
# pfacet = facet_wrap( ~ broad_phenotype, scales="free", ncol=1)
plabels = labs(x="broad phenotype",y="LLR distribution")
# paxes = theme(axis.title.x = element_text(face = "bold",colour = "black", size = 20),
# axis.title.y = element_text(face = "bold",colour = "black", size = 20),
# axis.text.x = element_text(size = 15), axis.text.y = element_text(size = 15))
pmain + phisto + ptitle + plabels + scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip() + geom_jitter(height = 0, width = 0.1)
## new
by=10
for (i in seq(1,length(dz.tcga),by=by))
{
pmain2 = ggplot(merged.EUR.melanoma[merged.EUR.melanoma$broad_phenotype %in% dz.tcga[i:(i+by-1)],],
aes(x=broad_phenotype, y=LLR, fill = factor(dataset)))
phisto2 = geom_boxplot(width=0.7, outlier.shape=3) ## shape 3 = '+'
j = pmain2 + phisto2 + ptitle + plabels +
scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
print(j)
}
# Mann-Whitney tests & violin plots for TCGA 'Skin-Melanoma' EUR patients for LRs for 'Melanoma' (pos) and 'Obesity' (neg) and 'Renal_cell_cancer'
## mann whitney test for melanoma
mm.1kgp3 = kgp3.EUR.melanoma[kgp3.EUR.melanoma$broad_phenotype=="Melanoma",]
mm.tcga = tcga.EUR.melanoma[tcga.EUR.melanoma$broad_phenotype=="Melanoma",]
mm.merged = merged.EUR.melanoma[merged.EUR.melanoma$broad_phenotype == "Melanoma",]
print("Melanoma-Melanoma p.val, 2-sided")
wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR)$p.value
print("Melanoma-Melanoma p.val, less")
wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "less")$p.value ## x < y
print("Melanoma-Melanoma p.val, greater")
wilcox.test(mm.1kgp3$LLR, mm.tcga$LLR, alternative = "greater")$p.value ## x > y
## plot violin and boxplot for melanoma
pd <- position_dodge(0.9)
pmain2 = ggplot(mm.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
phisto2 = geom_violin()
phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
phisto4 = stat_summary(fun.y=median)
plabels = labs(x="Melanoma",y="LLR distribution")
pmain2 + phisto2 + phisto3 + phisto4 +
ptitle + plabels + theme(legend.position="none") +
scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
#---------
## mann whitney test for prostate-prostate (male-only)
tcga.EUR.prostate = subset(LR.cancer.final, population == "EUR" & histology_abbreviation == "Prost-AdenoCA",
select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
kgp3.EUR.prostate = subset(LR.1kg.final.male, population == "EUR",
select=c(sample.id, population, broad_phenotype, LLR, LLR_max, dataset))
pp.1kgp3 = kgp3.EUR.prostate[kgp3.EUR.prostate$broad_phenotype=="Prostate_cancer",]
pp.tcga = tcga.EUR.prostate[tcga.EUR.prostate$broad_phenotype=="Prostate_cancer",]
print("Prost-AdenoCA-ProstateCancer p.val, 2-sided")
wilcox.test(pp.1kgp3$LLR, pp.tcga$LLR)$p.value
print("Prost-AdenoCA-ProstateCancer p.val, less")
wilcox.test(pp.1kgp3$LLR, pp.tcga$LLR, alternative = "less")$p.value ## x < y
print("Prost-AdenoCA-ProstateCancer p.val, greater")
wilcox.test(pp.1kgp3$LLR, pp.tcga$LLR, alternative = "greater")$p.value ## x > y
# ## plot violin and boxplot for Renal_cell_cancer
# pd <- position_dodge(0.9)
# pmain2 = ggplot(mr.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
# phisto2 = geom_violin()
# phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
# phisto4 = stat_summary(fun.y=median)
# plabels = labs(x="Renal_cell_cancer",y="LLR distribution")
# pmain2 + phisto2 + phisto3 + phisto4 +
# ptitle + plabels + theme(legend.position="none") +
# scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
# ## mann whitney test for Obesity
# mo.1kgp3 = kgp3.EUR.melanoma[kgp3.EUR.melanoma$broad_phenotype=="Obesity",]
# mo.tcga = tcga.EUR.melanoma[tcga.EUR.melanoma$broad_phenotype=="Obesity",]
# mo.merged = merged.EUR.melanoma[merged.EUR.melanoma$broad_phenotype == "Obesity",]
#
#
# print("Melanoma-Obesity p.val, 2-sided")
# wilcox.test(mo.1kgp3$LLR, mo.tcga$LLR)$p.value
# print("Melanoma-Obesity p.val, less")
# wilcox.test(mo.1kgp3$LLR, mo.tcga$LLR, alternative = "less")$p.value ## x < y
# print("Melanoma-Obesity p.val, greater")
# wilcox.test(mo.1kgp3$LLR, mo.tcga$LLR, alternative = "greater")$p.value ## x > y
#
# ## plot violin and boxplots for obesity
# pmain2 = ggplot(mo.merged, aes(x=dataset, y=LLR, fill = factor(dataset)))
# phisto2 = geom_violin()
# phisto3 = geom_boxplot(width=.1, outlier.size=0, fill="grey50", position=pd)
# phisto4 = stat_summary(fun.y=median)
# plabels = labs(x="Obesity",y="LLR distribution")
# pmain2 + phisto2 + phisto3 + phisto4 +
# ptitle + plabels + theme(legend.position="none") +
# scale_color_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") + coord_flip()
|
library(shiny)
# Shiny UI for the Biggie-vs-2Pac word-cloud app.  The last expression of
# a ui.R file is the value Shiny uses as the page, so the fluidPage(...)
# call below is the entire UI.
# NOTE(review): `rappers` is not defined in this file -- presumably it is
# created in global.R (shared with server.R); confirm before reuse.
fluidPage(
  # Application title
  titlePanel("Biggie vs. 2Pac: Word Cloud"),
  sidebarLayout(
    # Sidebar with a slider and selection inputs
    sidebarPanel(
      # Dropdown whose choices come from the external `rappers` vector.
      selectInput("selection", "Choose a Rapper:",
                  choices = rappers),
      # Pressing this button triggers recomputation on the server side.
      actionButton("update", "Change"),
      hr(),
      # Words below this frequency are excluded from the cloud.
      sliderInput("freq",
                  "Minimum Frequency:",
                  min = 1, max = 50, value = 1),
      # Upper bound on how many words the cloud may contain.
      sliderInput("max",
                  "Maximum Number of Words:",
                  min = 1, max = 300, value = 100)
    ),
    # Show Word Cloud
    mainPanel(
      plotOutput("plot")
    )
  )
) | /shiny_twitter_wc/ui.R | no_license | anahitabahri/Rap-Twitter-Analysis | R | false | false | 647 | r | library(shiny)
fluidPage(
# Application title
titlePanel("Biggie vs. 2Pac: Word Cloud"),
sidebarLayout(
# Sidebar with a slider and selection inputs
sidebarPanel(
selectInput("selection", "Choose a Rapper:",
choices = rappers),
actionButton("update", "Change"),
hr(),
sliderInput("freq",
"Minimum Frequency:",
min = 1, max = 50, value = 1),
sliderInput("max",
"Maximum Number of Words:",
min = 1, max = 300, value = 100)
),
# Show Word Cloud
mainPanel(
plotOutput("plot")
)
)
) |
getmBIC <- function(K, lv, mu, CGHo) {
  ## Modified BIC for a segmentation with K segments over M profiles.
  ##
  ## K    : total number of segments across all profiles
  ## lv   : log-likelihood-derived quantity supplied by the caller
  ##        (scaled by 2/N inside the penalty below)
  ## mu   : named list, one data.frame per profile, with `begin`/`end`
  ##        columns (and a `levels` column when calling is enabled)
  ## CGHo : options object indexed with `[`, providing "calling" and,
  ##        when calling is on, "nblevels"
  ##
  ## Returns the scalar mBIC value.
  M <- length(names(mu))
  ## NOTE(review): N assumes every profile ends at the same position as
  ## the first one -- confirm upstream guarantees equal-length profiles.
  N <- M * mu[[1]]$end[nrow(mu[[1]])]
  ## Entropy-like term over segment lengths.
  Ent <- sum(unlist(lapply(mu, FUN = function(x) { log(x$end - x$begin + 1) })))
  if (CGHo["calling"] == FALSE) {
    mBIC <- ((N - K + 1) / 2) * (lv * (2 / N) + 1 + log(2 * pi)) - 0.5 * Ent -
      (K - M) * log(N) + lgamma((N - K + 1) / 2) - ((N - K + 1) / 2) * log(N)
  } else {
    P <- CGHo["nblevels"]
    ## Pool, across profiles, the number of positions assigned to each of
    ## the P calling levels.  Reduce(`+`, ...) replaces the previous
    ## sapply()/apply(Np, 1, sum) pair, which broke when P == 1 because
    ## sapply() then simplified its result to a plain vector and
    ## apply(..., 1, sum) failed on the missing dim attribute.
    Np <- Reduce(`+`, lapply(mu, function(x) {
      tabulate(rep(x$levels, x$end - x$begin + 1), P)
    }))
    Ent <- sum(log(Np))
    mBIC <- ((N - P + 1) / 2) * (lv * (2 / N) + 1 + log(2 * pi)) - 0.5 * Ent -
      (K - M) * log(N) + lgamma((N - P + 1) / 2) - ((N - P + 1) / 2) * log(N)
  }
  return(mBIC)
}
| /R/getmBIC.R | no_license | cran/cghseg | R | false | false | 609 | r | getmBIC <- function(K,lv,mu,CGHo){
M = length(names(mu))
N = M*mu[[1]]$end[dim(mu[[1]])[1]]
Ent = sum(unlist(lapply(mu,FUN = function(x){log(x$end-x$begin+1)})))
if (CGHo["calling"]==FALSE){
mBIC = ((N-K+1)/2)*(lv*(2/N)+1+log(2*pi))-0.5*Ent -(K-M)*log(N)+lgamma((N-K+1)/2)-((N-K+1)/2)*log(N)
} else {
P = CGHo["nblevels"]
Np = sapply(mu,FUN=function(x){
tabulate(rep(x$levels,x$end-x$begin+1),P)
})
Np = apply(Np,1,sum)
Ent = sum(log(Np))
mBIC = ((N-P+1)/2)*(lv*(2/N)+1+log(2*pi))-0.5*Ent-(K-M)*log(N)+lgamma((N-P+1)/2)-((N-P+1)/2)*log(N)
}
return(mBIC)
}
|
library(ggplot2)
library(grid)
library(yaml)
library(ggthemes)
library(scales)
# Command-line invocation and graphics-device setup.
args = commandArgs(trailingOnly = T)
# 'cairo' lets png()/x11() work on headless servers without an X11 display.
options(bitmapType='cairo')
# Rscript boxplot_based_on_one_column_with_header.R <input_file> <column id to use for group plot> <column id to use for boxplot> <output file> <pseudo chip> <title> <label prefix> <x label text> <y label text>
# Two-sided Kolmogorov-Smirnov test p-value for two numeric samples.
# Fixed to use the full component name `p.value`: the original `$p.val`
# relied on partial matching, which lintr flags and which silently
# returns NULL if the component name ever changes.
ks_test = function (v1, v2) {
  return (ks.test(v1, v2)$p.value)
}
# Shared ggplot2 theme for the figure-1 style plots: 20 pt title, axis
# and legend text, black axis labels, and the x-axis title suppressed.
# The pieces are combined left-to-right with ggplot2's `+`, so later
# entries override/merge earlier ones exactly as the original chain did
# (in particular, element_blank() wins for axis.title.x).
figure1_theme <- function() {
  pieces <- list(
    theme(axis.text.y = element_text(vjust = 1)),
    theme(plot.title = element_text(size = 20)),
    theme(axis.title.x = element_text(colour = "black", size = 20),
          axis.title.y = element_text(colour = "black", size = 20)),
    theme(axis.text.x = element_text(colour = "black", size = 20),
          axis.text.y = element_text(colour = "black", size = 20)),
    theme(legend.title = element_text(size = 20),
          legend.text = element_text(size = 20)),
    theme(axis.title.x = element_blank())
  )
  Reduce(`+`, pieces)
}
################### arguments ################
# Positional command-line arguments (see the Rscript usage comment near
# the top of the script).
# NOTE(review): 18 arguments are read here, but args[19] and args[20] are
# also used near the end of the script -- the usage comment is out of date.
dt = read.table(args[1], sep = "", header = T, stringsAsFactor = F)
#print (head(dt))
#print (args)
dt_colnames = names(dt)
col_to_group = as.numeric(args[2])
col_to_plot = as.numeric(args[3])
out_file = args[4]
pseudo_chip = as.numeric(args[5])
label_title = args[6]
label_prefix = args[7]
x_label = args[8]
y_label = args[9]
ref_for_ks_test = args[10]
min_val = as.integer(args[11])
max_val = as.integer (args[12])
col_dotted_line = as.character (args[13])
ref_box_color = as.character (args[14])
color_file = args [15]
plot_pdf_file = args[16]
plot_title = args[17]
exact_signal_file = args[18]
###############################################
# log2-transform the ChIP signal with a pseudo-count, then drop rows at
# -8 or below (values contributed essentially by the pseudo-count alone).
dt["Mean.ChIP"] = log2(dt$`Mean.ChIP` + pseudo_chip)
dt$Cluster = as.character(dt$Cluster)
dt = dt[which(dt$Mean.ChIP > -8), ]
# Companion table with per-region signal; same transform and filter.
exact_signal_dt = read.table(exact_signal_file, sep ="\t", header = F, stringsAsFactor = F)
names(exact_signal_dt) = c("chrom_details", "Mean.ChIP")
exact_signal_dt["Mean.ChIP"] = log2 (exact_signal_dt$`Mean.ChIP` + pseudo_chip)
exact_signal_dt = exact_signal_dt[which(exact_signal_dt$Mean.ChIP > -8), ]
# Keep only the grouping column and the value column for plotting.
dt_tmp = dt [, c(col_to_group, col_to_plot)]
#dt_sub = dt [which (dt_tmp$Mean.ChIP >= min_val & dt_tmp$Mean.ChIP <=max_val), ]
dt_sub = dt_tmp
dt_sub_ggplot = dt_sub
colnames (dt_sub_ggplot) = c ("col_to_group", "col_to_plot")
dt_sub_ggplot = dt_sub_ggplot[which(dt_sub_ggplot$col_to_plot> -8), ] # discarding all the pseudo-chip value
# NOTE(review): this assumes dt and exact_signal_dt still have the same
# rows in the same order after the two independent -8 filters -- confirm.
dt["chrom_details"] = exact_signal_dt$chrom_details
#print (head(dt_sub))
# Split the value column into one numeric vector per group (cluster).
dt_sub_l = lapply(unique(dt_sub[[dt_colnames[col_to_group]]]),
                  function (x) {
        dt_sub[which(dt_sub[[dt_colnames[col_to_group] ]]==x), dt_colnames[col_to_plot] ] })
names(dt_sub_l) = unique(dt_sub[[dt_colnames[col_to_group]]])
###### Calculate p-values using ref_for_ks_test as reference ###########
all_ids = unique(dt_sub[[dt_colnames[col_to_group]]])
#cat (all_ids)
#cat ("\n")
ref_for_ks_test_v = c (args[10])
median_val_for_dotted_line = median(dt_sub[which(dt_sub$Cluster == ref_for_ks_test), "Mean.ChIP"])
# Each non-reference group is compared against the reference group with
# a two-sided KS test.
query_set = sort(setdiff(all_ids, ref_for_ks_test_v))
#print(query_set)
ks_p_vals = lapply (query_set, function(x) {
           ks_test(unname (dt_sub_l[[x]]), unname(dt_sub_l [[ref_for_ks_test]]) )
})
names(ks_p_vals) = query_set
#print(query_set)
print (ks_p_vals)
# Build display strings; p-values that underflow to 0 are reported as
# "< 2.2e-16" (R's conventional smallest reportable p).
reassign_zeros = lapply (query_set, function (x) { if (ks_p_vals[[x]] == 0 ) {return("p < 2.2e-16") } else {return(paste0("p = ", round (ks_p_vals[[x]], 4))) }})
p_values_besides_n = lapply (query_set, function (x) { if (ks_p_vals[[x]] == 0 ) {return(paste0("p(",x,",", ref_for_ks_test, ") < 2.2e-16")) } else {return(paste0("p(", x, ",", ref_for_ks_test, ") = ", scientific(ks_p_vals[[x]], digits=2))) }})
names(reassign_zeros) = query_set
# Boxes are drawn in sorted query order with the reference group last.
to_order_list = c(query_set, ref_for_ks_test)
#color_list = read.table(color_file, header = F, stringsAsFactor = F)$V1
#color_list = c (color_list, ref_box_color)
# Per-group fill colours come from a YAML map; the reference group gets
# its own colour from the command line (args[14]).
color_dict = yaml.load_file(color_file)
color_list = sapply(query_set, function(x) {color_dict[[x]]})
color_list = unname(c (color_list, ref_box_color))
print (color_list, stderr())
dt_sub_l_ordered = lapply(to_order_list, function (x) {dt_sub_l[[x]]})
names (dt_sub_l_ordered) = to_order_list
cat ("######################\n")
print (to_order_list)
cat ("%%%%%%%%%%%%%%%%%%%%%%\n")
print (color_list)
#ggplot_df = data.frame(dt_sub_l_ordered)
# Ordered factor fixes the x-axis ordering of the boxes.
dt_sub_ggplot$col_to_group = factor(dt_sub_ggplot$col_to_group,
                          levels = to_order_list, ordered = T)
# Main boxplot rendered to PNG first (a PDF copy is written below).
png(out_file, height = 10.5, width = 7, units = "in", res = 300)
#pdf(paste0(args[1], "-dist.pdf"), height = 6, width = 9)
# Per-group summary statistics, used to place the text annotations.
max_points = unlist(lapply(dt_sub_l_ordered, max))
min_points = unlist(lapply(dt_sub_l_ordered, min))
median_points = unlist(lapply(dt_sub_l_ordered, median))
counts = unlist(lapply(dt_sub_l_ordered, length))
print ("############ %%%%%%%%%%%%% ############")
print (median_points)
# Map each group to its rank by decreasing median; the actual->assigned
# lookup table is written out (args[20]) for downstream use.
median_df = data.frame(actual = order(median_points, decreasing = T),
                       assigned = seq(length(median_points)),
                       stringsAsFactors = F)
median_df = median_df[order(median_df$actual), ]
assigned_cl_id = unlist(lapply (dt_sub_ggplot$col_to_group, function(x){
             return (median_df[median_df$actual == x, "assigned"])}))
dt_sub_ggplot["assigned_cl_id"] = as.character(assigned_cl_id)
# NOTE(review): this value is recomputed (from dt_sub) a few lines below,
# so this assignment is effectively dead -- confirm which is intended.
median_val_for_dotted_line = median(dt_sub_ggplot[which(dt_sub_ggplot$assigned_cl_id == ref_for_ks_test), "col_to_plot"])
print(head(dt_sub_ggplot))
dt_sub_ggplot$assigned_cl_id = factor(dt_sub_ggplot$assigned_cl_id,
                          levels = to_order_list, ordered = T)
write.table(median_df, args[20], col.names = T, row.names = F, sep = "\t", quote = F)
#boxplot (dt_sub_l_ordered, main = label_title, xlab = x_label, ylab = y_label,
#         frame = F, cex = 0.5, notch = T, ylim = c(min(min_points), max(max_points) + 1.5))
# get the upper and lower limits of boxplot y-axis, from boxplot stats
bxplt_dt_sub = boxplot(dt_sub_l_ordered, plot = F)
#write.table(bxplt_dt_sub, paste0(out_file, ".bxplt.tsv"))
#print (bxplt_dt_sub)
# Whisker limits (stats rows 1 and 5) bound the y-axis so that outliers
# do not stretch the plot.
min_for_boxplot = min(bxplt_dt_sub$stats[1,])
max_for_boxplot = max(bxplt_dt_sub$stats[5,])
chip_ref = dt_sub[which(dt_sub$Cluster == ref_for_ks_test), "Mean.ChIP"]
chip_ref = chip_ref[which (chip_ref >= min_for_boxplot & chip_ref <= max_for_boxplot) ]
# Dashed reference line: median of the reference cluster (unclipped).
median_val_for_dotted_line = median(dt_sub[which(dt_sub$Cluster == ref_for_ks_test), "Mean.ChIP"])
#median_val_for_dotted_line = median(chip_ref)
# Dodged, notched boxplots, one per group, reference group last.
kk = ggplot(dt_sub_ggplot,
    aes (x = col_to_group, y = col_to_plot, fill = col_to_group, alpha = 0.5)) +
    geom_boxplot(width = 0.6, notch = T, show.legend = F, position=position_dodge(width=0.6)) +
    xlab (x_label) + ylab (y_label) +
    ylim (c (min_for_boxplot, max_for_boxplot)) +
    geom_rangeframe() + theme_few() +
    ggtitle(plot_title) + theme(plot.title = element_text(hjust = 0.5)) +
    #scale_x_discrete (limits = to_order_list) +
    scale_fill_manual (breaks = to_order_list, values = color_list) +
    scale_alpha(guide = 'none') + figure1_theme() +
    scale_y_continuous(labels = scales::number_format(accuracy = 0.1))
# n = on besides each box
# Rotated "n = ..." labels beside each box; every non-reference box also
# gets its KS p-value versus the reference group.
for ( i in seq(length(dt_sub_l_ordered)) ){
  #grob <- grobTree(textGrob( paste0("n = ", counts[i]), x= i - 0.5, y=max_points[i]/2, hjust=0, gp=gpar(col="red", fontsize=13, fontface="italic")))
  if (i < length(dt_sub_l_ordered)){
     kk = kk + annotate ("text", x = i - 0.5, y = median_points[i] + 0.25, #max_points[i]/2,
          label = paste0("n = ", counts[i], ", ", p_values_besides_n[i]),
          angle = 90, size = 5, hjust = 0)}
  else{
     kk = kk + annotate ("text", x = i - 0.5, y = median_points[i] + 0.25, # max_points[i]/2,
          label = paste0("n = ", counts[i]), angle = 90, size = 5, hjust=0)
  }
  #kk = kk + annotation_custom(grob)
}
# put p-values and lines
last = length(dt_sub_l_ordered)
last_minus_one = last - 1
#for (i in seq (last_minus_one)) {
#  kk = kk + geom_segment(x = i, y = max(max_points[i], max_points[last]),
#                  xend = i, yend = max(max_points[i], max_points[last]) + (last - i)*0.5) +
#       geom_segment(x = last, y = max(max_points[i], max_points[last]),
#                  xend = last, yend = max(max_points[i], max_points[last]) + (last - i)*0.5) +
#       geom_segment(x = i, y = max(max_points[i], max_points[last]) + (last -i)*0.5,
#                  xend = last, yend = max(max_points[i], max_points[last]) + (last - i)*0.5) +
#       annotate ("text", (last + i)/2, max(max_points[i], max_points[last]) + (last - i)*0.5 + 0.25,
#            label = reassign_zeros[[as.character(i)]], size = 4)
#}
kk = kk + geom_hline (yintercept=median_val_for_dotted_line,
         col = col_dotted_line, lty = 2, size = 1)
print (kk)
#legend ("topright", legend = line_labels_vec, col = color_vals, lty = line_vals)
dev.off()
# Same figure again as a vector PDF (args[16]).
pdf(args[16], height = 10.5, width = 7)
print (kk)
dev.off()
### making it dummy as of now ######
# Placeholder third output (args[19]) so downstream pipelines find a file.
png(args[19], width = 5, height = 9.5, units = "in", res = 150)
plot(seq(1,5), seq(1,5))
dev.off()
| /scripts/boxplot_chip_no_negative.R | no_license | satyanarayan-rao/tf_nucleosome_dynamics | R | false | false | 9,169 | r | library(ggplot2)
library(grid)
library(yaml)
library(ggthemes)
library(scales)
args = commandArgs(trailingOnly = T)
options(bitmapType='cairo')
# Rscript boxplot_based_on_one_column_with_header.R <input_file> <columd id to use for group plot> <column id to use for boxplot> <output file> <pseudo chip> <title> <label prefix> <x label text> <y label text>
ks_test = function (v1, v2) {
return (ks.test(v1, v2)$p.val)
}
figure1_theme <- function (){
theme (axis.text.y =element_text(vjust =1))+
theme(plot.title=element_text( size=20 )) +
theme(axis.title.x = element_text(colour = "black", size = 20),
axis.title.y = element_text(colour = "black", size = 20)) +
theme(axis.text.x = element_text(colour = "black",size = 20),
axis.text.y = element_text(colour = "black", size = 20)) +
theme(legend.title= element_text(size = 20),
legend.text = element_text(size = 20)) +
theme(axis.title.x = element_blank())
}
################### arguments ################
dt = read.table(args[1], sep = "", header = T, stringsAsFactor = F)
#print (head(dt))
#print (args)
dt_colnames = names(dt)
col_to_group = as.numeric(args[2])
col_to_plot = as.numeric(args[3])
out_file = args[4]
pseudo_chip = as.numeric(args[5])
label_title = args[6]
label_prefix = args[7]
x_label = args[8]
y_label = args[9]
ref_for_ks_test = args[10]
min_val = as.integer(args[11])
max_val = as.integer (args[12])
col_dotted_line = as.character (args[13])
ref_box_color = as.character (args[14])
color_file = args [15]
plot_pdf_file = args[16]
plot_title = args[17]
exact_signal_file = args[18]
###############################################
dt["Mean.ChIP"] = log2(dt$`Mean.ChIP` + pseudo_chip)
dt$Cluster = as.character(dt$Cluster)
dt = dt[which(dt$Mean.ChIP > -8), ]
exact_signal_dt = read.table(exact_signal_file, sep ="\t", header = F, stringsAsFactor = F)
names(exact_signal_dt) = c("chrom_details", "Mean.ChIP")
exact_signal_dt["Mean.ChIP"] = log2 (exact_signal_dt$`Mean.ChIP` + pseudo_chip)
exact_signal_dt = exact_signal_dt[which(exact_signal_dt$Mean.ChIP > -8), ]
dt_tmp = dt [, c(col_to_group, col_to_plot)]
#dt_sub = dt [which (dt_tmp$Mean.ChIP >= min_val & dt_tmp$Mean.ChIP <=max_val), ]
dt_sub = dt_tmp
dt_sub_ggplot = dt_sub
colnames (dt_sub_ggplot) = c ("col_to_group", "col_to_plot")
dt_sub_ggplot = dt_sub_ggplot[which(dt_sub_ggplot$col_to_plot> -8), ] # discarding all the pseudo-chip value
dt["chrom_details"] = exact_signal_dt$chrom_details
#print (head(dt_sub))
dt_sub_l = lapply(unique(dt_sub[[dt_colnames[col_to_group]]]),
function (x) {
dt_sub[which(dt_sub[[dt_colnames[col_to_group] ]]==x), dt_colnames[col_to_plot] ] })
names(dt_sub_l) = unique(dt_sub[[dt_colnames[col_to_group]]])
###### Calculate p-values using ref_for_ks_test as reference ###########
all_ids = unique(dt_sub[[dt_colnames[col_to_group]]])
#cat (all_ids)
#cat ("\n")
ref_for_ks_test_v = c (args[10])
median_val_for_dotted_line = median(dt_sub[which(dt_sub$Cluster == ref_for_ks_test), "Mean.ChIP"])
query_set = sort(setdiff(all_ids, ref_for_ks_test_v))
#print(query_set)
ks_p_vals = lapply (query_set, function(x) {
ks_test(unname (dt_sub_l[[x]]), unname(dt_sub_l [[ref_for_ks_test]]) )
})
names(ks_p_vals) = query_set
#print(query_set)
print (ks_p_vals)
reassign_zeros = lapply (query_set, function (x) { if (ks_p_vals[[x]] == 0 ) {return("p < 2.2e-16") } else {return(paste0("p = ", round (ks_p_vals[[x]], 4))) }})
p_values_besides_n = lapply (query_set, function (x) { if (ks_p_vals[[x]] == 0 ) {return(paste0("p(",x,",", ref_for_ks_test, ") < 2.2e-16")) } else {return(paste0("p(", x, ",", ref_for_ks_test, ") = ", scientific(ks_p_vals[[x]], digits=2))) }})
names(reassign_zeros) = query_set
to_order_list = c(query_set, ref_for_ks_test)
#color_list = read.table(color_file, header = F, stringsAsFactor = F)$V1
#color_list = c (color_list, ref_box_color)
color_dict = yaml.load_file(color_file)
color_list = sapply(query_set, function(x) {color_dict[[x]]})
color_list = unname(c (color_list, ref_box_color))
print (color_list, stderr())
dt_sub_l_ordered = lapply(to_order_list, function (x) {dt_sub_l[[x]]})
names (dt_sub_l_ordered) = to_order_list
cat ("######################\n")
print (to_order_list)
cat ("%%%%%%%%%%%%%%%%%%%%%%\n")
print (color_list)
#ggplot_df = data.frame(dt_sub_l_ordered)
dt_sub_ggplot$col_to_group = factor(dt_sub_ggplot$col_to_group,
levels = to_order_list, ordered = T)
png(out_file, height = 10.5, width = 7, units = "in", res = 300)
#pdf(paste0(args[1], "-dist.pdf"), height = 6, width = 9)
max_points = unlist(lapply(dt_sub_l_ordered, max))
min_points = unlist(lapply(dt_sub_l_ordered, min))
median_points = unlist(lapply(dt_sub_l_ordered, median))
counts = unlist(lapply(dt_sub_l_ordered, length))
print ("############ %%%%%%%%%%%%% ############")
print (median_points)
median_df = data.frame(actual = order(median_points, decreasing = T),
assigned = seq(length(median_points)),
stringsAsFactors = F)
median_df = median_df[order(median_df$actual), ]
assigned_cl_id = unlist(lapply (dt_sub_ggplot$col_to_group, function(x){
return (median_df[median_df$actual == x, "assigned"])}))
dt_sub_ggplot["assigned_cl_id"] = as.character(assigned_cl_id)
median_val_for_dotted_line = median(dt_sub_ggplot[which(dt_sub_ggplot$assigned_cl_id == ref_for_ks_test), "col_to_plot"])
print(head(dt_sub_ggplot))
dt_sub_ggplot$assigned_cl_id = factor(dt_sub_ggplot$assigned_cl_id,
levels = to_order_list, ordered = T)
write.table(median_df, args[20], col.names = T, row.names = F, sep = "\t", quote = F)
#boxplot (dt_sub_l_ordered, main = label_title, xlab = x_label, ylab = y_label,
# frame = F, cex = 0.5, notch = T, ylim = c(min(min_points), max(max_points) + 1.5))
# get the upper and lower limits of boxplot y-axis, from boxplot stats
bxplt_dt_sub = boxplot(dt_sub_l_ordered, plot = F)
#write.table(bxplt_dt_sub, paste0(out_file, ".bxplt.tsv"))
#print (bxplt_dt_sub)
min_for_boxplot = min(bxplt_dt_sub$stats[1,])
max_for_boxplot = max(bxplt_dt_sub$stats[5,])
chip_ref = dt_sub[which(dt_sub$Cluster == ref_for_ks_test), "Mean.ChIP"]
chip_ref = chip_ref[which (chip_ref >= min_for_boxplot & chip_ref <= max_for_boxplot) ]
median_val_for_dotted_line = median(dt_sub[which(dt_sub$Cluster == ref_for_ks_test), "Mean.ChIP"])
#median_val_for_dotted_line = median(chip_ref)
kk = ggplot(dt_sub_ggplot,
aes (x = col_to_group, y = col_to_plot, fill = col_to_group, alpha = 0.5)) +
geom_boxplot(width = 0.6, notch = T, show.legend = F, position=position_dodge(width=0.6)) +
xlab (x_label) + ylab (y_label) +
ylim (c (min_for_boxplot, max_for_boxplot)) +
geom_rangeframe() + theme_few() +
ggtitle(plot_title) + theme(plot.title = element_text(hjust = 0.5)) +
#scale_x_discrete (limits = to_order_list) +
scale_fill_manual (breaks = to_order_list, values = color_list) +
scale_alpha(guide = 'none') + figure1_theme() +
scale_y_continuous(labels = scales::number_format(accuracy = 0.1))
# n = on besides each box
for ( i in seq(length(dt_sub_l_ordered)) ){
#grob <- grobTree(textGrob( paste0("n = ", counts[i]), x= i - 0.5, y=max_points[i]/2, hjust=0, gp=gpar(col="red", fontsize=13, fontface="italic")))
if (i < length(dt_sub_l_ordered)){
kk = kk + annotate ("text", x = i - 0.5, y = median_points[i] + 0.25, #max_points[i]/2,
label = paste0("n = ", counts[i], ", ", p_values_besides_n[i]),
angle = 90, size = 5, hjust = 0)}
else{
kk = kk + annotate ("text", x = i - 0.5, y = median_points[i] + 0.25, # max_points[i]/2,
label = paste0("n = ", counts[i]), angle = 90, size = 5, hjust=0)
}
#kk = kk + annotation_custom(grob)
}
# put p-values and lines
last = length(dt_sub_l_ordered)
last_minus_one = last - 1
#for (i in seq (last_minus_one)) {
# kk = kk + geom_segment(x = i, y = max(max_points[i], max_points[last]),
# xend = i, yend = max(max_points[i], max_points[last]) + (last - i)*0.5) +
# geom_segment(x = last, y = max(max_points[i], max_points[last]),
# xend = last, yend = max(max_points[i], max_points[last]) + (last - i)*0.5) +
# geom_segment(x = i, y = max(max_points[i], max_points[last]) + (last -i)*0.5,
# xend = last, yend = max(max_points[i], max_points[last]) + (last - i)*0.5) +
# annotate ("text", (last + i)/2, max(max_points[i], max_points[last]) + (last - i)*0.5 + 0.25,
# label = reassign_zeros[[as.character(i)]], size = 4)
#}
kk = kk + geom_hline (yintercept=median_val_for_dotted_line,
col = col_dotted_line, lty = 2, size = 1)
print (kk)
#legend ("topright", legend = line_labels_vec, col = color_vals, lty = line_vals)
dev.off()
pdf(args[16], height = 10.5, width = 7)
print (kk)
dev.off()
### making it dummy as of now ######
png(args[19], width = 5, height = 9.5, units = "in", res = 150)
plot(seq(1,5), seq(1,5))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels_warning.R
\name{labels_warning}
\alias{labels_warning}
\title{Get description label for a protein architecture identifier}
\usage{
labels_warning(arch_ids)
}
\arguments{
\item{arch_ids}{A string with the architecture electronic identifiers.}
}
\description{
Parses the architecture identifiers and extracts the corresponding labels.
}
\author{
Jose V. Die
}
| /man/labels_warning.Rd | permissive | NCBI-Hackathons/GeneHummus | R | false | true | 441 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels_warning.R
\name{labels_warning}
\alias{labels_warning}
\title{Get description label for a protein architecture identifier}
\usage{
labels_warning(arch_ids)
}
\arguments{
\item{arch_ids}{A string with the architecture electronic identifiers.}
}
\description{
Parses the architecture identifier and extract the corresponding labels.
}
\author{
Jose V. Die
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ns.R
\name{step_ns}
\alias{step_ns}
\alias{tidy.step_ns}
\title{Natural Spline Basis Functions}
\usage{
step_ns(
recipe,
...,
role = "predictor",
trained = FALSE,
objects = NULL,
deg_free = 2,
options = list(),
skip = FALSE,
id = rand_id("ns")
)
\method{tidy}{step_ns}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose which
variables are affected by the step. See \code{\link[=selections]{selections()}}
for more details. For the \code{tidy} method, these are not
currently used.}
\item{role}{For model terms created by this step, what analysis
role should they be assigned?. By default, the function assumes
that the new columns created from the original variables will be
used as predictors in a model.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{objects}{A list of \code{\link[splines:ns]{splines::ns()}} objects
created once the step has been trained.}
\item{deg_free}{The degrees of freedom.}
\item{options}{A list of options for \code{\link[splines:ns]{splines::ns()}}
which should not include \code{x} or \code{df}.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked
when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_ns} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with a column \code{terms} which is
the columns that will be affected.
}
\description{
\code{step_ns} creates a \emph{specification} of a recipe step
that will create new columns that are basis expansions of
variables using natural splines.
}
\details{
\code{step_ns} can create new features from a single variable
that enable fitting routines to model this variable in a
nonlinear manner. The extent of the possible nonlinearity is
determined by the \code{df} or \code{knot} arguments of
\code{\link[splines:ns]{splines::ns()}}. The original variables are removed
from the data and new columns are added. The naming convention
for the new variables is \code{varname_ns_1} and so on.
}
\examples{
data(biomass)
biomass_tr <- biomass[biomass$dataset == "Training",]
biomass_te <- biomass[biomass$dataset == "Testing",]
rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr)
with_splines <- rec \%>\%
step_ns(carbon, hydrogen)
with_splines <- prep(with_splines, training = biomass_tr)
expanded <- bake(with_splines, biomass_te)
expanded
}
\seealso{
\code{\link[=step_poly]{step_poly()}} \code{\link[=recipe]{recipe()}}
\code{\link[=prep.recipe]{prep.recipe()}} \code{\link[=bake.recipe]{bake.recipe()}}
}
\concept{basis_expansion}
\concept{preprocessing}
\keyword{datagen}
| /man/step_ns.Rd | no_license | jyuu/recipes | R | false | true | 3,330 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ns.R
\name{step_ns}
\alias{step_ns}
\alias{tidy.step_ns}
\title{Natural Spline Basis Functions}
\usage{
step_ns(
recipe,
...,
role = "predictor",
trained = FALSE,
objects = NULL,
deg_free = 2,
options = list(),
skip = FALSE,
id = rand_id("ns")
)
\method{tidy}{step_ns}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose which
variables are affected by the step. See \code{\link[=selections]{selections()}}
for more details. For the \code{tidy} method, these are not
currently used.}
\item{role}{For model terms created by this step, what analysis
role should they be assigned?. By default, the function assumes
that the new columns created from the original variables will be
used as predictors in a model.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{objects}{A list of \code{\link[splines:ns]{splines::ns()}} objects
created once the step has been trained.}
\item{deg_free}{The degrees of freedom.}
\item{options}{A list of options for \code{\link[splines:ns]{splines::ns()}}
which should not include \code{x} or \code{df}.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked
when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_ns} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} which is
the columns that will be affected and \code{id}.
}
\description{
\code{step_ns} creates a \emph{specification} of a recipe step
that will create new columns that are basis expansions of
variables using natural splines.
}
\details{
\code{step_ns} can create new features from a single variable
that enable fitting routines to model this variable in a
nonlinear manner. The extent of the possible nonlinearity is
determined by the \code{df} or \code{knot} arguments of
\code{\link[splines:ns]{splines::ns()}}. The original variables are removed
from the data and new columns are added. The naming convention
for the new variables is \code{varname_ns_1} and so on.
}
\examples{
data(biomass)
biomass_tr <- biomass[biomass$dataset == "Training",]
biomass_te <- biomass[biomass$dataset == "Testing",]
rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr)
with_splines <- rec \%>\%
step_ns(carbon, hydrogen)
with_splines <- prep(with_splines, training = biomass_tr)
expanded <- bake(with_splines, biomass_te)
expanded
}
\seealso{
\code{\link[=step_poly]{step_poly()}} \code{\link[=recipe]{recipe()}}
\code{\link[=prep.recipe]{prep.recipe()}} \code{\link[=bake.recipe]{bake.recipe()}}
}
\concept{basis_expansion}
\concept{preprocessing}
\keyword{datagen}
|
#' Fetch application settings from the Lykke public API
#'
#' Performs a GET request against the \code{AppSettings} endpoint of the
#' Lykke REST API and returns the parsed response body.
#'
#' @return The parsed content of the HTTP response, as produced by
#'   \code{httr::content()} (typically a list for a JSON body).
#' @export
#' @importFrom httr GET content
ApiAppSettingsGet <- function() {
  base_url <- "https://api.lykkex.com/api/"
  get_url <- paste0(base_url, "AppSettings", "?")
  res <- GET(get_url)
  # content() is from httr as well; it must be imported above or the call
  # fails when the function runs inside the package namespace.
  return(content(res))
}
| /R/ApiAppSettingsGet.r | no_license | voigtstefan/lykke | R | false | false | 228 | r | #' @export
#' @importFrom httr GET
ApiAppSettingsGet <- function() {
base_url <- "https://api.lykkex.com/api/"
get_url <- paste0(base_url, "AppSettings", "?")
res <- GET(get_url)
return(content(res))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcmc.R
\name{clean_state}
\alias{clean_state}
\title{Remove the derived quantities from state}
\usage{
clean_state(state)
}
\arguments{
\item{state}{A moprobit state container.}
}
\value{
The state container with derived quantities removed.
}
\description{
Remove the derived quantities from state
}
\seealso{
\code{\link[=derive_state]{derive_state()}}
}
| /man/clean_state.Rd | no_license | mculbert/moprobit | R | false | true | 434 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcmc.R
\name{clean_state}
\alias{clean_state}
\title{Remove the derived quantities from state}
\usage{
clean_state(state)
}
\arguments{
\item{state}{A moprobit state container.}
}
\value{
The state container with derived quantities removed.
}
\description{
Remove the derived quantities from state
}
\seealso{
\code{\link[=derive_state]{derive_state()}}
}
|
library(cellWise)

### Name: estLocScale
### Title: Estimate robust location and scale
### Aliases: estLocScale

### ** Examples

library(MASS)
set.seed(12345)
n <- 100
d <- 10
# Draw n observations from a d-dimensional standard normal.
# Use d here (not a hard-coded 10) so the example stays consistent
# when the dimension is changed.
X <- mvrnorm(n, rep(0, d), diag(d))
locScale <- estLocScale(X)
| /data/genthat_extracted_code/cellWise/examples/estLocScale.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 245 | r | library(cellWise)
### Name: estLocScale
### Title: Estimate robust location and scale
### Aliases: estLocScale
### ** Examples
library(MASS)
set.seed(12345)
n = 100; d = 10
X = mvrnorm(n, rep(0, 10), diag(10))
locScale = estLocScale(X)
|
\name{ApplyNaiveBayesClassifier}
\alias{ApplyNaiveBayesClassifier}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
ApplyNaiveBayesClassifier
}
\description{
Anwendung eines Naiven Bayes Klassifiers:
}
\usage{
ApplyNaiveBayesClassifier(Data,UniqueClasses,MeanPerClass,StdPerClass,WeightsPerClass, PlotIt=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Data}{
(1:d,1:n) the data n-dimensional data, cases in rows
}
\item{UniqueClasses}{
(1:NrOfClasses) Klassenbeziechnungen
} \item{MeanPerClass}{
(1:NrOfClasses,1:n) klassenbezogene Mittelwerte fuer jede Variable
} \item{StdPerClass}{
(1:NrOfClasses,1:n) klassenbezogene Std fuer jede Variable
} \item{WeightsPerClass}{
(1:NrOfClasses) relative Klassengroesse
}
\item{PlotIt}{
==T (default) plots are made
}
}
\value{
BayesCls(1:d) die Klassifizierung des naive bayes Klassifikators
}
\author{
Michael Thrun
}
| /DbtTools/classifiers/man/ApplyNaiveBayesClassifier.Rd | no_license | markus-flicke/KD_Projekt_1 | R | false | false | 1,023 | rd | \name{ApplyNaiveBayesClassifier}
\alias{ApplyNaiveBayesClassifier}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
ApplyNaiveBayesClassifier
}
\description{
Anwendung eines Naiven Bayes Klassifiers:
}
\usage{
ApplyNaiveBayesClassifier(Data,UniqueClasses,MeanPerClass,StdPerClass,WeightsPerClass, PlotIt=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Data}{
(1:d,1:n) the data n-dimensional data, cases in rows
}
\item{UniqueClasses}{
(1:NrOfClasses) Klassenbeziechnungen
} \item{MeanPerClass}{
(1:NrOfClasses,1:n) klassenbezogene Mittelwerte fuer jede Variable
} \item{StdPerClass}{
(1:NrOfClasses,1:n) klassenbezogene Std fuer jede Variable
} \item{WeightsPerClass}{
(1:NrOfClasses) relative Klassengroesse
}
\item{PlotIt}{
==T (default) plots are made
}
}
\value{
BayesCls(1:d) die Klassifizierung des naive bayes Klassifikators
}
\author{
Michael Thrun
}
|
####################################################################3
# Gather results and plot model diagnostics
#
# For each trait x species combination, load the brms model with the highest
# LOO weight, collect posterior slope draws and R^2 summaries, and write a
# multi-page PDF of MCMC diagnostics.
#
# Inputs:  output/model_weights.csv, models/<trait>_<species>_.rds
# Outputs: visuals/BRMS_diagnostics.pdf, output/r2s.csv, output/slopes.csv
library(brms)
library(tidyverse)
library(performance, lib.loc = "/projappl/project_2003061/Rpackages/")
library(bayesplot)
library(tidybayes, lib.loc = "/projappl/project_2003061/Rpackages/")
library(posterior)

res <- read_csv("output/model_weights.csv")

# Keep, per trait and species, only the model with the largest LOO weight.
bests <- res %>% group_by(traits, species) %>%
  slice_max(loo_weight) %>% as.data.frame()

files <- paste0(bests$mod, "_.rds")

slopes <- tibble()
r2s <- tibble()
pdf("visuals/BRMS_diagnostics.pdf", 12, 10)
for(i in files){
  print(i)
  # File names look like "<trait>_<species>_.rds"; split on "_" to recover them.
  # NOTE(review): gsub(".rds", ...) treats "." as a regex wildcard — harmless
  # here but worth confirming against actual file names.
  spec <- gsub(".rds","",str_split(i, "_")[[1]][2])
  trait <- str_split(i, "_")[[1]][1]
  
  m1 <- readRDS(paste0("./models/",i))
  
  # Accumulate posterior draws for the fixed-effect slopes of interest.
  slopes <- m1 %>%
    gather_draws(`b_.*`, regex=TRUE) %>%
    filter(.variable %in% c("b_scd", "b_snow_depth", "b_moist_mean_7_8",
                            "b_T3_mean_7_8","b_T1_mean_7_8")) %>%
    mutate(trait = trait,
           species = spec) %>%
    bind_rows(slopes, .)
  
  # Diagnostic plots: traces, marginal histograms, R-hat, effective sample
  # size ratio, autocorrelation, and posterior predictive check.
  print(mcmc_plot(m1, type = "trace") + ggtitle(paste0(trait, " ", spec)))
  print(mcmc_plot(m1, type = "hist") + ggtitle(paste0(trait, " ", spec)))
  
  rhat_vals <- bayesplot::rhat(m1)
  print(mcmc_rhat(rhat_vals[1:10]) + theme_bw() + ggtitle(paste0(trait, " ", spec)))
  neff_vals <- neff_ratio(m1)
  print(mcmc_neff(neff_vals[1:10]) + theme_bw() + ggtitle(paste0(trait, " ", spec)))
  
  print(mcmc_acf(as_draws_df(m1) %>% select(starts_with("b_"))) + ggtitle(paste0(trait, " ", spec)))
  
  print(brms::pp_check(m1, ndraws = 100) + ggtitle(paste0(trait, " ", spec)))
  
  # R^2 summaries: Bayesian R^2 and LOO-adjusted R^2, each with and without
  # group-level effects (re.form = NULL includes them, re.form = NA drops them).
  r2s <- bind_rows(r2s,
                   bind_rows(bayes_R2(m1, re.form = NULL) %>% as_tibble() %>% mutate(type = "fit_total"),
                             bayes_R2(m1, re.form = NA) %>% as_tibble() %>% mutate(type = "fit_fixed")) %>%
                     mutate(species = spec, trait = trait,
                            n_obs = nrow(m1$data)),
                   bind_rows(loo_R2(m1, re.form = NULL) %>% as_tibble() %>% mutate(type = "loo_total"),
                             loo_R2(m1, re.form = NA) %>% as_tibble() %>% mutate(type = "loo_fixed")) %>%
                     mutate(species = spec, trait = trait,
                            n_obs = nrow(m1$data)))
  
  # Pareto-k diagnostics from LOO; values above 0.5 (dashed line) are suspect.
  m1 <- add_criterion(m1, c("loo"))
  print(tibble(pareto_k = m1$criteria$loo$diagnostics$pareto_k) %>%
          ggplot(aes(x = pareto_k)) +
          geom_vline(xintercept = .5, linetype = 2) +
          stat_dots() + ggtitle(paste0(trait, " ", spec)))
  
}
dev.off()

write_csv(r2s, "output/r2s.csv")
write_csv(slopes, "output/slopes.csv")
| /scripts/08_gather_modelling_results.R | no_license | poniitty/ITV_grids | R | false | false | 2,634 | r | ####################################################################3
# Gather results and plot model diagnostics
library(brms)
library(tidyverse)
library(performance, lib.loc = "/projappl/project_2003061/Rpackages/")
library(bayesplot)
library(tidybayes, lib.loc = "/projappl/project_2003061/Rpackages/")
library(posterior)
res <- read_csv("output/model_weights.csv")
bests <- res %>% group_by(traits, species) %>%
slice_max(loo_weight) %>% as.data.frame()
files <- paste0(bests$mod, "_.rds")
slopes <- tibble()
r2s <- tibble()
pdf("visuals/BRMS_diagnostics.pdf", 12, 10)
for(i in files){
print(i)
spec <- gsub(".rds","",str_split(i, "_")[[1]][2])
trait <- str_split(i, "_")[[1]][1]
m1 <- readRDS(paste0("./models/",i))
slopes <- m1 %>%
gather_draws(`b_.*`, regex=TRUE) %>%
filter(.variable %in% c("b_scd", "b_snow_depth", "b_moist_mean_7_8",
"b_T3_mean_7_8","b_T1_mean_7_8")) %>%
mutate(trait = trait,
species = spec) %>%
bind_rows(slopes, .)
print(mcmc_plot(m1, type = "trace") + ggtitle(paste0(trait, " ", spec)))
print(mcmc_plot(m1, type = "hist") + ggtitle(paste0(trait, " ", spec)))
rhat_vals <- bayesplot::rhat(m1)
print(mcmc_rhat(rhat_vals[1:10]) + theme_bw() + ggtitle(paste0(trait, " ", spec)))
neff_vals <- neff_ratio(m1)
print(mcmc_neff(neff_vals[1:10]) + theme_bw() + ggtitle(paste0(trait, " ", spec)))
print(mcmc_acf(as_draws_df(m1) %>% select(starts_with("b_"))) + ggtitle(paste0(trait, " ", spec)))
print(brms::pp_check(m1, ndraws = 100) + ggtitle(paste0(trait, " ", spec)))
r2s <- bind_rows(r2s,
bind_rows(bayes_R2(m1, re.form = NULL) %>% as_tibble() %>% mutate(type = "fit_total"),
bayes_R2(m1, re.form = NA) %>% as_tibble() %>% mutate(type = "fit_fixed")) %>%
mutate(species = spec, trait = trait,
n_obs = nrow(m1$data)),
bind_rows(loo_R2(m1, re.form = NULL) %>% as_tibble() %>% mutate(type = "loo_total"),
loo_R2(m1, re.form = NA) %>% as_tibble() %>% mutate(type = "loo_fixed")) %>%
mutate(species = spec, trait = trait,
n_obs = nrow(m1$data)))
m1 <- add_criterion(m1, c("loo"))
print(tibble(pareto_k = m1$criteria$loo$diagnostics$pareto_k) %>%
ggplot(aes(x = pareto_k)) +
geom_vline(xintercept = .5, linetype = 2) +
stat_dots() + ggtitle(paste0(trait, " ", spec)))
}
dev.off()
write_csv(r2s, "output/r2s.csv")
write_csv(slopes, "output/slopes.csv")
|
# Return the absolute, normalized path of the currently executing script.
#
# Works in two execution modes:
#   - source(): reads `ofile` from the calling frame two levels up;
#   - R CMD / Rscript: falls back to parsing the `--file=` command-line flag.
#
# NOTE(review): the `Printattava_teksti` argument ("text to print" in
# Finnish) is unused because the print call below is commented out; it is
# only evaluated lazily, which is why `op()` with no argument still works.
op <- function(Printattava_teksti){
full.fpath <- tryCatch(normalizePath(parent.frame(2)$ofile), # works when using source
error=function(e) # works when using R CMD
normalizePath(unlist(strsplit(commandArgs()[grep('^--file=', commandArgs())], '='))[2]))
#print(paste0(Printattava_teksti, ": ", full.fpath))
return(full.fpath)
}
# Demonstration call: prints/returns this script's own path.
op()
| /dev_tools/get_current_file_location.R | no_license | Laurigit/mstat | R | false | false | 384 | r | op <- function(Printattava_teksti){
full.fpath <- tryCatch(normalizePath(parent.frame(2)$ofile), # works when using source
error=function(e) # works when using R CMD
normalizePath(unlist(strsplit(commandArgs()[grep('^--file=', commandArgs())], '='))[2]))
#print(paste0(Printattava_teksti, ": ", full.fpath))
return(full.fpath)
}
op()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{calc_lda_r2}
\alias{calc_lda_r2}
\title{Calculate R-squared for a tidylda Model}
\usage{
calc_lda_r2(dtm, theta, phi, batch_size, ...)
}
\arguments{
\item{dtm}{must be of class dgCMatrix}
\item{theta}{a theta matrix}
\item{phi}{a phi matrix}
\item{batch_size}{for parallel processing}
\item{...}{other arguments passed to \code{\link[furrr]{future_map}}}
}
\description{
Formats inputs and hands off to mvrsquared::calc_rsquared
}
\keyword{internal}
| /man/calc_lda_r2.Rd | permissive | knapply/tidylda | R | false | true | 546 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{calc_lda_r2}
\alias{calc_lda_r2}
\title{Calculate R-squared for a tidylda Model}
\usage{
calc_lda_r2(dtm, theta, phi, batch_size, ...)
}
\arguments{
\item{dtm}{must be of class dgCMatrix}
\item{theta}{a theta matrix}
\item{phi}{a phi matrix}
\item{batch_size}{for parallel processing}
\item{...}{other arguments passed to \code{\link[furrr]{future_map}}}
}
\description{
Formats inputs and hands off to mvrsquared::calc_rsquared
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FilterDFNA.R
\name{FilterDFNA}
\alias{FilterDFNA}
\title{FilterDFNA -- a fast function to check and filter cases based on bad behavioral data. Used when fastSwE is set to true}
\usage{
FilterDFNA(external_df, notation)
}
\arguments{
\item{external_df}{The data frame used to extract data}
\item{notation}{The formula specified by the user}
}
\description{
This function is used to extract data from a data frame and convert to a design matrix. The matrix is then input into SwE for fast computations.
}
\examples{
X <- FilterDFNA(external_df,notation)
}
\keyword{estimator}
\keyword{fast}
\keyword{marginal}
\keyword{matrix}
\keyword{model}
\keyword{sandwich}
| /man/FilterDFNA.Rd | no_license | tjhendrickson/MarginalModelCIFTI | R | false | true | 739 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FilterDFNA.R
\name{FilterDFNA}
\alias{FilterDFNA}
\title{FilterDFNA -- a fast function to check and filter cases based on bad behavioral data. Used when fastSwE is set to true}
\usage{
FilterDFNA(external_df, notation)
}
\arguments{
\item{external_df}{The data frame used to extract data}
\item{notation}{The formula specified by the user}
}
\description{
This function is used to extract data from a data frame and convert to a design matrix. The matrix is then input into SwE for fast computations.
}
\examples{
X <- FilterDFNA(external_df,notation)
}
\keyword{estimator}
\keyword{fast}
\keyword{marginal}
\keyword{matrix}
\keyword{model}
\keyword{sandwich}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 7568
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7568
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_ntrivil_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2130
c no.of clauses 7568
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 7568
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_ntrivil_1344.qdimacs 2130 7568 E1 [] 0 16 2114 7568 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_ntrivil_1344/query09_ntrivil_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 711 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 7568
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7568
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_ntrivil_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2130
c no.of clauses 7568
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 7568
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_ntrivil_1344.qdimacs 2130 7568 E1 [] 0 16 2114 7568 NONE
|
# Read the input CSV and launch the interactive distance-analysis menu.
#
# Args:
#   filepath: path to a CSV file with numeric 'lat' and 'long' columns.
#
# Side effects: attaches the 'fields' package, which provides rdist()
# used by menu().
# NOTE(review): this shadows methods::initialize; consider renaming.
initialize <- function(filepath)
{
  library("fields")
  mydata1 <- read.csv(filepath)
  menu(mydata1)
}
#--------(A)----Displaying the Lat and Long Co-ordinates-------
# Interactive analysis menu.
#
# Computes six distance measures (Euclidean, Mahalanobis, city block,
# Minkowski p=3, Chebyshev, cosine) from every (long, lat) point in the
# data to the mean point P, reports the indexes of the 10 closest points
# per measure, then repeatedly prompts the user for a display action.
# The "loop" is implemented by tail-recursive calls to menu(mydata1).
#
# Args:
#   mydata1: data frame with numeric 'lat' and 'long' columns.
#            Branches 11-16 index columns 2 and 3 positionally —
#            assumes those are lat and long; TODO confirm.
#
# NOTE(review): this shadows utils::menu, and the recursion grows the call
# stack on every user action.
menu <- function(mydata1)
{
lat <- mydata1$lat
lon <- mydata1$long
coord <- cbind(lat,lon)
#--------(C)-------Correlation of the data----------
cor_dim <- cor(mydata1["lat"],mydata1["long"])
#--------(D)-------Mean of the Variables--------
mean_lat <- mean(mydata1$lat)
mean_long <- mean(mydata1$long)
# mat1: n x 2 data matrix (long, lat); mat2: the 1 x 2 mean point P.
mat1 <- matrix(c(mydata1$long,mydata1$lat),nrow=length(mydata1$long))
mat2 <- matrix(c(mean_long,mean_lat),nrow=1,ncol=2)
colnames(mat2) <- c("long", "lat")
#-------------Distance measures-----------
# Euclidean distances from P to every row of mat1 (fields::rdist).
euclid <- rdist(mat2,mat1)
covar <- cov(mat1,mat1)
maha <- mahalanobis(mat1, mat2, covar, inverted = FALSE)
# City block (L1) distance, computed element by element.
cityblock <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
cityblock[i] = abs(mat1[i,1] - mat2[1])+ abs(mat1[i,2] - mat2[2])
}
#dist(rbind(mat1,mat2), method = "minkowski", diag = FALSE, upper = FALSE, p = 3)
# Minkowski distance with p = 3.
minwok <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
minwok[i] = ((abs(mat1[i,1] - mat2[1]))^3 + (abs(mat1[i,2] - mat2[2]))^3)^(1/3)
}
# Chebyshev (L-infinity) distance.
cheby <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
cheby[i] = max(abs(mat1[i,1] - mat2[1]), abs(mat1[i,2] - mat2[2]))
}
# NOTE(review): 'lsa' is attached but the cosine distance below is
# computed by hand rather than via lsa::cosine.
library("lsa")
cosi <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
cosi[i] = 1 - ((mat2[1]*mat1[i,1]+mat2[2]*mat1[i,2])/ (sqrt(abs(mat1[i,1]^2)+abs(mat1[i,2]^2)) *sqrt(abs(mat2[1]^2)+abs(mat2[2]^2))))
}
#-------Marking the 10 closest Points------------
# Indexes of the 10 smallest distances for each measure (ties may yield
# more or fewer than 10 indexes, since %in% matches values, not positions).
euc_close <- which(euclid %in% sort(euclid)[1:10])
maha_close <- which(maha %in% sort(maha)[1:10])
city_close <- which(cityblock %in% sort(cityblock)[1:10])
min_close <- which(minwok %in% sort(minwok)[1:10])
cheby_close <- which(cheby %in% sort(cheby)[1:10])
cos_close <- which(cosi %in% sort(cosi)[1:10])
cat("\nEuclidean closest point indexes :\n")
print(euc_close)
cat("\nMahalanobis closest point indexes :\n")
print(maha_close)
cat("\nCity block closest point indexes :\n")
print(city_close)
cat("\nMinkowski closest point indexes :\n")
print(min_close)
cat("\nChebyshev closest point indexes :\n")
print(cheby_close)
cat("\nCosine closest point indexes :\n")
print(cos_close)
# Prompt for the next action; every branch recurses back into menu().
n <- readline(prompt="Enter the number for the action: \n1.lat-long values\n2.2D plot\n3.Correlation\n4.P\n5.Euclidean\n6.Mahalanobis\n7.City Block\n8.Minkowski\n9.Chebyshev\n10.Cosine\n11.Euclidean2D\n12.Mahalanobis2D\n13.City Block2D\n14.Minkowski2D\n15.Chebyshev2D\n16.Cosine2D\n")
# Branches 1-10: print raw data, plots, or the distance vectors.
if(n==1)
{
cat("\nThe lat and long columns are :\n")
print(coord)
menu(mydata1)
}
else if(n==2)
{
plot(long~lat,data=mydata1,xlab="Latitude", ylab="Longitude",type="l")
menu(mydata1)
}
else if(n==3)
{
cat("\nThe Correlation is :\n")
print(cor_dim)
menu(mydata1)
}
else if(n==4)
{
cat("\nThe Point P :\n")
print(mat2)
menu(mydata1)
}
else if(n==5)
{
cat("\nThe Euclidean Distances are :\n")
print(euclid)
menu(mydata1)
}
else if(n==6)
{
cat("\nThe Mahanalobis Distances are :\n")
print(maha)
menu(mydata1)
}
else if(n==7)
{
cat("\nThe City Block Distances are :\n")
print(cityblock)
menu(mydata1)
}
else if(n==8)
{
cat("\nThe Minkowski Distances are :\n")
print(minwok)
menu(mydata1)
}
else if(n==9)
{
cat("\nThe Chebyshev Distances are :\n")
print(cheby)
menu(mydata1)
}
else if(n==10)
{
cat("\nThe Cosine Distances are :\n")
print(cosi)
menu(mydata1)
}
# Branches 11-16: scatter plot of P (blue cross) plus the 10 closest
# points (red circles) for the corresponding distance measure. Axis limits
# are hard-coded to the Virginia-area data — presumably tuned by eye.
else if(n==11)
{
plot_data <- matrix(c(0),nrow=length(euc_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.62),xlab="Latitude", ylab="Longitude",main="Euclidean Distance",type="p",pch=4,col="blue")
for(i in 1:length(euc_close))
{
plot_data[i,1] = mydata1[euc_close[i],2]
plot_data[i,2] = mydata1[euc_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Euclidean points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==12)
{
plot_data <- matrix(c(0),nrow=length(maha_close),ncol=2)
plot(mat2,xlim=c(-78.2,-76.6),ylim=c(36.9,40.8),xlab="Latitude", ylab="Longitude",main="Mahalanobis Distance",type="p",pch=4,col="blue")
for(i in 1:length(maha_close))
{
plot_data[i,1] = mydata1[maha_close[i],2]
plot_data[i,2] = mydata1[maha_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Mahalanobis points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==13)
{
plot_data <- matrix(c(0),nrow=length(city_close),ncol=2)
plot(mat2,xlim=c(-79.2,-78.1),ylim=c(37,37.5),xlab="Latitude", ylab="Longitude",main="City Block Distance",type="p",pch=4,col="blue")
for(i in 1:length(city_close))
{
plot_data[i,1] = mydata1[city_close[i],2]
plot_data[i,2] = mydata1[city_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest City Block points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==14)
{
plot_data <- matrix(c(0),nrow=length(min_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.62),xlab="Latitude", ylab="Longitude",main="Minkowski Distance",type="p",pch=4,col="blue")
for(i in 1:length(min_close))
{
plot_data[i,1] = mydata1[min_close[i],2]
plot_data[i,2] = mydata1[min_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Minkowski points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==15)
{
plot_data <- matrix(c(0),nrow=length(cheby_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.62),xlab="Latitude", ylab="Longitude",type="p",main="Chebyshev Distance",pch=4,col="blue")
for(i in 1:length(cheby_close))
{
plot_data[i,1] = mydata1[cheby_close[i],2]
plot_data[i,2] = mydata1[cheby_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Chebyshev points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==16)
{
plot_data <- matrix(c(0),nrow=length(cos_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.5),xlab="Latitude", ylab="Longitude",main="Cosine Distance",type="p",pch=4,col="blue")
for(i in 1:length(cos_close))
{
plot_data[i,1] = mydata1[cos_close[i],2]
plot_data[i,2] = mydata1[cos_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Cosine points are : \n")
print(plot_data)
menu(mydata1)
}
}
| /VariousAnalyticalDistancesofDataScience.R | no_license | Vipulvaibhav/CodeSamples | R | false | false | 7,161 | r | initialize <- function(filepath)
{
#if(!("fields" %in% rownames(installed.packages())))
#{
# require("fields")
#}
library("fields")
mydata1 <- read.csv(filepath)
menu(mydata1)
}
#--------(A)----Displaying the Lat and Long Co-ordinates-------
menu <- function(mydata1)
{
lat <- mydata1$lat
lon <- mydata1$long
coord <- cbind(lat,lon)
#--------(C)-------Correlation of the data----------
cor_dim <- cor(mydata1["lat"],mydata1["long"])
#--------(D)-------Mean of the Variables--------
mean_lat <- mean(mydata1$lat)
mean_long <- mean(mydata1$long)
mat1 <- matrix(c(mydata1$long,mydata1$lat),nrow=length(mydata1$long))
mat2 <- matrix(c(mean_long,mean_lat),nrow=1,ncol=2)
colnames(mat2) <- c("long", "lat")
#-------------Distance measures-----------
euclid <- rdist(mat2,mat1)
covar <- cov(mat1,mat1)
maha <- mahalanobis(mat1, mat2, covar, inverted = FALSE)
cityblock <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
cityblock[i] = abs(mat1[i,1] - mat2[1])+ abs(mat1[i,2] - mat2[2])
}
#dist(rbind(mat1,mat2), method = "minkowski", diag = FALSE, upper = FALSE, p = 3)
minwok <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
minwok[i] = ((abs(mat1[i,1] - mat2[1]))^3 + (abs(mat1[i,2] - mat2[2]))^3)^(1/3)
}
cheby <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
cheby[i] = max(abs(mat1[i,1] - mat2[1]), abs(mat1[i,2] - mat2[2]))
}
library("lsa")
cosi <- matrix(c(0),nrow=length(mydata1$lat))
for(i in 1:nrow(mat1))
{
cosi[i] = 1 - ((mat2[1]*mat1[i,1]+mat2[2]*mat1[i,2])/ (sqrt(abs(mat1[i,1]^2)+abs(mat1[i,2]^2)) *sqrt(abs(mat2[1]^2)+abs(mat2[2]^2))))
}
#-------Marking the 10 closest Points------------
euc_close <- which(euclid %in% sort(euclid)[1:10])
maha_close <- which(maha %in% sort(maha)[1:10])
city_close <- which(cityblock %in% sort(cityblock)[1:10])
min_close <- which(minwok %in% sort(minwok)[1:10])
cheby_close <- which(cheby %in% sort(cheby)[1:10])
cos_close <- which(cosi %in% sort(cosi)[1:10])
cat("\nEuclidean closest point indexes :\n")
print(euc_close)
cat("\nMahalanobis closest point indexes :\n")
print(maha_close)
cat("\nCity block closest point indexes :\n")
print(city_close)
cat("\nMinkowski closest point indexes :\n")
print(min_close)
cat("\nChebyshev closest point indexes :\n")
print(cheby_close)
cat("\nCosine closest point indexes :\n")
print(cos_close)
n <- readline(prompt="Enter the number for the action: \n1.lat-long values\n2.2D plot\n3.Correlation\n4.P\n5.Euclidean\n6.Mahalanobis\n7.City Block\n8.Minkowski\n9.Chebyshev\n10.Cosine\n11.Euclidean2D\n12.Mahalanobis2D\n13.City Block2D\n14.Minkowski2D\n15.Chebyshev2D\n16.Cosine2D\n")
if(n==1)
{
cat("\nThe lat and long columns are :\n")
print(coord)
menu(mydata1)
}
else if(n==2)
{
plot(long~lat,data=mydata1,xlab="Latitude", ylab="Longitude",type="l")
menu(mydata1)
}
else if(n==3)
{
cat("\nThe Correlation is :\n")
print(cor_dim)
menu(mydata1)
}
else if(n==4)
{
cat("\nThe Point P :\n")
print(mat2)
menu(mydata1)
}
else if(n==5)
{
cat("\nThe Euclidean Distances are :\n")
print(euclid)
menu(mydata1)
}
else if(n==6)
{
cat("\nThe Mahanalobis Distances are :\n")
print(maha)
menu(mydata1)
}
else if(n==7)
{
cat("\nThe City Block Distances are :\n")
print(cityblock)
menu(mydata1)
}
else if(n==8)
{
cat("\nThe Minkowski Distances are :\n")
print(minwok)
menu(mydata1)
}
else if(n==9)
{
cat("\nThe Chebyshev Distances are :\n")
print(cheby)
menu(mydata1)
}
else if(n==10)
{
cat("\nThe Cosine Distances are :\n")
print(cosi)
menu(mydata1)
}
else if(n==11)
{
plot_data <- matrix(c(0),nrow=length(euc_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.62),xlab="Latitude", ylab="Longitude",main="Euclidean Distance",type="p",pch=4,col="blue")
for(i in 1:length(euc_close))
{
plot_data[i,1] = mydata1[euc_close[i],2]
plot_data[i,2] = mydata1[euc_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Euclidean points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==12)
{
plot_data <- matrix(c(0),nrow=length(maha_close),ncol=2)
plot(mat2,xlim=c(-78.2,-76.6),ylim=c(36.9,40.8),xlab="Latitude", ylab="Longitude",main="Mahalanobis Distance",type="p",pch=4,col="blue")
for(i in 1:length(maha_close))
{
plot_data[i,1] = mydata1[maha_close[i],2]
plot_data[i,2] = mydata1[maha_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Mahalanobis points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==13)
{
plot_data <- matrix(c(0),nrow=length(city_close),ncol=2)
plot(mat2,xlim=c(-79.2,-78.1),ylim=c(37,37.5),xlab="Latitude", ylab="Longitude",main="City Block Distance",type="p",pch=4,col="blue")
for(i in 1:length(city_close))
{
plot_data[i,1] = mydata1[city_close[i],2]
plot_data[i,2] = mydata1[city_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest City Block points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==14)
{
plot_data <- matrix(c(0),nrow=length(min_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.62),xlab="Latitude", ylab="Longitude",main="Minkowski Distance",type="p",pch=4,col="blue")
for(i in 1:length(min_close))
{
plot_data[i,1] = mydata1[min_close[i],2]
plot_data[i,2] = mydata1[min_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Minkowski points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==15)
{
plot_data <- matrix(c(0),nrow=length(cheby_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.62),xlab="Latitude", ylab="Longitude",type="p",main="Chebyshev Distance",pch=4,col="blue")
for(i in 1:length(cheby_close))
{
plot_data[i,1] = mydata1[cheby_close[i],2]
plot_data[i,2] = mydata1[cheby_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Chebyshev points are : \n")
print(plot_data)
menu(mydata1)
}
else if(n==16)
{
plot_data <- matrix(c(0),nrow=length(cos_close),ncol=2)
plot(mat2,xlim=c(-79.1,-78.1),ylim=c(37,37.5),xlab="Latitude", ylab="Longitude",main="Cosine Distance",type="p",pch=4,col="blue")
for(i in 1:length(cos_close))
{
plot_data[i,1] = mydata1[cos_close[i],2]
plot_data[i,2] = mydata1[cos_close[i],3]
points(plot_data[i,1],plot_data[i,2],xlab="Latitude", ylab="Longitude",type="p",col = "red", pch=1)
}
cat("\nThe 10 closest Cosine points are : \n")
print(plot_data)
menu(mydata1)
}
}
|
# tocID <- "scripts/ABC-dbUtilities.R"
#
# Purpose: Database utilities for ABC learning units.
#
# Version 2.2
#
# Date: 2017-11 - 2020-10
# Author: Boris Steipe (boris.steipe@utoronto.ca)
#
# Versions:
# 2.2 Bugfixes
# 2.1 Add JSON export functions
# 2.0 Test all JSON import and prevent addition of duplicates. This
# is necessary for import of data from the public page
# 1.1 2020 Updates
# 1.0 Live version 2017
#
# Notes:
# There are no functions to modify or delete entries. To do either,
# recreate the database with correct data in the creation script. This is the
# preferred way that ensures the entire database can be reproduced by
# source()'ing its generating script.
#
# Inserting data goes only through the very most minimal validation steps. For
# production applications, more validation would need to be added, as well
# as an overall validation of database integrity
#
# ToDo:
#
# ==============================================================================
#TOC> ==========================================================================
#TOC>
#TOC> Section Title Line
#TOC> -------------------------------------------------------
#TOC> 1 INITIALISATIONS AND PARAMETERS 61
#TOC> 2 PACKAGES 66
#TOC> 3 FUNCTIONS 82
#TOC> 3.01 dbSanitizeSequence() 85
#TOC> 3.02 dbConfirmUnique() 120
#TOC> 3.03 dbInit() 138
#TOC> 3.04 dbAutoincrement() 178
#TOC> 3.05 dbAddProtein() 191
#TOC> 3.06 dbAddFeature() 227
#TOC> 3.07 dbAddTaxonomy() 258
#TOC> 3.08 dbAddAnnotation() 293
#TOC> 3.09 dbFetchUniProtSeq() 340
#TOC> 3.10 dbFetchPrositeFeatures() 386
#TOC> 3.11 node2text() 436
#TOC> 3.12 dbFetchNCBItaxData() 448
#TOC> 3.13 UniProtIDmap() 487
#TOC> 3.14 dbProt2JSON() 526
#TOC> 3.15 dbSeq2JSON() 611
#TOC> 3.16 dbRow2JSON() 640
#TOC> 4 TESTS 660
#TOC>
#TOC> ==========================================================================
# = 1 INITIALISATIONS AND PARAMETERS ======================================
doTESTS <- FALSE # run tests if TRUE
# = 2 PACKAGES ============================================================
if (! requireNamespace("jsonlite", quietly = TRUE)) {
install.packages("jsonlite")
}
if (! requireNamespace("httr", quietly = TRUE)) {
install.packages("httr")
}
if (! requireNamespace("xml2", quietly = TRUE)) {
install.packages("xml2")
}
# = 3 FUNCTIONS ===========================================================
# == 3.01 dbSanitizeSequence() =============================================
dbSanitizeSequence <- function(s, unambiguous = TRUE) {
  # Clean up a raw DNA or protein sequence:
  #  - flatten any list/vector structure into a plain character vector,
  #  - drop FASTA header lines (lines beginning with ">"),
  #  - strip every character that is not a letter, "-" (gap) or "*" (stop),
  #  - convert the result to uppercase.
  #
  # Parameters:
  #   s            chr   a DNA or protein sequence, possibly with extra text
  #   unambiguous  bool  if TRUE, stop() when the cleaned sequence contains
  #                      a letter that is an ambiguity code. This is likely
  #                      due to inadvertently including meta-data, such as
  #                      a FASTA header, with the sequence.
  # Note: since U is an ambiguity code for amino acid sequences, you need
  #       to set unambiguous = FALSE to process RNA sequences with Uracil.
  # Value: chr   a valid, uppercase, amino acid sequence
  #
  seqStr <- as.character(unlist(s))          # flatten complex input
  seqStr <- unlist(strsplit(seqStr, "\n"))   # one element per line
  seqStr <- seqStr[! grepl("^>", seqStr)]    # discard FASTA header lines
  seqStr <- paste(seqStr, collapse = "")     # join into a single string
  seqStr <- toupper(gsub("[^a-zA-Z*-]", "", seqStr))
  if (unambiguous) {
    ambPatt <- "([bjouxzBJOUXZ])"  # parentheses capture the first match
    badChar <- unlist(regmatches(seqStr, regexec(ambPatt, seqStr)))[1]
    if (! is.na(badChar)) {
      stop(paste("Input contains ambiguous codes(s): \"",
                 badChar, "\".", sep=""))
    }
  }
  return(seqStr)
}
# == 3.02 dbConfirmUnique() ================================================
dbConfirmUnique <- function(x) {
  # Confirm that a logical selection vector matches exactly one element.
  # Parameters:
  #   x  logical  a selection vector, e.g. produced by "=="
  # Value: x, unchanged, if x contains exactly one TRUE element;
  #        stop() with a "PANIC: ..." message otherwise.
  if (! is.logical(x)) {
    # Note: is.logical() tests the whole vector and returns a single value;
    # the previous any(! is.logical(x)) wrapper was a vectorized-looking no-op.
    stop("PANIC: Input is not a boolean vector.")
  }
  if (anyNA(x)) {
    # Guard explicitly: NA would make sum(x) NA and crash the checks below
    # with an uninformative "missing value where TRUE/FALSE needed" error.
    stop("PANIC: Input contains NA.")
  }
  if (sum(x) == 0) {
    stop("PANIC: No match found.")
  } else if (sum(x) > 1) {
    stop("PANIC: More than one match found.")
  } else {
    return(x)
  }
}
# == 3.03 dbInit() =========================================================
dbInit <- function() {
  # Return an empty instance of the protein database: a list holding a
  # version string and four empty data frames (protein, taxonomy,
  # annotation, feature).
  # The schema is here:
  # https://docs.google.com/presentation/d/13vWaVcFpWEOGeSNhwmqugj2qTQuH1eZROgxWdHGEMr0
  # Value: list  the empty database
  list(
    version = "1.0",
    protein = data.frame(ID         = numeric(),
                         name       = character(),
                         RefSeqID   = character(),
                         UniProtID  = character(),
                         taxonomyID = numeric(),
                         sequence   = character()),
    taxonomy = data.frame(ID      = numeric(),
                          species = character()),
    annotation = data.frame(ID        = numeric(),
                            proteinID = numeric(),
                            featureID = numeric(),
                            start     = numeric(),
                            end       = numeric()),
    feature = data.frame(ID          = numeric(),
                         name        = character(),
                         description = character(),
                         sourceDB    = character(),
                         accession   = character()))
}
# == 3.04 dbAutoincrement() ================================================
dbAutoincrement <- function(tb) {
  # Return a unique integer suitable as a primary key for table tb.
  # Parameters:
  #   tb  data frame  a database table with an ID column
  # Value:
  #   num  one more than the largest current value in tb$ID, or 1 for an
  #        empty table
  if (length(tb$ID) > 0) {
    max(tb$ID) + 1
  } else {
    1
  }
}
# == 3.05 dbAddProtein() ===================================================
dbAddProtein <- function(db, jsonDF) {
  # Add one or more protein entries to the database db if a protein with the
  # same name does not yet exist. This enforces that protein names are unique.
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  protein data imported into a data frame with
  #                       fromJSON()
  # Value: list  the updated database
  for (i in seq_along(jsonDF$name)) {
    isValid <- TRUE
    if (jsonDF$name[i] %in% db$protein$name) {
      # Bugfix: the format string previously had only three conversion
      # specifications for four arguments, so "Skipping this input." was
      # never printed (cf. the equivalent message in dbAddTaxonomy()).
      cat(sprintf("Note: Protein No. %d in the input is \"%s\", but %s%s\n",
                  i, jsonDF$name[i],
                  "a protein with this name already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      if (length(jsonDF$name) == 1) { # jsonlite:: oversimplifies a single
                                      # entry: re-collapse the sequence into
                                      # one string
        jsonDF$sequence <- paste(jsonDF$sequence, collapse = "")
      }
      x <- data.frame(ID = dbAutoincrement(db$protein),
                      name = jsonDF$name[i],
                      RefSeqID = jsonDF$RefSeqID[i],
                      UniProtID = jsonDF$UniProtID[i],
                      taxonomyID = as.integer(jsonDF$taxonomyID[i]),
                      sequence = dbSanitizeSequence(jsonDF$sequence[i]))
      db$protein <- rbind(db$protein, x)
    }
  }
  return(db)
}
# == 3.06 dbAddFeature() ===================================================
dbAddFeature <- function(db, jsonDF) {
  # Add one or more feature entries to the database db. Skip if a feature
  # with the same name already exists (feature names are unique).
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  feature data imported into a data frame with
  #                       fromJSON()
  # Value: list  the updated database
  for (i in seq_along(jsonDF$name)) {
    isValid <- TRUE
    if (jsonDF$name[i] %in% db$feature$name) {
      # Bugfix: the format string previously had only three conversion
      # specifications for four arguments, so "Skipping this input." was
      # never printed.
      cat(sprintf("Note: Feature No. %d in the input is \"%s\", but %s%s\n",
                  i, jsonDF$name[i],
                  "a feature with this name already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      x <- data.frame(ID = dbAutoincrement(db$feature),
                      name = jsonDF$name[i],
                      description = jsonDF$description[i],
                      sourceDB = jsonDF$sourceDB[i],
                      accession = jsonDF$accession[i])
      db$feature <- rbind(db$feature, x)
    }
  }
  return(db)
}
# == 3.07 dbAddTaxonomy() ==================================================
dbAddTaxonomy <- function(db, jsonDF) {
  # Add one or more taxonomy entries to the database db. Skip if the species
  # name or the taxonomy ID already exist in the database.
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  taxonomy data imported into a data frame with
  #                       fromJSON(); columns: ID, species
  # Value: list  the updated database
  for (i in seq_along(jsonDF$species)) {
    isValid <- TRUE
    if (jsonDF$species[i] %in% db$taxonomy$species) {
      # Bugfix: this message previously interpolated jsonDF$name[i], but the
      # taxonomy input has no "name" column (only ID and species); the
      # zero-length argument made sprintf() return character(0), so the
      # duplicate-species note was silently lost.
      cat(sprintf("Note: Species No. %d in the input is \"%s\", but %s%s\n",
                  i, jsonDF$species[i],
                  "a species with this name already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    } else if (jsonDF$ID[i] %in% db$taxonomy$ID) {
      cat(sprintf("Note: Taxonomy ID No. %d in the input is \"%d\", but %s%s\n",
                  i, jsonDF$ID[i],
                  "this taxonomy ID already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      x <- data.frame(
        ID = as.integer(jsonDF$ID[i]),
        species = jsonDF$species[i])
      db$taxonomy <- rbind(db$taxonomy, x)
    }
  }
  return(db)
}
# == 3.08 dbAddAnnotation() ================================================
dbAddAnnotation <- function(db, jsonDF) {
  # Add one or more annotation entries to the database db. Skip an entry if
  # an identical annotation (same protein, feature, start, end) already
  # exists in the database.
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  annotation data imported into a data frame with
  #                       fromJSON(); columns: pName, fName, start, end
  # Value: list  the updated database
  for (i in seq_along(jsonDF$pName)) {
    isValid <- TRUE
    sel <- jsonDF$pName[i] == db$protein$name
    sel <- dbConfirmUnique(sel) # confirm that this protein exists, uniquely
    pID <- db$protein$ID[sel]
    sel <- jsonDF$fName[i] == db$feature$name
    sel <- dbConfirmUnique(sel) # confirm that this feature exists, uniquely
    fID <- db$feature$ID[sel]
    sel <- db$annotation$proteinID == pID &
      db$annotation$featureID == fID &
      db$annotation$start == as.integer(jsonDF$start[i]) &
      db$annotation$end == as.integer(jsonDF$end[i])
    if (any(sel)) {
      # Bugfix: the format string previously had two %s for three string
      # arguments, so "Skipping this input." was never printed.
      cat(sprintf("Note: annotation No. %d in the input has %s%s%s\n",
                  i,
                  "the same protein name, feature name, start, and end ",
                  "as one that already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      x <- data.frame(ID = dbAutoincrement(db$annotation),
                      proteinID = pID,
                      featureID = fID,
                      start = as.integer(jsonDF$start[i]),
                      end = as.integer(jsonDF$end[i]))
      db$annotation <- rbind(db$annotation, x)
    }
  }
  return(db)
}
# == 3.09 dbFetchUniProtSeq() ==============================================
dbFetchUniProtSeq <- function(IDs) {
  # Fetch protein sequences from UniProt via one HTTP GET per ID.
  # Parameters:
  #   IDs  chr  a vector of UniProt IDs (accession numbers)
  # Value:
  #   chr  a vector of the same length as IDs. It contains sequences where
  #        the retrieval was successful, NA where it was not successful.
  #        The elements are named with the ID; the FASTA header lines are
  #        set as attribute "headers" ("" where retrieval failed).
  # NOTE(review): requires network access; there is no retry or timeout
  # handling, and no rate limiting between requests.
  BASE <- "http://www.uniprot.org/uniprot/"
  sq <- character()  # sequences, one per ID
  hd <- character()  # FASTA header lines, one per ID
  for (i in seq_along(IDs)) {
    URL <- sprintf("%s%s.fasta", BASE, IDs[i])
    response <- httr::GET(URL)
    if (httr::status_code(response) == 200) {   # retrieval succeeded
      s <- as.character(response)     # response body as a single string
      s <- unlist(strsplit(s, "\n"))  # split into individual lines
      x <- dbSanitizeSequence(s)      # drops the ">" header line(s)
    } else {                                    # retrieval failed
      s <- ""
      x <- NA
    }
    hd[i] <- s[1]   # first line is the FASTA header ("" on failure)
    sq[i] <- x      # cleaned sequence (NA on failure)
  }
  names(sq) <- IDs
  attr(sq, "headers") <- hd   # headers travel along as an attribute
  return(sq)
}
if (FALSE) {
inp <- c("P79073", "P0000000", "A0A1W2TKZ7")
s <- dbFetchUniProtSeq(inp)
s[1:3]
str(s)
attr(s, "headers")[1]
}
# == 3.10 dbFetchPrositeFeatures() =========================================
dbFetchPrositeFeatures <- function(ID) {
  # Fetch feature annotations for one protein from ScanProsite.
  # Parameters:
  #   ID  chr  a UniProt ID (accession number)
  # Value:
  #   data frame   uID     chr  UniProt ID
  #                start   num  start of motif
  #                end     num  end of motif
  #                psID    chr  PROSITE motif ID
  #                psName  chr  PROSITE motif name
  #                psSeq   chr  sequence annotated to the feature
  # If the operation is not successful, a 0-length data frame is returned.
  # NOTE(review): requires network access; the parsing below depends on the
  # exact column layout of ScanProsite's tabular output - verify against the
  # service before relying on the token indices.
  URL <- "https://prosite.expasy.org/cgi-bin/prosite/PSScan.cgi"
  response <- httr::POST(URL,
                         body = list(meta = "opt1",          # ScanProsite
                                     meta1_protein = "opt1", #   form fields
                                     seq = ID,
                                     skip = "on",
                                     output = "tabular"))    # tab-separated result
  myFeatures <- data.frame()
  if (httr::status_code(response) == 200) {   # 200: OK
    # split the response body into lines, then keep only lines containing
    # "|<ID>|" - these are the feature rows for our protein
    lines <- unlist(strsplit(httr::content(response, "text"), "\\n"))
    patt <- sprintf("\\|%s\\|", ID)
    lines <- lines[grep(patt, lines)]
    for (line in lines) {
      # fields are delimited by tabs and "|" characters; token positions
      # are fixed by the tabular output format
      tokens <- unlist(strsplit(line, "\\t|\\|"))
      myFeatures <- rbind(myFeatures,
                          data.frame(uID = tokens[2],
                                     start = as.numeric(tokens[4]),
                                     end = as.numeric(tokens[5]),
                                     psID = tokens[6],
                                     psName = tokens[7],
                                     psSeq = tokens[11]))
    }
  }
  return(myFeatures)
}
if (FALSE) {
dbFetchPrositeFeatures("P33520") # RES1_SCHPO
}
# == 3.11 node2text() ======================================================
node2text <- function(doc, tag) {
  # Extract the text contents of all elements with a given tag from an XML
  # document.
  # Parameters:
  #   doc  xml document  as returned by xml2::read_xml()
  #   tag  chr           the element (tag) name to search for
  # Value: chr  one string per matching element
  xPath <- paste0("//", tag)
  xml2::xml_text(xml2::xml_find_all(doc, xPath))
}
# == 3.12 dbFetchNCBItaxData() =============================================
dbFetchNCBItaxData <- function(ID) {
  # Fetch the taxonomy ID and organism for a protein accession from NCBI.
  # Two eUtils requests are made: esearch resolves the accession to an
  # NCBI-internal GID, then esummary retrieves TaxId and Organism for it.
  # Parameters:
  #   ID  chr  a RefSeq ID (accession number)
  # Value:
  #   data frame   taxID     num  NCBI taxID
  #                organism  chr  organism for this taxID
  # If the operation is not successful, a 0-length data frame is returned.
  # NOTE(review): requires network access; an HTTP failure will error inside
  # xml2::read_xml() rather than returning the empty data frame.
  eUtilsBase <- "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
  URL <- paste(eUtilsBase,
               "esearch.fcgi?",      # step 1: search the protein database
               "db=protein",
               "&term=", ID,
               sep="")
  myXML <- xml2::read_xml(URL)
  GID <- node2text(myXML, "Id")      # NCBI-internal GID(s) for this accession
  URL <- paste0(eUtilsBase,
                "esummary.fcgi?",    # step 2: fetch the document summary
                "db=protein",
                "&id=",
                GID,
                "&version=2.0")
  myXML <- xml2::read_xml(URL)
  x <- as.integer(node2text(myXML, "TaxId"))
  y <- node2text(myXML, "Organism")
  tID <- data.frame()
  if (length(x) > 0 && length(y) > 0) {   # both values were found
    tID <- data.frame(taxID = x, organism = y)
  }
  return(tID)
}
# == 3.13 UniProtIDmap() ===================================================
UniProtIDmap <- function (s, mapFrom = "P_REFSEQ_AC", mapTo = "ACC") {
  # Use the UniProt ID mapping service to map one or more IDs.
  # Parameters:
  #   s        chr  a string of white-space separated IDs
  #   mapFrom  chr  the database in which the IDs in s are valid.
  #                 Default is RefSeq protein.
  #   mapTo    chr  the database in which the target IDs are valid.
  #                 Default is UniProtKB.
  # Value:
  #   A data frame of mapped IDs, with column names From and To, or an
  #   empty data frame if the mapping was unsuccessful. No rows are returned
  #   for IDs that are not mapped.
  # NOTE(review): UniProt has retired the /uploadlists/ endpoint in favour
  # of the REST id-mapping API - verify this URL still responds before use.
  # Initialize curl: http_version = 0 lets libcurl pick the HTTP version
  # (presumably a workaround for HTTP/2 issues - confirm if still needed)
  httr::set_config(httr::config(http_version = 0))
  URL <- "https://www.uniprot.org/uploadlists/"
  response <- httr::POST(URL,
                         body = list(from = mapFrom,
                                     to = mapTo,
                                     format = "tab",
                                     query = s))
  if (httr::status_code(response) == 200) { # 200: oK
    myMap <- read.delim(file = textConnection(httr::content(response)),
                        sep = "\t")
    colnames(myMap) <- c("From", "To")
  } else {
    myMap <- data.frame()
    warning(paste("No uniProt ID mapping returned:",
                  "server sent status",
                  httr::status_code(response)))
  }
  return(myMap)
}
# == 3.14 dbProt2JSON() ====================================================
dbProt2JSON <- function(thisProt) {
  # Extract all data related to one protein from the database and return it
  # as a JSON-formatted string (protein, taxonomy, annotation, feature).
  # Parameters:
  #   thisProt  chr  the (unique) name of a protein in the database
  # Value: chr  one JSON-formatted string with embedded newlines
  # NOTE(review): reads the GLOBAL database object myDB rather than taking
  # the database as a parameter - myDB must exist in the calling environment.
  thisData <- list()
  # add a protein table
  sel <- which(myDB$protein$name == thisProt)
  thisData$protein <- myDB$protein[sel, ]
  # add a taxonomy table
  sel <- which(myDB$taxonomy$ID == thisData$protein$taxonomyID)
  thisData$taxonomy <- myDB$taxonomy[sel, ]
  # add the entries for this protein from the annotation table
  sel <- which(myDB$annotation$proteinID == thisData$protein$ID)
  thisData$annotation <- myDB$annotation[sel, ]
  # our .json convention uses pName and fName as keys, not the db-internal IDs
  # add empty columns for pName and fName
  l <- nrow(thisData$annotation)
  thisData$annotation$pName <- character(l)
  thisData$annotation$fName <- character(l)
  # get the appropriate protein and feature names by resolving the
  # db-internal foreign keys
  for (i in seq_len(l)) {
    pID <- thisData$annotation$proteinID[i]
    sel <- which(myDB$protein$ID == pID)
    thisData$annotation$pName[i] <- myDB$protein$name[sel]  # store pName
    fID <- thisData$annotation$featureID[i]
    sel <- which(myDB$feature$ID == fID)
    thisData$annotation$fName[i] <- myDB$feature$name[sel]  # store fName
  }
  # add the corresponding feature table
  sel <- which(myDB$feature$ID %in% thisData$annotation$featureID)
  thisData$feature <- myDB$feature[sel, ]
  # remove columns that are not going into JSON output (internal IDs)
  thisData$protein$ID <- NULL
  thisData$annotation$ID <- NULL
  thisData$annotation$proteinID <- NULL
  thisData$annotation$featureID <- NULL
  thisData$feature$ID <- NULL
  # create JSON-formatted output, line by line
  # ( jsonlite::prettify() is too wordy for a compact Wikipage )
  out <- character()
  out <- c(out, '{')
  out <- c(out, ' "protein": {')
  # the sequence is handled separately (wrapped by dbSeq2JSON());
  # all other protein columns go through dbRow2JSON()
  sel <- colnames(thisData$protein) != "sequence"
  out <- c(out, sprintf(" %s,", dbRow2JSON(thisData$protein[1, sel],
                                           coll = ",\n ")))
  out <- c(out, dbSeq2JSON(thisData$protein$sequence[1]))
  out <- c(out, ' },')
  out <- c(out, ' "taxonomy": {')
  out <- c(out, sprintf(" %s", dbRow2JSON(thisData$taxonomy)))
  out <- c(out, ' },')
  out <- c(out, ' "annotation": [')
  for (i in seq_len(nrow(thisData$annotation))) {
    out <- c(out, sprintf(" {%s},", dbRow2JSON(thisData$annotation[i, ])))
  }
  out[length(out)] <- gsub(",$", "", out[length(out)]) # remove last ","
  out <- c(out, ' ],')
  out <- c(out, ' "feature": [')
  # the (long) description field is emitted on its own line for readability
  sel <- colnames(thisData$feature) != "description"
  for (i in seq_len(nrow(thisData$feature))) {
    out <- c(out, sprintf(" {%s,",
                          dbRow2JSON(thisData$feature[i, sel])))
    out <- c(out, sprintf(" %s},",
                          dbRow2JSON(thisData$feature[i, "description",
                                                      drop = FALSE])))
  }
  out[length(out)] <- gsub(",$", "", out[length(out)]) # remove last ","
  out <- c(out, ' ]')
  out <- c(out, '}')
  return(paste0(out, collapse = "\n"))
}
# == 3.15 dbSeq2JSON() =====================================================
dbSeq2JSON <- function(s, nIndents = 4, width = 70) {
  # Turn a sequence into a JSON key-value pair, with the value being a JSON
  # array of quoted strings not exceeding "width" sequence characters per
  # line, indented by nIndents spaces.
  # Parameters:
  #   s         chr  one or more sequence strings
  #   nIndents  num  number of spaces of indent for the "sequence" key
  #   width     num  maximum number of sequence characters per array element
  # Value: chr  a single string with embedded newlines
  ind <- paste0(rep(" ", nIndents), collapse = "")
  out <- character()
  out <- c(out, sprintf("%s\"sequence\": [", ind))
  for (i in seq_along(s)) {
    l <- nchar(s[i])
    if (l <= width) {
      # Bugfix: short sequences were previously appended bare (no quotes,
      # no indent, no trailing comma), which produced invalid JSON.
      out <- c(out, sprintf("%s  \"%s\",", ind, s[i]))
    } else {
      starts <- seq(1, l, by = width)
      ends   <- seq(width, l, by = width)
      if (length(ends) < length(starts)) { ends <- c(ends, l) } # last, partial chunk
      # sprintf() is vectorized over the chunks: one quoted line per chunk
      out <- c(out, sprintf("%s  \"%s\",", ind, substring(s[i], starts, ends)))
    }
  }
  out[length(out)] <- gsub(",$", "", out[length(out)])  # remove last ","
  out <- c(out, sprintf("%s]", ind))
  return(paste0(out, collapse = "\n"))
}
# == 3.16 dbRow2JSON() =====================================================
dbRow2JSON <- function(df, coll = ", ") {
  # Turn a single data frame row into a string of JSON key-value pairs,
  # where the keys are the column names. Respects character / numeric mode.
  # Parameters:
  #   df    data frame  a one-row data frame
  #   coll  chr         collapse string placed between key-value pairs
  # Value: chr  a single string, e.g. '"a": 1, "b": "x"' ("" for 0 columns)
  out <- character()
  # seq_len() (not 1:ncol(df)): a zero-column data frame now yields ""
  # instead of an out-of-bounds subscript error.
  for (i in seq_len(ncol(df))) {
    if (is.integer(df[1, i])) {          # idiomatic type checks replace
      val <- sprintf("%d", df[1, i])     # class(x) == "..." comparisons
    } else if (is.numeric(df[1, i])) {
      val <- sprintf("%f", df[1, i])
    } else {
      val <- sprintf("\"%s\"", as.character(df[1, i]))
    }
    out <- c(out, sprintf("\"%s\": %s", colnames(df)[i], val))
  }
  return(paste0(out, collapse = coll))
}
# = 4 TESTS ===============================================================
if (doTESTS) {
if (! requireNamespace("testthat", quietly = TRUE)) {
install.packages("testthat")
}
# ToDo: test everything here
}
# [END]
| /scripts/ABC-dbUtilities.R | no_license | liushangjian/ABC-units | R | false | false | 22,881 | r | # tocID <- "scripts/ABC-dbUtilities.R"
#
# Purpose: Database utilities for ABC learning units.
#
# Version 2.2
#
# Date: 2017-11 - 2020-10
# Author: Boris Steipe (boris.steipe@utoronto.ca)
#
# Versions:
# 2.2 Bugfixes
# 2.1 Add JSON export functions
# 2.0 Test all JSON import and prevent addition of duplicates. This
# is necessary for import of data from the public page
# 1.1 2020 Updates
# 1.0 Live version 2017
#
# Notes:
# There are no functions to modify or delete entries. To do either,
# recreate the database with correct data in the creation script. This is the
# preferred way that ensures the entire database can be reproduced by
# source()'ing its generating script.
#
# Inserting data goes only through the very most minimal validation steps. For
# production applications, more validation would need to be added, as well
# as an overall validation of database integrity
#
# ToDo:
#
# ==============================================================================
#TOC> ==========================================================================
#TOC>
#TOC> Section Title Line
#TOC> -------------------------------------------------------
#TOC> 1 INITIALISATIONS AND PARAMETERS 61
#TOC> 2 PACKAGES 66
#TOC> 3 FUNCTIONS 82
#TOC> 3.01 dbSanitizeSequence() 85
#TOC> 3.02 dbConfirmUnique() 120
#TOC> 3.03 dbInit() 138
#TOC> 3.04 dbAutoincrement() 178
#TOC> 3.05 dbAddProtein() 191
#TOC> 3.06 dbAddFeature() 227
#TOC> 3.07 dbAddTaxonomy() 258
#TOC> 3.08 dbAddAnnotation() 293
#TOC> 3.09 dbFetchUniProtSeq() 340
#TOC> 3.10 dbFetchPrositeFeatures() 386
#TOC> 3.11 node2text() 436
#TOC> 3.12 dbFetchNCBItaxData() 448
#TOC> 3.13 UniProtIDmap() 487
#TOC> 3.14 dbProt2JSON() 526
#TOC> 3.15 dbSeq2JSON() 611
#TOC> 3.16 dbRow2JSON() 640
#TOC> 4 TESTS 660
#TOC>
#TOC> ==========================================================================
# = 1 INITIALISATIONS AND PARAMETERS ======================================
doTESTS <- FALSE # run tests if TRUE
# = 2 PACKAGES ============================================================
if (! requireNamespace("jsonlite", quietly = TRUE)) {
install.packages("jsonlite")
}
if (! requireNamespace("httr", quietly = TRUE)) {
install.packages("httr")
}
if (! requireNamespace("xml2", quietly = TRUE)) {
install.packages("xml2")
}
# = 3 FUNCTIONS ===========================================================
# == 3.01 dbSanitizeSequence() =============================================
dbSanitizeSequence <- function(s, unambiguous = TRUE) {
# Remove FASTA header lines, if any,
# flatten any structure that s has,
# remove all non-letters except "-" (gap) and "*" (stop),
# convert to uppercase.
#
# Parameters:
# s chr A DNA or protein sequence plus other characters
# unambiguous bool if TRUE, stop() if any letter remaining after
# processing matches an ambiguity code. This is likely
# due to inadvertently including meta-data, such as
# a FASTA header, with the sequence.
# Note: since U is an ambiguity code for amino acid sequences, you need
# to set unambiguous = FALSE to process RNA sequences with Uracil.
# Value: chr a valid, uppercase, amino acid sequence
#
s <- as.character(unlist(s)) # convert complex object to plain chr vector
s <- unlist(strsplit(s, "\n")) # split up at linebreaks, if any
s <- s[! grepl("^>", s)] # drop all lines beginning">" (FASTA header)
s <- paste(s, collapse="") # combine into single string
s <- toupper(gsub("[^a-zA-Z*-]", "", s))
if (unambiguous) {
amb <- "([bjouxzBJOUXZ])" # parentheses capture the match
ambChar <- unlist(regmatches(s, regexec(amb, s)))[1]
if (! is.na(ambChar)) {
stop(paste("Input contains ambiguous codes(s): \"",
ambChar, "\".", sep=""))
}
}
return(s)
}
# == 3.02 dbConfirmUnique() ================================================
dbConfirmUnique <- function(x) {
  # Confirm that a logical selection vector matches exactly one element.
  # Parameters:
  #   x  logical  a selection vector, e.g. produced by "=="
  # Value: x, unchanged, if x contains exactly one TRUE element;
  #        stop() with a "PANIC: ..." message otherwise.
  if (! is.logical(x)) {
    # Note: is.logical() tests the whole vector and returns a single value;
    # the previous any(! is.logical(x)) wrapper was a vectorized-looking no-op.
    stop("PANIC: Input is not a boolean vector.")
  }
  if (anyNA(x)) {
    # Guard explicitly: NA would make sum(x) NA and crash the checks below
    # with an uninformative "missing value where TRUE/FALSE needed" error.
    stop("PANIC: Input contains NA.")
  }
  if (sum(x) == 0) {
    stop("PANIC: No match found.")
  } else if (sum(x) > 1) {
    stop("PANIC: More than one match found.")
  } else {
    return(x)
  }
}
# == 3.03 dbInit() =========================================================
dbInit <- function() {
# Return an empty instance of the protein database
# The schema is here:
# https://docs.google.com/presentation/d/13vWaVcFpWEOGeSNhwmqugj2qTQuH1eZROgxWdHGEMr0
db <- list()
db$version <- "1.0"
db$protein <- data.frame(
ID = numeric(),
name = character(),
RefSeqID = character(),
UniProtID = character(),
taxonomyID = numeric(),
sequence = character())
db$taxonomy <- data.frame(
ID = numeric(),
species = character())
db$annotation <- data.frame(
ID = numeric(),
proteinID = numeric(),
featureID = numeric(),
start = numeric(),
end = numeric())
db$feature <- data.frame(
ID = numeric(),
name = character(),
description = character(),
sourceDB = character(),
accession = character())
return(db)
}
# == 3.04 dbAutoincrement() ================================================
dbAutoincrement <- function(tb) {
# Return a unique integer that can be used as a primary key
# Value:
# num a number one-larger than the largest current value in table$ID
if (length(tb$ID) == 0) {
return(1)
} else {
return(max(tb$ID) + 1)
}
}
# == 3.05 dbAddProtein() ===================================================
dbAddProtein <- function(db, jsonDF) {
  # Add one or more protein entries to the database db if a protein with the
  # same name does not yet exist. This enforces that protein names are unique.
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  protein data imported into a data frame with
  #                       fromJSON()
  # Value: list  the updated database
  for (i in seq_along(jsonDF$name)) {
    isValid <- TRUE
    if (jsonDF$name[i] %in% db$protein$name) {
      # Bugfix: the format string previously had only three conversion
      # specifications for four arguments, so "Skipping this input." was
      # never printed (cf. the equivalent message in dbAddTaxonomy()).
      cat(sprintf("Note: Protein No. %d in the input is \"%s\", but %s%s\n",
                  i, jsonDF$name[i],
                  "a protein with this name already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      if (length(jsonDF$name) == 1) { # jsonlite:: oversimplifies a single
                                      # entry: re-collapse the sequence into
                                      # one string
        jsonDF$sequence <- paste(jsonDF$sequence, collapse = "")
      }
      x <- data.frame(ID = dbAutoincrement(db$protein),
                      name = jsonDF$name[i],
                      RefSeqID = jsonDF$RefSeqID[i],
                      UniProtID = jsonDF$UniProtID[i],
                      taxonomyID = as.integer(jsonDF$taxonomyID[i]),
                      sequence = dbSanitizeSequence(jsonDF$sequence[i]))
      db$protein <- rbind(db$protein, x)
    }
  }
  return(db)
}
# == 3.06 dbAddFeature() ===================================================
dbAddFeature <- function(db, jsonDF) {
  # Add one or more feature entries to the database db. Skip if a feature
  # with the same name already exists (feature names are unique).
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  feature data imported into a data frame with
  #                       fromJSON()
  # Value: list  the updated database
  for (i in seq_along(jsonDF$name)) {
    isValid <- TRUE
    if (jsonDF$name[i] %in% db$feature$name) {
      # Bugfix: the format string previously had only three conversion
      # specifications for four arguments, so "Skipping this input." was
      # never printed.
      cat(sprintf("Note: Feature No. %d in the input is \"%s\", but %s%s\n",
                  i, jsonDF$name[i],
                  "a feature with this name already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      x <- data.frame(ID = dbAutoincrement(db$feature),
                      name = jsonDF$name[i],
                      description = jsonDF$description[i],
                      sourceDB = jsonDF$sourceDB[i],
                      accession = jsonDF$accession[i])
      db$feature <- rbind(db$feature, x)
    }
  }
  return(db)
}
# == 3.07 dbAddTaxonomy() ==================================================
dbAddTaxonomy <- function(db, jsonDF) {
  # Add one or more taxonomy entries to the database db. Skip if the species
  # name or the taxonomy ID already exist in the database.
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  taxonomy data imported into a data frame with
  #                       fromJSON(); columns: ID, species
  # Value: list  the updated database
  for (i in seq_along(jsonDF$species)) {
    isValid <- TRUE
    if (jsonDF$species[i] %in% db$taxonomy$species) {
      # Bugfix: this message previously interpolated jsonDF$name[i], but the
      # taxonomy input has no "name" column (only ID and species); the
      # zero-length argument made sprintf() return character(0), so the
      # duplicate-species note was silently lost.
      cat(sprintf("Note: Species No. %d in the input is \"%s\", but %s%s\n",
                  i, jsonDF$species[i],
                  "a species with this name already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    } else if (jsonDF$ID[i] %in% db$taxonomy$ID) {
      cat(sprintf("Note: Taxonomy ID No. %d in the input is \"%d\", but %s%s\n",
                  i, jsonDF$ID[i],
                  "this taxonomy ID already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      x <- data.frame(
        ID = as.integer(jsonDF$ID[i]),
        species = jsonDF$species[i])
      db$taxonomy <- rbind(db$taxonomy, x)
    }
  }
  return(db)
}
# == 3.08 dbAddAnnotation() ================================================
dbAddAnnotation <- function(db, jsonDF) {
  # Add one or more annotation entries to the database db. Skip an entry if
  # an identical annotation (same protein, feature, start, end) already
  # exists in the database.
  # Parameters:
  #   db      list        a database created with dbInit()
  #   jsonDF  data frame  annotation data imported into a data frame with
  #                       fromJSON(); columns: pName, fName, start, end
  # Value: list  the updated database
  for (i in seq_along(jsonDF$pName)) {
    isValid <- TRUE
    sel <- jsonDF$pName[i] == db$protein$name
    sel <- dbConfirmUnique(sel) # confirm that this protein exists, uniquely
    pID <- db$protein$ID[sel]
    sel <- jsonDF$fName[i] == db$feature$name
    sel <- dbConfirmUnique(sel) # confirm that this feature exists, uniquely
    fID <- db$feature$ID[sel]
    sel <- db$annotation$proteinID == pID &
      db$annotation$featureID == fID &
      db$annotation$start == as.integer(jsonDF$start[i]) &
      db$annotation$end == as.integer(jsonDF$end[i])
    if (any(sel)) {
      # Bugfix: the format string previously had two %s for three string
      # arguments, so "Skipping this input." was never printed.
      cat(sprintf("Note: annotation No. %d in the input has %s%s%s\n",
                  i,
                  "the same protein name, feature name, start, and end ",
                  "as one that already exists in the database. ",
                  "Skipping this input."))
      isValid <- FALSE
    }
    if (isValid) {
      x <- data.frame(ID = dbAutoincrement(db$annotation),
                      proteinID = pID,
                      featureID = fID,
                      start = as.integer(jsonDF$start[i]),
                      end = as.integer(jsonDF$end[i]))
      db$annotation <- rbind(db$annotation, x)
    }
  }
  return(db)
}
# == 3.09 dbFetchUniProtSeq() ==============================================
dbFetchUniProtSeq <- function(IDs) {
# Fetch a protein sequence from UniProt.
# Parameters:
# IDs char a vector of UniProt IDs (accession number)
# Value:
# char a vector of the same length as ID. It contains
# sequences where the retrieval was successful, NA where
# it was not successful. The elements are named with
# the ID, the header lines are set as attribute "header"
BASE <- "http://www.uniprot.org/uniprot/"
sq <- character()
hd <- character()
for (i in seq_along(IDs)) {
URL <- sprintf("%s%s.fasta", BASE, IDs[i])
response <- httr::GET(URL)
if (httr::status_code(response) == 200) {
s <- as.character(response)
s <- unlist(strsplit(s, "\n"))
x <- dbSanitizeSequence(s)
} else {
s <- ""
x <- NA
}
hd[i] <- s[1]
sq[i] <- x
}
names(sq) <- IDs
attr(sq, "headers") <- hd
return(sq)
}
if (FALSE) {
inp <- c("P79073", "P0000000", "A0A1W2TKZ7")
s <- dbFetchUniProtSeq(inp)
s[1:3]
str(s)
attr(s, "headers")[1]
}
# == 3.10 dbFetchPrositeFeatures() =========================================
dbFetchPrositeFeatures <- function(ID) {
# Fetch feature annotations from ScanProsite.
# Parameters:
# ID char a UniProt ID (accession number)
# Value:
# data frame uID char UniProt ID
# start num start of motif
# end num end of motif
# psID char PROSITE motif ID
# psName char PROSITE motif name
# psSeq char sequence annotated to the feature
# If the operation is not successful, a 0-length data frame is returned.
URL <- "https://prosite.expasy.org/cgi-bin/prosite/PSScan.cgi"
response <- httr::POST(URL,
body = list(meta = "opt1",
meta1_protein = "opt1",
seq = ID,
skip = "on",
output = "tabular"))
myFeatures <- data.frame()
if (httr::status_code(response) == 200) {
lines <- unlist(strsplit(httr::content(response, "text"), "\\n"))
patt <- sprintf("\\|%s\\|", ID)
lines <- lines[grep(patt, lines)]
for (line in lines) {
tokens <- unlist(strsplit(line, "\\t|\\|"))
myFeatures <- rbind(myFeatures,
data.frame(uID = tokens[2],
start = as.numeric(tokens[4]),
end = as.numeric(tokens[5]),
psID = tokens[6],
psName = tokens[7],
psSeq = tokens[11]))
}
}
return(myFeatures)
}
if (FALSE) {
dbFetchPrositeFeatures("P33520") # RES1_SCHPO
}
# == 3.11 node2text() ======================================================
node2text <- function(doc, tag) {
# an extractor function for the contents of elements
# between given tags in an XML response.
# Contents of all matching elements is returned in
# a vector of strings.
path <- paste0("//", tag)
nodes <- xml2::xml_find_all(doc, path)
return(xml2::xml_text(nodes))
}
# == 3.12 dbFetchNCBItaxData() =============================================
dbFetchNCBItaxData <- function(ID) {
# Fetch feature taxID and Organism from the NCBI.
# Parameters:
# ID char a RefSeq ID (accession number)
# Value:
# data frame taxID num NCBI taxID
# organism char organism for this taxID
# If the operation is not successful, a 0-length data frame is returned.
eUtilsBase <- "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
URL <- paste(eUtilsBase,
"esearch.fcgi?",
"db=protein",
"&term=", ID,
sep="")
myXML <- xml2::read_xml(URL)
GID <- node2text(myXML, "Id")
URL <- paste0(eUtilsBase,
"esummary.fcgi?",
"db=protein",
"&id=",
GID,
"&version=2.0")
myXML <- xml2::read_xml(URL)
x <- as.integer(node2text(myXML, "TaxId"))
y <- node2text(myXML, "Organism")
tID <- data.frame()
if (length(x) > 0 && length(y) > 0) {
tID <- data.frame(taxID = x, organism = y)
}
return(tID)
}
# == 3.13 UniProtIDmap() ===================================================
UniProtIDmap <- function (s, mapFrom = "P_REFSEQ_AC", mapTo = "ACC") {
# Use UniProt ID mapping service to map one or more IDs
# Parameters:
# s char A string of white-space separated IDs
# mapFrom char the database in which the IDs in s are valid.
# Default is RefSeq protein
# mapTo char the database in which the target IDs are valid.
# Default is UniProtKB
# Value
# A data frame of mapped IDs, with column names From and To, or an
# empty data frame if the mapping was unsuccessful. No rows are returned
# for IDs that are not mapped.
# Initialize curl
httr::set_config(httr::config(http_version = 0))
URL <- "https://www.uniprot.org/uploadlists/"
response <- httr::POST(URL,
body = list(from = mapFrom,
to = mapTo,
format = "tab",
query = s))
if (httr::status_code(response) == 200) { # 200: oK
myMap <- read.delim(file = textConnection(httr::content(response)),
sep = "\t")
colnames(myMap) <- c("From", "To")
} else {
myMap <- data.frame()
warning(paste("No uniProt ID mapping returned:",
"server sent status",
httr::status_code(response)))
}
return(myMap)
}
# == 3.14 dbProt2JSON() ====================================================
dbProt2JSON <- function(thisProt) {
# Extract all protein related data from myDB and return in JSON format.
# Parameters:
#   thisProt  char  a protein name present in myDB$protein$name
# Value:
#   A single JSON-formatted string with "protein", "taxonomy",
#   "annotation" and "feature" sections.
# Relies on the global database list myDB (tables: protein, taxonomy,
# annotation, feature) and on the helpers dbRow2JSON() / dbSeq2JSON()
# defined below. Assumes thisProt matches exactly one protein row -
# TODO confirm uniqueness is enforced when myDB is built.
thisData <- list()
# add a protein table
sel <- which(myDB$protein$name == thisProt)
thisData$protein <- myDB$protein[sel, ]
# add a taxonomy table
sel <- which(myDB$taxonomy$ID == thisData$protein$taxonomyID)
thisData$taxonomy <- myDB$taxonomy[sel, ]
# add the entries for this protein from the annotation table
sel <- which(myDB$annotation$proteinID == thisData$protein$ID)
thisData$annotation <- myDB$annotation[sel, ]
# our .json convention uses pName and fName as keys, not the db-internal IDs
# add empty columns for pName and fName
l <- nrow(thisData$annotation)
thisData$annotation$pName <- character(l)
thisData$annotation$fName <- character(l)
# get the appropriate protein and feature names
# (one which() lookup per annotation row; fine for small tables)
for (i in seq_len(l)) {
pID <- thisData$annotation$proteinID[i]
sel <- which(myDB$protein$ID == pID)
thisData$annotation$pName[i] <- myDB$protein$name[sel] # store pName
fID <- thisData$annotation$featureID[i]
sel <- which(myDB$feature$ID == fID)
thisData$annotation$fName[i] <- myDB$feature$name[sel] # store fName
}
# add the corresponding feature table
sel <- which(myDB$feature$ID %in% thisData$annotation$featureID)
thisData$feature <- myDB$feature[sel, ]
# remove columns that are not going into JSON output
thisData$protein$ID <- NULL
thisData$annotation$ID <- NULL
thisData$annotation$proteinID <- NULL
thisData$annotation$featureID <- NULL
thisData$feature$ID <- NULL
# create JSON-formatted output, assembled line by line into "out".
# Trailing commas on the last array element are stripped afterwards.
# ( jsonlite::prettify() is too wordy for a compact Wikipage )
out <- character()
out <- c(out, '{')
out <- c(out, ' "protein": {')
# the sequence is emitted separately (as a chunked JSON array)
sel <- colnames(thisData$protein) != "sequence"
out <- c(out, sprintf(" %s,", dbRow2JSON(thisData$protein[1, sel],
coll = ",\n ")))
out <- c(out, dbSeq2JSON(thisData$protein$sequence[1]))
out <- c(out, ' },')
out <- c(out, ' "taxonomy": {')
out <- c(out, sprintf(" %s", dbRow2JSON(thisData$taxonomy)))
out <- c(out, ' },')
out <- c(out, ' "annotation": [')
# one JSON object per annotation row
for (i in seq_len(nrow(thisData$annotation))) {
out <- c(out, sprintf(" {%s},", dbRow2JSON(thisData$annotation[i, ])))
}
out[length(out)] <- gsub(",$", "", out[length(out)]) # remove last ","
out <- c(out, ' ],')
out <- c(out, ' "feature": [')
# feature "description" is emitted on its own line after the other columns
sel <- colnames(thisData$feature) != "description"
for (i in seq_len(nrow(thisData$feature))) {
out <- c(out, sprintf(" {%s,",
dbRow2JSON(thisData$feature[i, sel])))
out <- c(out, sprintf(" %s},",
dbRow2JSON(thisData$feature[i, "description",
drop = FALSE])))
}
out[length(out)] <- gsub(",$", "", out[length(out)]) # remove last ","
out <- c(out, ' ]')
out <- c(out, '}')
return(paste0(out, collapse = "\n"))
}
# == 3.15 dbSeq2JSON() =====================================================
dbSeq2JSON <- function(s, nIndents = 4, width = 70) {
  # Turn a sequence into a JSON key-value pair, with the value being a JSON
  # array of quoted string chunks not exceeding "width" characters, indented
  # by "nIndents" spaces.
  # Parameters:
  #   s         char  sequence string(s)
  #   nIndents  num   number of leading spaces on each output line
  #   width     num   maximum number of sequence characters per chunk
  # Value:
  #   A single string: the "sequence" key plus its JSON array, newline-joined.
  ind <- paste0(rep(" ", nIndents), collapse = "")
  out <- character()
  out <- c(out, sprintf("%s\"sequence\": [", ind))
  for (i in seq_along(s)) {
    l <- nchar(s[i])
    if (l == 0) {
      # Empty sequence: emit one empty JSON string (seq(1, 0, by = width)
      # would error, and raw output would not be valid JSON).
      out <- c(out, sprintf("%s \"\",", ind))
    } else {
      # Chunk boundaries; the final chunk may be shorter than "width".
      starts <- seq(1, l, by = width)
      ends <- pmin(starts + width - 1, l)
      # BUG FIX: sequences no longer than "width" were previously appended
      # unquoted, unindented and without a trailing comma, producing invalid
      # JSON. All chunks are now quoted and comma-separated uniformly.
      out <- c(out, sprintf("%s \"%s\",", ind, substring(s[i], starts, ends)))
    }
  }
  out[length(out)] <- gsub(",$", "", out[length(out)]) # remove last ","
  out <- c(out, sprintf("%s]", ind))
  return(paste0(out, collapse = "\n"))
}
# == 3.16 dbRow2JSON() =====================================================
dbRow2JSON <- function(df, coll = ", ") {
  # Turn the first row of a data frame into JSON key: value pairs, keyed by
  # the column names. Integer columns render with %d, double columns with %f,
  # everything else (including factors) as a quoted string.
  # Parameters:
  #   df    data frame; only row 1 is used
  #   coll  char; collapse string placed between key: value pairs
  # Value:
  #   A single string of key: value pairs joined by "coll" ("" for a
  #   zero-column data frame).
  out <- character(ncol(df))       # preallocate instead of growing with c()
  for (i in seq_len(ncol(df))) {   # seq_len(): safe when ncol(df) == 0,
                                   # unlike 1:ncol(df) which yields c(1, 0)
    v <- df[1, i]
    # inherits() instead of class(v) == "...": it never yields a length > 1
    # condition for multi-class objects (e.g. POSIXct), and factors fall
    # through to the quoted-string branch as before.
    if (inherits(v, "integer")) {
      val <- sprintf("%d", v)
    } else if (inherits(v, "numeric")) {
      val <- sprintf("%f", v)
    } else {
      val <- sprintf("\"%s\"", as.character(v))
    }
    out[i] <- sprintf("\"%s\": %s", colnames(df)[i], val)
  }
  return(paste0(out, collapse = coll))
}
# = 4 TESTS ===============================================================
# Unit-test scaffold: runs only when the global flag doTESTS is TRUE
# (defined elsewhere in this project - TODO confirm where it is set).
if (doTESTS) {
# Install testthat on demand so the test block is self-sufficient.
# NOTE(review): installing packages as a side effect of sourcing a file is
# surprising; consider requiring testthat up front instead.
if (! requireNamespace("testthat", quietly = TRUE)) {
install.packages("testthat")
}
# ToDo: test everything here
}
# [END]
|
# Load the concrete compressive-strength data set via an interactive file
# picker. NOTE(review): file.choose() blocks non-interactive runs; consider
# a fixed path for reproducibility.
concrete <- read.csv(file.choose())
str(concrete)
# Exploratory plots: histogram plus kernel-density overlay for each column.
# probability = TRUE (was T; T/F are reassignable, TRUE/FALSE are reserved)
# puts the histogram on the density scale so the overlaid curve matches.
hist(concrete$strength, probability = TRUE, breaks = 30)
lines(density(concrete$strength))
hist(concrete$age, probability = TRUE, breaks = 30)
lines(density(concrete$age))
hist(concrete$fineagg, probability = TRUE, breaks = 30)
lines(density(concrete$fineagg))
hist(concrete$coarseagg, probability = TRUE, breaks = 30)
lines(density(concrete$coarseagg))
hist(concrete$superplastic, probability = TRUE, breaks = 30)
lines(density(concrete$superplastic))
hist(concrete$water, probability = TRUE, breaks = 30)
lines(density(concrete$water))
hist(concrete$ash, probability = TRUE, breaks = 30)
lines(density(concrete$ash))
hist(concrete$slag, probability = TRUE, breaks = 30)
lines(density(concrete$slag))
hist(concrete$cement, probability = TRUE, breaks = 30)
lines(density(concrete$cement))
#normalize
# Min-max scaling: map a numeric vector linearly onto [0, 1].
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Min-max normalize every column so all inputs share the [0, 1] scale.
concrete_norm <- as.data.frame(lapply(concrete, normalize))
summary(concrete_norm$strength) # normalized in the 0 to 1 range.
summary(concrete$strength)
# Train/test split (1030 rows total).
# BUG FIX: the test set previously started at row 700, so row 700 appeared
# in BOTH Train and Test (data leakage). The test set now starts at 701.
Train <- concrete_norm[1:700, ]
Test <- concrete_norm[701:1030, ]
library(neuralnet)
head(concrete)
set.seed(123) # make the neural-net weight initialization reproducible
# Fit a neural net (default architecture) predicting strength from all other
# normalized columns, then visualize it.
concrete_model <- neuralnet(formula = strength~., data = Train)
plot(concrete_model)
library(NeuralNetTools)
par(mar=numeric(4),family='serif')
plotnet(concrete_model, alpha = 0.6)
#Evaluating model performance
# Predict on the held-out rows; Test[1:8] drops the 9th column - presumably
# strength is the last column, TODO confirm the column order.
model_result <- neuralnet::compute(concrete_model,Test[1:8])
predicted_strength <- model_result$net.result
View(predicted_strength)
# Correlation between predicted and observed strength on the test rows.
cor(predicted_strength, Test$strength) #80.5
head(predicted_strength)
# Un-normalized strength range, used below to map predictions back to
# real-world units.
s_min <- min(concrete$strength)
s_min
s_max <- max(concrete$strength)
s_max
# Invert min-max scaling: map values from [0, 1] back onto [min, max].
# (Parameter names shadow base::min/max but are kept for interface
# compatibility with existing callers.)
unnormalize <- function(x, min, max) {
  min + x * (max - min)
}
# Map normalized predictions back onto the original strength scale.
# NOTE(review): "srength_pred" is a typo for "strength_pred"; kept as-is so
# any later references to this name keep working.
srength_pred <- unnormalize(predicted_strength, s_min, s_max)
head(srength_pred, n=10)
# Second model: 5 hidden units with logistic activation, new seed.
set.seed(12345)
con_model <- neuralnet(strength~., data= Train, hidden = 5,
act.fct = "logistic")
plot(con_model)
par(mar = numeric(4), family ='serif')
plotnet(con_model, alpha=0.6)
| /Concrete_NN_R.R | no_license | sindhu19460/Neural_Networks | R | false | false | 2,140 | r | concrete <- read.csv(file.choose())
str(concrete)
# Exploratory plots: histogram plus kernel-density overlay for each column.
# probability = TRUE (was T; T/F are reassignable, TRUE/FALSE are reserved)
# puts the histogram on the density scale so the overlaid curve matches.
hist(concrete$strength, probability = TRUE, breaks = 30)
lines(density(concrete$strength))
hist(concrete$age, probability = TRUE, breaks = 30)
lines(density(concrete$age))
hist(concrete$fineagg, probability = TRUE, breaks = 30)
lines(density(concrete$fineagg))
hist(concrete$coarseagg, probability = TRUE, breaks = 30)
lines(density(concrete$coarseagg))
hist(concrete$superplastic, probability = TRUE, breaks = 30)
lines(density(concrete$superplastic))
hist(concrete$water, probability = TRUE, breaks = 30)
lines(density(concrete$water))
hist(concrete$ash, probability = TRUE, breaks = 30)
lines(density(concrete$ash))
hist(concrete$slag, probability = TRUE, breaks = 30)
lines(density(concrete$slag))
hist(concrete$cement, probability = TRUE, breaks = 30)
lines(density(concrete$cement))
#normalize
# Min-max scaling: map a numeric vector linearly onto [0, 1].
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Min-max normalize every column so all inputs share the [0, 1] scale.
concrete_norm <- as.data.frame(lapply(concrete, normalize))
summary(concrete_norm$strength) # normalized in the 0 to 1 range.
summary(concrete$strength)
# Train/test split (1030 rows total).
# BUG FIX: the test set previously started at row 700, so row 700 appeared
# in BOTH Train and Test (data leakage). The test set now starts at 701.
Train <- concrete_norm[1:700, ]
Test <- concrete_norm[701:1030, ]
library(neuralnet)
head(concrete)
set.seed(123) # make the neural-net weight initialization reproducible
# Fit a neural net (default architecture) predicting strength from all other
# normalized columns, then visualize it.
concrete_model <- neuralnet(formula = strength~., data = Train)
plot(concrete_model)
library(NeuralNetTools)
par(mar=numeric(4),family='serif')
plotnet(concrete_model, alpha = 0.6)
#Evaluating model performance
# Predict on the held-out rows; Test[1:8] drops the 9th column - presumably
# strength is the last column, TODO confirm the column order.
model_result <- neuralnet::compute(concrete_model,Test[1:8])
predicted_strength <- model_result$net.result
View(predicted_strength)
# Correlation between predicted and observed strength on the test rows.
cor(predicted_strength, Test$strength) #80.5
head(predicted_strength)
# Un-normalized strength range, used below to map predictions back to
# real-world units.
s_min <- min(concrete$strength)
s_min
s_max <- max(concrete$strength)
s_max
# Invert min-max scaling: map values from [0, 1] back onto [min, max].
# (Parameter names shadow base::min/max but are kept for interface
# compatibility with existing callers.)
unnormalize <- function(x, min, max) {
  min + x * (max - min)
}
# Map normalized predictions back onto the original strength scale.
# NOTE(review): "srength_pred" is a typo for "strength_pred"; kept as-is so
# any later references to this name keep working.
srength_pred <- unnormalize(predicted_strength, s_min, s_max)
head(srength_pred, n=10)
# Second model: 5 hidden units with logistic activation, new seed.
set.seed(12345)
con_model <- neuralnet(strength~., data= Train, hidden = 5,
act.fct = "logistic")
plot(con_model)
par(mar = numeric(4), family ='serif')
plotnet(con_model, alpha=0.6)
|
#TeamFI - we use an Elastic Net, Lasso, and XGB ensemble model to predict total international viewership.
# NOTE(review): many of these libraries are loaded but never used below
# (randomForest, mlbench, MASS, ggparallel, rpart, ...). plyr is loaded
# before dplyr so dplyr's verbs mask plyr's - keep that order.
library(randomForest)
library(mlbench)
library(caret)
library(e1071)
library(MASS)
library(ggparallel)
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(MatchIt)
library(splitstackshape)
library(gbm)
library(xgboost)
library(glmnet)
library(plyr)
library(dplyr)
library(lubridate)
library(readr)
library(car)
# NOTE(review): a hard-coded user-specific setwd() makes the script
# non-portable; consider relative paths or here::here().
setwd('C:/Users/rchang/OneDrive - FI Consulting/Kaggle/NBA')
# Input data: per-country viewership (train), games to score (test),
# per-game player info, and game metadata.
df <- read.csv('training_set.csv')
td <- read.csv('test_set.csv')
player <- read.csv('player_data.csv')
game <- read.csv('game_data.csv')
# top countries by viewership in the 2017-18 season
# (hard-coded list of anonymized country codes)
top_country <- c('C176','C181','C169','C155','C183','C47','C49','C191','C123','C143','C114','C185','C78','C75','C9','C150','C154','C211','C208','C134','C69','C33','C86','C41','C117','C12','C106','C215','C122','C116','C72','C65','C182','C199','C207','C202','C225','C80','C124','C10','C79','C130','C190','C50','C103','C97','C56','C131','C77','C8','C83','C218','C168','C153','C71','C101','C213')
#remove records that don't have top countries
df_small <- df[df$Country %in% top_country,]
df_small_2016 <- df_small[df_small$Season == "2016-17",]
df_small_2017 <- df_small[df_small$Season == "2017-18",]
# League-wide mean viewership per country, one table per season
# (aggregate() names the grouping column "Group.1" and the mean "x").
country_avg_2016 <- aggregate(df_small_2016$Rounded.Viewers, list(df_small_2016$Country), mean)
country_avg_2017 <- aggregate(df_small_2017$Rounded.Viewers, list(df_small_2017$Country), mean)
#function to count the number of countries where a team's intl viewership exceeds the average
# Per-country relative popularity of one home team in 2016-17: for every
# country in country_avg_2016, the fractional difference between the team's
# mean home-game viewership there and the league-wide country mean,
# i.e. (team mean - country mean) / country mean. Countries where the team
# drew no viewers come back as NA. Relies on the globals df_small_2016
# (columns 6:7 = Country, Rounded.Viewers - TODO confirm) and
# country_avg_2016.
teamCountry_2016 <- function(teamName) {
  home_games <- df_small_2016[df_small_2016$Home_Team == teamName, 6:7]
  team_means <- aggregate(home_games$Rounded.Viewers, list(home_games$Country), mean)
  combined <- merge(team_means, country_avg_2016, by = "Group.1", all = TRUE)
  combined$diff <- (combined$x.x - combined$x.y) / combined$x.y
  return(combined$diff)
}
# Per-country relative popularity of one home team in 2017-18; identical
# construction to teamCountry_2016 but against the 2017-18 tables. Relies
# on the globals df_small_2017 and country_avg_2017.
teamCountry_2017 <- function(teamName) {
  home_games <- df_small_2017[df_small_2017$Home_Team == teamName, 6:7]
  team_means <- aggregate(home_games$Rounded.Viewers, list(home_games$Country), mean)
  combined <- merge(team_means, country_avg_2017, by = "Group.1", all = TRUE)
  combined$diff <- (combined$x.x - combined$x.y) / combined$x.y
  return(combined$diff)
}
# loops through every team to get the number of countries where viewership exceeds average
# NOTE(review): levels() returns NULL when Home_Team is a character column
# (the read.csv default since R 4.0) - confirm stringsAsFactors behavior,
# otherwise these loops silently run zero times.
team_list <- levels(df$Home_Team)
# Seed with a dummy all-zero row (dropped below); growing with rbind()
# inside a loop is O(n^2) but acceptable at 30 teams.
c_2016 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2016)))
for(teamName in team_list){
temp_exceed <- teamCountry_2016(teamName)
c_2016 <- rbind(c_2016,temp_exceed)
}
c_2017 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2017)))
for(teamName in team_list){
temp_exceed <- teamCountry_2017(teamName)
c_2017 <- rbind(c_2017,temp_exceed)
}
# clean-up, get rid of top row, rename columns, cbind list of team names, replace NA with 0 (assumption is that NAs perform at or below league average)
c_2016 = data.frame(c_2016[-1,])
colnames(c_2016) <- country_avg_2016$Group.1
team_exceed_2016 = cbind(team_list,c_2016)
team_exceed_2016[is.na(team_exceed_2016)] <- 0
c_2017 = data.frame(c_2017[-1,])
colnames(c_2017) <- country_avg_2017$Group.1
team_exceed_2017 = cbind(team_list,c_2017)
team_exceed_2017[is.na(team_exceed_2017)] <- 0
# find each games' "country score", a measure of country interest in the game
# One row per (Season, Away_Team, Home_Team) matchup; "id" keys it later.
df_teams <- unique(df[,c("Season", "Away_Team", "Home_Team")])
df_teams <- mutate(df_teams, id = rownames(df_teams))
df_teams_2016 = df_teams[df_teams$Season == "2016-17", ]
df_teams_2017 = df_teams[df_teams$Season == "2017-18", ]
# idea is that we have to find the max of the home team and away team country scores to get an overall measure of intl viewership for a particular game
# Join each matchup with the away team's scores, then (after renaming the
# key column in place) with the home team's scores; stacking both gives two
# rows per matchup, combined into one per game further below.
colnames(team_exceed_2016)[1] <- "Away_Team"
temp_away_2016 <- merge(df_teams_2016,team_exceed_2016,by="Away_Team")
colnames(team_exceed_2016)[1] <- "Home_Team"
temp_home_2016 <- merge(df_teams_2016,team_exceed_2016,by="Home_Team")
temp_all_2016 <- rbind(temp_away_2016,temp_home_2016)
temp_all_2016 <- subset(temp_all_2016,select=-c(Away_Team, Home_Team))
colnames(team_exceed_2017)[1] <- "Away_Team"
temp_away_2017 <- merge(df_teams_2017,team_exceed_2017,by="Away_Team")
colnames(team_exceed_2017)[1] <- "Home_Team"
temp_home_2017 <- merge(df_teams_2017,team_exceed_2017,by="Home_Team")
temp_all_2017 <- rbind(temp_away_2017,temp_home_2017)
temp_all_2017 <- subset(temp_all_2017,select=-c(Away_Team, Home_Team))
temp_all <- rbind(temp_all_2016, temp_all_2017)
temp_all <- temp_all[with(temp_all,order(id, Season)),]
# Collapse the two rows per game (home + away) into one by averaging every
# country column. funs() is deprecated since dplyr 0.8 - pass the function
# directly to summarise_all().
exceed_sum <- temp_all %>% group_by(id, Season) %>% summarise_all(mean)
merge_teams_exceed <- merge(df_teams, exceed_sum, by = "id")
# now merge the exceed score for each game back to the train dataset. Country is not in the test dataset, so remove from train dataset
df2 <- ddply(df,.(Season, Game_ID, Game_Date, Away_Team, Home_Team),summarize,Total_Viewers=sum(Rounded.Viewers))
colnames(merge_teams_exceed)[2] <- "Season"
# Calendar features: month and day-of-week as factors, plus a Christmas-day
# indicator (those games are noted below as the highest-viewed).
df2$month <- as.factor(month(as.POSIXlt(df2$Game_Date, format="%m/%d/%Y"))) # adding month as factor variable
df2$weekday <- as.factor(wday(as.POSIXlt(df2$Game_Date, format="%m/%d/%Y"))) # adding day of week as factor variable
df2$xmas <- 0 # for adding an xmas indicator variable, as the highest viewed games are on xmas
df2$xmas[as.POSIXlt(df2$Game_Date, format="%m/%d/%Y") == "2017-12-25"] <- 1
df2$xmas[as.POSIXlt(df2$Game_Date, format="%m/%d/%Y") == "2016-12-25"] <- 1
df3 <- merge(df2, merge_teams_exceed, by = c("Away_Team", "Home_Team", "Season"))
# work on dataframe of players, idea is that viewers increase/decrease if all-star players are playing in the game
# Keep only active all-star rows, and build df_players, the distinct
# all-star roster used to align per-game indicator vectors.
player_small <- subset(player, select=c("Game_ID", "Name", "ASG_Team", "Active_Status"))
player_small <- player_small[player_small$ASG_Team != "None", ]
player_small <- player_small[player_small$Active_Status != "Inactive", ]
df_players <- as.data.frame(unique(player_small[,c("Name")]))
colnames(df_players)[1] <- "Name"
player_small$suited_up <- rep(1,nrow(player_small)) #indicator variable
# function to identify when all-star players are playing in a particular game
# Indicator vector of which all-stars suited up for one game: 1 where the
# player appears in player_small for gameID, NA otherwise, ordered by the
# merge's Name sort. Relies on the globals player_small and df_players.
findPlayers <- function(gameID) {
  active_here <- player_small[player_small$Game_ID == gameID, c("Name", "suited_up")]
  roster_aligned <- merge(active_here, df_players, by = "Name", all = TRUE)
  return(roster_aligned$suited_up)
}
# loops through every game to find all-star players playing in that game
game_list <- unique(player_small[,c("Game_ID")])
# Dummy first row (dropped below); rbind-growth is O(n^2) but tolerable here.
p <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(df_players)))
for(gameID in game_list){
player_ind <- findPlayers(gameID)
p <- rbind(p,player_ind)
}
# clean-up, get rid of top row, rename columns, cbind list of gameIDs, replace NA with 0
p = data.frame(p[-1,])
df_players_char <- as.character(df_players$Name)
# sort() here must match the Name ordering merge() produced in findPlayers
colnames(p) <- sort(df_players_char)
players_suited_up = cbind(game_list,p)
colnames(players_suited_up)[1] <- "Game_ID"
players_suited_up[is.na(players_suited_up)] <- 0
# NOTE(review): 2:36 hard-codes 35 all-star columns; use
# 2:ncol(players_suited_up) if the roster size can change.
players_suited_up$num_all_stars <- rowSums(players_suited_up[,2:36])
# merge player indicators back to training datset
df4 <- merge(df3,players_suited_up,by=c("Game_ID"), all=TRUE)
df4<-df4[!(is.na(df4$Game_Date)),]
df4[is.na(df4)] <- 0
df4 <- subset(df4, select = -c(Game_ID, id, Season.y))
df4 <- subset(df4, select = -c(Game_Date)) #getting rid of game_date the variable is not realistic for future prediction
### now prepare the test dataset
# Same calendar features as the training build above.
td$month <- as.factor(month(as.POSIXlt(td$Game_Date, format="%m/%d/%Y"))) # adding month as factor variable
td$weekday <- as.factor(wday(as.POSIXlt(td$Game_Date, format="%m/%d/%Y"))) # adding day of week as factor variable
td$xmas <- 0 # for adding an xmas indicator variable, as the highest viewed games are on xmas
td$xmas[as.POSIXlt(td$Game_Date, format="%m/%d/%Y") == "2017-12-25"]=1
td$xmas[as.POSIXlt(td$Game_Date, format="%m/%d/%Y") == "2016-12-25"]=1
# loops through every team to get the number of countries where viewership exceeds average
# NOTE(review): levels() on a character column returns NULL (R >= 4.0
# read.csv default) - confirm Home_Team is a factor.
team_list <- levels(td$Home_Team)
c_2016 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2016)))
for(teamName in team_list){
temp_exceed <- teamCountry_2016(teamName)
c_2016 <- rbind(c_2016,temp_exceed)
}
c_2017 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2017)))
for(teamName in team_list){
temp_exceed <- teamCountry_2017(teamName)
c_2017 <- rbind(c_2017,temp_exceed)
}
# clean-up, get rid of top row, rename columns, cbind list of team names, replace NA with 0 (assumption is that NAs perform at or below league average)
c_2016 = data.frame(c_2016[-1,])
colnames(c_2016) <- country_avg_2016$Group.1
team_exceed_2016 = cbind(team_list,c_2016)
team_exceed_2016[is.na(team_exceed_2016)] <- 0
c_2017 = data.frame(c_2017[-1,])
colnames(c_2017) <- country_avg_2017$Group.1
team_exceed_2017 = cbind(team_list,c_2017)
team_exceed_2017[is.na(team_exceed_2017)] <- 0
# find each games' "country score"
# Same matchup-level construction as for the training data, but on td.
df_teams <- unique(td[,c("Season", "Away_Team", "Home_Team")])
df_teams <- mutate(df_teams, id = rownames(df_teams))
df_teams_2016 = df_teams[df_teams$Season == "2016-17", ]
df_teams_2017 = df_teams[df_teams$Season == "2017-18", ]
# idea is that we find the max of the home team and away team country scores to get an overall measure of intl viewership for a particular game
# Join away-team scores, rename the key column, join home-team scores, and
# stack both so each game contributes two rows (combined below).
colnames(team_exceed_2016)[1] <- "Away_Team"
temp_away_2016 <- merge(df_teams_2016,team_exceed_2016,by="Away_Team")
colnames(team_exceed_2016)[1] <- "Home_Team"
temp_home_2016 <- merge(df_teams_2016,team_exceed_2016,by="Home_Team")
temp_all_2016 <- rbind(temp_away_2016,temp_home_2016)
temp_all_2016 <- subset(temp_all_2016,select=-c(Away_Team, Home_Team))
colnames(team_exceed_2017)[1] <- "Away_Team"
temp_away_2017 <- merge(df_teams_2017,team_exceed_2017,by="Away_Team")
colnames(team_exceed_2017)[1] <- "Home_Team"
temp_home_2017 <- merge(df_teams_2017,team_exceed_2017,by="Home_Team")
temp_all_2017 <- rbind(temp_away_2017,temp_home_2017)
temp_all_2017 <- subset(temp_all_2017,select=-c(Away_Team, Home_Team))
temp_all <- rbind(temp_all_2016, temp_all_2017)
temp_all <- temp_all[with(temp_all,order(id, Season)),]
# Collapse the two rows per game (home + away) into one by averaging every
# country column. funs() is deprecated since dplyr 0.8 - pass the function
# directly to summarise_all().
exceed_sum <- temp_all %>% group_by(id, Season) %>% summarise_all(mean)
merge_teams_exceed <- merge(df_teams, exceed_sum, by = "id")
# now merge the exceed score for each game back to the test dataset.
colnames(merge_teams_exceed)[2] <- "Season"
td3 <- merge(td, merge_teams_exceed, by = c("Away_Team", "Home_Team", "Season"))
# now merge player indicators to the test dataset
td4 <- merge(td3, players_suited_up, by = c("Game_ID"), all = TRUE)
td4 <- td4[!(is.na(td4$Game_Date)), ]
td4[is.na(td4)] <- 0
# Drop identifiers and the raw date so td4's columns mirror df4's.
td4 <- subset(td4, select = -c(Game_ID, id, Season.y))
td4 <- subset(td4, select = -c(Game_Date))
###
# prepare the train dataset for glmnet model
# model.matrix(~ . - 1) expands factors to dummy columns with no intercept.
x_pred <- subset(df4, select = -c(Total_Viewers))
x_train <- model.matrix( ~ .-1, x_pred)
x_train <- as.data.frame(x_train)
# prepare the test dataset for glmnet model
y_pred <- subset(td4, select = -c(Total_Viewers))
y_train <- model.matrix( ~ .-1, y_pred)
y_train <- as.data.frame(y_train)
# game_date code - problem is the test dataset doesn't contain all the same game dates as train so it has fewer variables, will add those variables to test so it is same dimension as train
# additional_cols <- setdiff(names(x_train),names(y_train))
# dummy_cols <- as.data.frame(matrix(rep(0,nrow(y_train)*length(additional_cols)), ncol = length(additional_cols)))
# colnames(dummy_cols) <- additional_cols
# y_train <- cbind(y_train,dummy_cols)
# I don't see a need to have home and away indicators, will collapse them
# NOTE(review): the 1:30 / 31:59 ranges assume model.matrix put 30 away-team
# dummies first, then 29 home-team dummies (ATL's home dummy dropped as the
# reference level and re-added as a zero column) - confirm column order.
x_train_away <- x_train[,1:30]
x_train_home <- x_train[,31:59]
x_train_home <- cbind(Home_TeamATL = 0, x_train_home) # Atlanta doesn't have a home team, have to manually add it
colnames(x_train_away) <- team_list
colnames(x_train_home) <- team_list
x_train_teams <- x_train_away + x_train_home
x_train <- x_train[-c(1:59)]
x_train <- cbind(x_train_teams, x_train)
y_train_away <- y_train[,1:30]
y_train_home <- y_train[,31:59]
y_train_home <- cbind(Home_TeamATL = 0, y_train_home) # Atlanta doesn't have a home team, have to manually add it
colnames(y_train_away) <- team_list
colnames(y_train_home) <- team_list
y_train_teams <- y_train_away + y_train_home
y_train <- y_train[-c(1:59)]
y_train <- cbind(y_train_teams, y_train)
## code to get interaction terms
# y ~ .*. expands all pairwise interactions; [, -1] drops the intercept.
f <- as.formula(y ~ .*.)
y <- df4$Total_Viewers
x_interact <- model.matrix(f, x_train)[, -1]
#x_interact <- as.matrix(x_interact)
## code to get interaction terms
# NOTE(review): f and y are reused/overwritten here; y is only a dummy
# response so model.matrix can evaluate the formula.
f <- as.formula(y ~ .*.)
y <- td$Game_ID # dummy variable
y_interact <- model.matrix(f, y_train)[, -1]
#y_interact <- as.matrix(y_interact)
### estimate glmnet model for the train dataset
fit <-glmnet(x = x_interact, y = df4$Total_Viewers, alpha = 1)
#plot(fit, xvar = "lambda")
# Cross-validate to pick lambda. NOTE(review): cv.glmnet runs at its default
# alpha = 1 (lasso), but lambda.min is then reused for the alpha = 0.5
# elastic-net fit below - the penalty is not tuned for that alpha.
crossval <- cv.glmnet(x = x_interact, y = df4$Total_Viewers)
#plot(crossval)
penalty <- crossval$lambda.min #optimal lambda
penalty #minimal shrinkage
fit1 <-glmnet(x = x_interact, y = df4$Total_Viewers, alpha = 0.5, lambda = penalty ) #estimate the model with that
#coef(fit1)
vi <- varImp(fit1, lambda = fit1$lambda)
### use model to predict the test dataset
# Elastic-net predictions, re-attached to game IDs and test metadata.
results <- as.data.frame(predict(object=fit1, y_interact))
pred <- cbind(td$Game_ID, results)
colnames(pred)<- c("Game_ID","Total_Viewers")
td_glmnet <- merge(pred, td, by="Game_ID")
td_glmnet <- subset(td_glmnet, select = -c(Total_Viewers.y))
colnames(td_glmnet)[2] <- "Total_Viewers"
#write.csv(td_glmnet, file = "test_glmnet_20180718.csv", row.names=FALSE)
## lasso regression
# NOTE(review): fit1 is overwritten; the elastic-net predictions above were
# already captured in td_glmnet, so this is safe but easy to trip over.
fit1 <-glmnet(x = x_interact, y = df4$Total_Viewers, alpha = 1, lambda = penalty ) #alpha = 1
results <- as.data.frame(predict(object=fit1, y_interact))
pred <- cbind(td$Game_ID, results)
colnames(pred)<- c("Game_ID","Total_Viewers")
td_lasso <- merge(pred, td, by="Game_ID")
td_lasso <- subset(td_lasso, select = -c(Total_Viewers.y))
colnames(td_lasso)[2] <- "Total_Viewers"
#write.csv(td_lasso, file = "test_lasso_20180718.csv", row.names=FALSE)
## now estimate the xgboost model
# NOTE(review): "reg:linear" is a deprecated alias in newer xgboost
# releases ("reg:squarederror") - confirm against the installed version.
dtrain <- xgb.DMatrix(data = x_interact, label= df4$Total_Viewers) # convert our matrix into dmatrix object
xgb <- xgboost(data = dtrain, nround=25, objective = "reg:linear")
xgb_results <- predict(xgb, y_interact)
xgb_pred <- cbind(td$Game_ID, xgb_results)
colnames(xgb_pred)<- c("Game_ID","Total_Viewers")
td_xgb <- merge(xgb_pred, td, by="Game_ID")
td_xgb <- subset(td_xgb, select = -c(Total_Viewers.y))
colnames(td_xgb)[2] <- "Total_Viewers"
#write.csv(td_xgb, file = "test_xgb_20180718.csv", row.names=FALSE)
#average of all three methods to get final test dataset
# Simple ensemble: unweighted row mean of the three models' predictions.
td_final <- merge(td_glmnet,td_lasso,by=c("Game_ID","Season","Game_Date","Away_Team","Home_Team","month","weekday","xmas"))
td_final <- merge(td_final,td_xgb,by=c("Game_ID","Season","Game_Date","Away_Team","Home_Team","month","weekday","xmas"))
td_final$Total_Viewers_Final <- rowMeans(td_final[c('Total_Viewers.x', 'Total_Viewers.y','Total_Viewers')], na.rm=TRUE)
write.csv(td_final, file = "test_final_20180718.csv", row.names=FALSE)
| /nba_hackathon_TeamFI.R | no_license | juandixonformvp/nba_hackathon | R | false | false | 15,040 | r | #TeamFI - we use an Elastic Net, Lasso, and XGB ensemble model to predict total international viewership.
library(randomForest)
library(mlbench)
library(caret)
library(e1071)
library(MASS)
library(ggparallel)
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(MatchIt)
library(splitstackshape)
library(gbm)
library(xgboost)
library(glmnet)
library(plyr)
library(dplyr)
library(lubridate)
library(readr)
library(car)
setwd('C:/Users/rchang/OneDrive - FI Consulting/Kaggle/NBA')
df <- read.csv('training_set.csv')
td <- read.csv('test_set.csv')
player <- read.csv('player_data.csv')
game <- read.csv('game_data.csv')
# top countries by viewership in the 2017-18 season
top_country <- c('C176','C181','C169','C155','C183','C47','C49','C191','C123','C143','C114','C185','C78','C75','C9','C150','C154','C211','C208','C134','C69','C33','C86','C41','C117','C12','C106','C215','C122','C116','C72','C65','C182','C199','C207','C202','C225','C80','C124','C10','C79','C130','C190','C50','C103','C97','C56','C131','C77','C8','C83','C218','C168','C153','C71','C101','C213')
#remove records that don't have top countries
df_small <- df[df$Country %in% top_country,]
df_small_2016 <- df_small[df_small$Season == "2016-17",]
df_small_2017 <- df_small[df_small$Season == "2017-18",]
country_avg_2016 <- aggregate(df_small_2016$Rounded.Viewers, list(df_small_2016$Country), mean)
country_avg_2017 <- aggregate(df_small_2017$Rounded.Viewers, list(df_small_2017$Country), mean)
#function to count the number of countries where a team's intl viewership exceeds the average
teamCountry_2016 <- function(teamName){
temp <- df_small_2016[df_small_2016$Home_Team == teamName,6:7]
temp_avg <- aggregate(temp$Rounded.Viewers, list(temp$Country), mean)
temp_country_avg <- merge(temp_avg,country_avg_2016,by="Group.1", all=TRUE)
temp_country_avg$diff <- (temp_country_avg$x.x - temp_country_avg$x.y) / temp_country_avg$x.y
return(temp_country_avg$diff)
}
teamCountry_2017 <- function(teamName){
temp <- df_small_2017[df_small_2017$Home_Team == teamName,6:7]
temp_avg <- aggregate(temp$Rounded.Viewers, list(temp$Country), mean)
temp_country_avg <- merge(temp_avg,country_avg_2017,by="Group.1", all=TRUE)
temp_country_avg$diff <- (temp_country_avg$x.x - temp_country_avg$x.y) / temp_country_avg$x.y
return(temp_country_avg$diff)
}
# loops through every team to get the number of countries where viewership exceeds average
team_list <- levels(df$Home_Team)
c_2016 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2016)))
for(teamName in team_list){
temp_exceed <- teamCountry_2016(teamName)
c_2016 <- rbind(c_2016,temp_exceed)
}
c_2017 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2017)))
for(teamName in team_list){
temp_exceed <- teamCountry_2017(teamName)
c_2017 <- rbind(c_2017,temp_exceed)
}
# clean-up, get rid of top row, rename columns, cbind list of team names, replace NA with 0 (assumption is that NAs perform at or below league average)
c_2016 = data.frame(c_2016[-1,])
colnames(c_2016) <- country_avg_2016$Group.1
team_exceed_2016 = cbind(team_list,c_2016)
team_exceed_2016[is.na(team_exceed_2016)] <- 0
c_2017 = data.frame(c_2017[-1,])
colnames(c_2017) <- country_avg_2017$Group.1
team_exceed_2017 = cbind(team_list,c_2017)
team_exceed_2017[is.na(team_exceed_2017)] <- 0
# find each games' "country score", a measure of country interest in the game
df_teams <- unique(df[,c("Season", "Away_Team", "Home_Team")])
df_teams <- mutate(df_teams, id = rownames(df_teams))
df_teams_2016 = df_teams[df_teams$Season == "2016-17", ]
df_teams_2017 = df_teams[df_teams$Season == "2017-18", ]
# idea is that we have to find the max of the home team and away team country scores to get an overall measure of intl viewership for a particular game
colnames(team_exceed_2016)[1] <- "Away_Team"
temp_away_2016 <- merge(df_teams_2016,team_exceed_2016,by="Away_Team")
colnames(team_exceed_2016)[1] <- "Home_Team"
temp_home_2016 <- merge(df_teams_2016,team_exceed_2016,by="Home_Team")
temp_all_2016 <- rbind(temp_away_2016,temp_home_2016)
temp_all_2016 <- subset(temp_all_2016,select=-c(Away_Team, Home_Team))
colnames(team_exceed_2017)[1] <- "Away_Team"
temp_away_2017 <- merge(df_teams_2017,team_exceed_2017,by="Away_Team")
colnames(team_exceed_2017)[1] <- "Home_Team"
temp_home_2017 <- merge(df_teams_2017,team_exceed_2017,by="Home_Team")
temp_all_2017 <- rbind(temp_away_2017,temp_home_2017)
temp_all_2017 <- subset(temp_all_2017,select=-c(Away_Team, Home_Team))
temp_all <- rbind(temp_all_2016, temp_all_2017)
temp_all <- temp_all[with(temp_all,order(id, Season)),]
exceed_sum <- temp_all %>% group_by(id, Season) %>% summarise_all(funs(mean))
merge_teams_exceed <- merge(df_teams,exceed_sum,by="id")
# now merge the exceed score for each game back to the train dataset. Country is not in the test dataset, so remove from train dataset
df2 <- ddply(df,.(Season, Game_ID, Game_Date, Away_Team, Home_Team),summarize,Total_Viewers=sum(Rounded.Viewers))
colnames(merge_teams_exceed)[2] <- "Season"
df2$month <- as.factor(month(as.POSIXlt(df2$Game_Date, format="%m/%d/%Y"))) # adding month as factor variable
df2$weekday <- as.factor(wday(as.POSIXlt(df2$Game_Date, format="%m/%d/%Y"))) # adding day of week as factor variable
df2$xmas <- 0 # for adding an xmas indicator variable, as the highest viewed games are on xmas
df2$xmas[as.POSIXlt(df2$Game_Date, format="%m/%d/%Y") == "2017-12-25"]=1
df2$xmas[as.POSIXlt(df2$Game_Date, format="%m/%d/%Y") == "2016-12-25"]=1
df3 <- merge(df2,merge_teams_exceed,by=c("Away_Team","Home_Team","Season"))
# work on dataframe of players, idea is that viewers increase/decrease if all-star players are playing in the game
player_small <- subset(player, select=c("Game_ID", "Name", "ASG_Team", "Active_Status"))
player_small <- player_small[player_small$ASG_Team != "None", ]
player_small <- player_small[player_small$Active_Status != "Inactive", ]
df_players <- as.data.frame(unique(player_small[,c("Name")]))
colnames(df_players)[1] <- "Name"
player_small$suited_up <- rep(1,nrow(player_small)) #indicator variable
# function to identify when all-star players are playing in a particular game
findPlayers <- function(gameID){
temp <- player_small[player_small$Game_ID == gameID,c("Name","suited_up")]
temp_suited_up <- merge(temp,df_players,by="Name", all=TRUE)
return(temp_suited_up$suited_up)
}
# loops through every game to find all-star players playing in that game
game_list <- unique(player_small[,c("Game_ID")])
p <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(df_players)))
for(gameID in game_list){
player_ind <- findPlayers(gameID)
p <- rbind(p,player_ind)
}
# clean-up, get rid of top row, rename columns, cbind list of gameIDs, replace NA with 0
p = data.frame(p[-1,])
df_players_char <- as.character(df_players$Name)
colnames(p) <- sort(df_players_char)
players_suited_up = cbind(game_list,p)
colnames(players_suited_up)[1] <- "Game_ID"
players_suited_up[is.na(players_suited_up)] <- 0
players_suited_up$num_all_stars <- rowSums(players_suited_up[,2:36])
# merge player indicators back to training datset
df4 <- merge(df3,players_suited_up,by=c("Game_ID"), all=TRUE)
df4<-df4[!(is.na(df4$Game_Date)),]
df4[is.na(df4)] <- 0
df4 <- subset(df4, select = -c(Game_ID, id, Season.y))
df4 <- subset(df4, select = -c(Game_Date)) #getting rid of game_date the variable is not realistic for future prediction
### now prepare the test dataset
td$month <- as.factor(month(as.POSIXlt(td$Game_Date, format="%m/%d/%Y"))) # adding month as factor variable
td$weekday <- as.factor(wday(as.POSIXlt(td$Game_Date, format="%m/%d/%Y"))) # adding day of week as factor variable
td$xmas <- 0 # for adding an xmas indicator variable, as the highest viewed games are on xmas
td$xmas[as.POSIXlt(td$Game_Date, format="%m/%d/%Y") == "2017-12-25"]=1
td$xmas[as.POSIXlt(td$Game_Date, format="%m/%d/%Y") == "2016-12-25"]=1
# loops through every team to get the number of countries where viewership exceeds average
team_list <- levels(td$Home_Team)
c_2016 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2016)))
for(teamName in team_list){
temp_exceed <- teamCountry_2016(teamName)
c_2016 <- rbind(c_2016,temp_exceed)
}
c_2017 <- as.data.frame(matrix(rep(0),nrow=1,ncol=nrow(country_avg_2017)))
for(teamName in team_list){
temp_exceed <- teamCountry_2017(teamName)
c_2017 <- rbind(c_2017,temp_exceed)
}
# clean-up, get rid of top row, rename columns, cbind list of team names, replace NA with 0 (assumption is that NAs perform at or below league average)
c_2016 = data.frame(c_2016[-1,])
colnames(c_2016) <- country_avg_2016$Group.1
team_exceed_2016 = cbind(team_list,c_2016)
team_exceed_2016[is.na(team_exceed_2016)] <- 0
c_2017 = data.frame(c_2017[-1,])
colnames(c_2017) <- country_avg_2017$Group.1
team_exceed_2017 = cbind(team_list,c_2017)
team_exceed_2017[is.na(team_exceed_2017)] <- 0
# find each games' "country score"
df_teams <- unique(td[,c("Season", "Away_Team", "Home_Team")])
df_teams <- mutate(df_teams, id = rownames(df_teams))
df_teams_2016 = df_teams[df_teams$Season == "2016-17", ]
df_teams_2017 = df_teams[df_teams$Season == "2017-18", ]
# idea is that we find the max of the home team and away team country scores to get an overall measure of intl viewership for a particular game
colnames(team_exceed_2016)[1] <- "Away_Team"
temp_away_2016 <- merge(df_teams_2016,team_exceed_2016,by="Away_Team")
colnames(team_exceed_2016)[1] <- "Home_Team"
temp_home_2016 <- merge(df_teams_2016,team_exceed_2016,by="Home_Team")
temp_all_2016 <- rbind(temp_away_2016,temp_home_2016)
temp_all_2016 <- subset(temp_all_2016,select=-c(Away_Team, Home_Team))
colnames(team_exceed_2017)[1] <- "Away_Team"
temp_away_2017 <- merge(df_teams_2017,team_exceed_2017,by="Away_Team")
colnames(team_exceed_2017)[1] <- "Home_Team"
temp_home_2017 <- merge(df_teams_2017,team_exceed_2017,by="Home_Team")
temp_all_2017 <- rbind(temp_away_2017,temp_home_2017)
temp_all_2017 <- subset(temp_all_2017,select=-c(Away_Team, Home_Team))
temp_all <- rbind(temp_all_2016, temp_all_2017)
temp_all <- temp_all[with(temp_all,order(id, Season)),]
exceed_sum <- temp_all %>% group_by(id, Season) %>% summarise_all(funs(mean))
merge_teams_exceed <- merge(df_teams,exceed_sum,by="id")
# now merge the exceed score for each game back to the test dataset.
colnames(merge_teams_exceed)[2] <- "Season"
td3 <- merge(td,merge_teams_exceed,by=c("Away_Team","Home_Team","Season"))
# now merge player indicators to the test dataset
td4 <- merge(td3,players_suited_up,by=c("Game_ID"), all=TRUE)
td4<-td4[!(is.na(td4$Game_Date)),]
td4[is.na(td4)] <- 0
td4 <- subset(td4, select = -c(Game_ID, id, Season.y))
td4 <- subset(td4, select = -c(Game_Date))
###
# prepare the train dataset for glmnet model
x_pred <- subset(df4, select = -c(Total_Viewers))
x_train <- model.matrix( ~ .-1, x_pred)
x_train <- as.data.frame(x_train)
# prepare the test dataset for glmnet model
y_pred <- subset(td4, select = -c(Total_Viewers))
y_train <- model.matrix( ~ .-1, y_pred)
y_train <- as.data.frame(y_train)
# game_date code - problem is the test dataset doesn't contain all the same game dates as train so it has fewer variables, will add those variables to test so it is same dimension as train
# additional_cols <- setdiff(names(x_train),names(y_train))
# dummy_cols <- as.data.frame(matrix(rep(0,nrow(y_train)*length(additional_cols)), ncol = length(additional_cols)))
# colnames(dummy_cols) <- additional_cols
# y_train <- cbind(y_train,dummy_cols)
# I don't see a need to have home and away indicators, will collapse them
x_train_away <- x_train[,1:30]
x_train_home <- x_train[,31:59]
x_train_home <- cbind(Home_TeamATL = 0, x_train_home) # Atlanta doesn't have a home team, have to manually add it
colnames(x_train_away) <- team_list
colnames(x_train_home) <- team_list
x_train_teams <- x_train_away + x_train_home
x_train <- x_train[-c(1:59)]
x_train <- cbind(x_train_teams, x_train)
y_train_away <- y_train[,1:30]
y_train_home <- y_train[,31:59]
y_train_home <- cbind(Home_TeamATL = 0, y_train_home) # Atlanta doesn't have a home team, have to manually add it
colnames(y_train_away) <- team_list
colnames(y_train_home) <- team_list
y_train_teams <- y_train_away + y_train_home
y_train <- y_train[-c(1:59)]
y_train <- cbind(y_train_teams, y_train)
## code to get interaction terms
# y ~ .*. expands to all main effects plus every pairwise interaction
f <- as.formula(y ~ .*.)
y <- df4$Total_Viewers
# [, -1] drops the intercept column added by model.matrix
x_interact <- model.matrix(f, x_train)[, -1]
#x_interact <- as.matrix(x_interact)
## code to get interaction terms
f <- as.formula(y ~ .*.)
y <- td$Game_ID # dummy variable
y_interact <- model.matrix(f, y_train)[, -1]
#y_interact <- as.matrix(y_interact)
### estimate glmnet model for the train dataset
# NOTE(review): `fit` is only used for the commented-out plot; the model
# actually used for prediction is `fit1` below.
fit <-glmnet(x = x_interact, y = df4$Total_Viewers, alpha = 1)
#plot(fit, xvar = "lambda")
# cross-validate to pick the shrinkage penalty
crossval <- cv.glmnet(x = x_interact, y = df4$Total_Viewers)
#plot(crossval)
penalty <- crossval$lambda.min #optimal lambda
penalty #minimal shrinkage
# refit at the chosen lambda; alpha = 0.5 makes this an elastic net
fit1 <-glmnet(x = x_interact, y = df4$Total_Viewers, alpha = 0.5, lambda = penalty ) #estimate the model with that
#coef(fit1)
vi <- varImp(fit1, lambda = fit1$lambda)
### use model to predict the test dataset
# fit1 was fit with a single lambda, so predict() returns one column
results <- as.data.frame(predict(object=fit1, y_interact))
pred <- cbind(td$Game_ID, results)
colnames(pred)<- c("Game_ID","Total_Viewers")
# merge predictions back onto the test schedule, replacing the original
# Total_Viewers column (Total_Viewers.y) with the predicted one
td_glmnet <- merge(pred, td, by="Game_ID")
td_glmnet <- subset(td_glmnet, select = -c(Total_Viewers.y))
colnames(td_glmnet)[2] <- "Total_Viewers"
#write.csv(td_glmnet, file = "test_glmnet_20180718.csv", row.names=FALSE)
## lasso regression
# same penalty as above but alpha = 1 (pure lasso); `fit1` is reused
fit1 <-glmnet(x = x_interact, y = df4$Total_Viewers, alpha = 1, lambda = penalty ) #alpha = 1
results <- as.data.frame(predict(object=fit1, y_interact))
pred <- cbind(td$Game_ID, results)
colnames(pred)<- c("Game_ID","Total_Viewers")
td_lasso <- merge(pred, td, by="Game_ID")
td_lasso <- subset(td_lasso, select = -c(Total_Viewers.y))
colnames(td_lasso)[2] <- "Total_Viewers"
#write.csv(td_lasso, file = "test_lasso_20180718.csv", row.names=FALSE)
## now estimate the xgboost model
dtrain <- xgb.DMatrix(data = x_interact, label= df4$Total_Viewers) # convert our matrix into dmatrix object
xgb <- xgboost(data = dtrain, nround=25, objective = "reg:linear")
xgb_results <- predict(xgb, y_interact)
xgb_pred <- cbind(td$Game_ID, xgb_results)
colnames(xgb_pred)<- c("Game_ID","Total_Viewers")
td_xgb <- merge(xgb_pred, td, by="Game_ID")
td_xgb <- subset(td_xgb, select = -c(Total_Viewers.y))
colnames(td_xgb)[2] <- "Total_Viewers"
#write.csv(td_xgb, file = "test_xgb_20180718.csv", row.names=FALSE)
#average of all three methods to get final test dataset
# After the two merges, Total_Viewers.x = elastic net, Total_Viewers.y =
# lasso, Total_Viewers = xgboost; the final prediction is their mean.
td_final <- merge(td_glmnet,td_lasso,by=c("Game_ID","Season","Game_Date","Away_Team","Home_Team","month","weekday","xmas"))
td_final <- merge(td_final,td_xgb,by=c("Game_ID","Season","Game_Date","Away_Team","Home_Team","month","weekday","xmas"))
td_final$Total_Viewers_Final <- rowMeans(td_final[c('Total_Viewers.x', 'Total_Viewers.y','Total_Viewers')], na.rm=TRUE)
write.csv(td_final, file = "test_final_20180718.csv", row.names=FALSE)
|
#Loading the libraries in order to visualize the data
library(ggplot2)
library(ggmap)
# state-level polygon data for the continental US
US=map_data("state")
#Calculating the area of the US States and displaying it through the map with the help of a particular color code
# NOTE(review): `merged_data` must already exist in the session (built in
# an earlier step); assumed to have stateName, area and Murder columns.
Area=ggplot(merged_data, aes(map_id=stateName)) + geom_map(map=US, aes(fill=area),color="black")
Area=Area + expand_limits(x=US$long, y=US$lat) + ggtitle("Area of US states") + coord_map()
#Calculating the murder rate in the US and displaying it through the map with the help of a particular color code
murder=ggplot(merged_data, aes(map_id=stateName)) + geom_map(map=US, aes(fill=Murder),color="black")
murder=murder + expand_limits(x=US$long, y=US$lat) + ggtitle("Murder rate in US states") + coord_map()
| /Plotting.R | no_license | fall2018-wallace/hw7_ist687_drsharma | R | false | false | 732 | r |
#Loading the libraries in order to visualize the data
library(ggplot2)
library(ggmap)
# state-level polygon data for the continental US
US=map_data("state")
#Calculating the area of the US States and displaying it through the map with the help of a particular color code
# NOTE(review): `merged_data` must already exist in the session (built in
# an earlier step); assumed to have stateName, area and Murder columns.
Area=ggplot(merged_data, aes(map_id=stateName)) + geom_map(map=US, aes(fill=area),color="black")
Area=Area + expand_limits(x=US$long, y=US$lat) + ggtitle("Area of US states") + coord_map()
#Calculating the murder rate in the US and displaying it through the map with the help of a particular color code
murder=ggplot(merged_data, aes(map_id=stateName)) + geom_map(map=US, aes(fill=Murder),color="black")
murder=murder + expand_limits(x=US$long, y=US$lat) + ggtitle("Murder rate in US states") + coord_map()
|
#' Elastic net imputation
#'
#' `glmnet` is used to impute missing values.
#' For more details, see [glmnet][glmnet::glmnet()]
#'
#' @inheritParams impute_soft
#'
#' @param fit a list of lists of strings that contain formulas
#'   from previously fitted imputation models. This input variable
#'   is created using `data_ref` when `data_new = NULL`.
#'
#' @param df_min,df_max integer value designating the minimum
#'   and maximum degrees of freedom in penalized regression models.
#'
#' @param df_stp integer value indicating step size for model
#'   degrees of freedom between successive imputations.
#'
#' @return a `data.table` with one row per imputation (see
#'   `impute_net_ref()`) when `data_new = NULL`; otherwise the output of
#'   `impute_net_fit()` applied to the new data.
#'
#' @export
#'
impute_net <- function(
  data_ref,
  data_new = NULL,
  fit = NULL,
  cols = dplyr::everything(),
  df_min = 1,
  df_max = 10,
  df_stp = 1,
  restore_data = TRUE,
  verbose = 1
){
  # keep_cols = columns to be imputed
  keep_cols <- names(data_ref) %>%
    tidyselect::vars_select(!!rlang::enquo(cols))
  if(length(keep_cols) == 1) stop("1 column was selected (",
    keep_cols, ") but 2+ are needed", call. = FALSE)
  # if data_ref is given and nothing else
  # --> create fits for data_ref, return fits + imputed data refs
  # if data_ref/data_new are given, but no fits
  # --> create fits for rbind(data_ref, data_new), return imputed data_news
  # if data_ref/data_new + fits are given,
  # --> same as above but use warm starts
  # convert data frames into data.table objects if needed
  if(!is.data.table(data_ref))
    DT_ref <- as.data.table(data_ref)[, ..keep_cols]
  else
    DT_ref <- data_ref[, ..keep_cols]
  # convert characters to factors
  # modifying in place rather than copying data
  # the code is less readable but more performant
  if(any(sapply(DT_ref, is.character))){
    chr_cols <- names(DT_ref)[sapply(DT_ref, is.character)]
    DT_ref[, (chr_cols) := lapply(.SD, as.factor), .SDcols = chr_cols]
  }
  # variable types should be...
  check_var_types(DT_ref, valid_types = c('numeric', 'integer', 'factor'))
  # fill in missing data with means/modes for first iteration
  fillers <- vector(mode = 'list', length = ncol(DT_ref))
  names(fillers) <- names(DT_ref)
  for(f in names(fillers)){
    fillers[[f]] <- switch (get_var_type(DT_ref[[f]]),
      'intg' = as.integer(round(mean(DT_ref[[f]], na.rm = TRUE))),
      'ctns' = mean(DT_ref[[f]], na.rm = TRUE),
      'catg' = mode_est(DT_ref[[f]]),
      'bnry' = mode_est(DT_ref[[f]])
    )
  }
  # initialize a null DT_new object in case there isn't any new data
  DT_new <- NULL
  data_new_supplied <- !is.null(data_new)
  # repeat the code above for the testing data if it exists.
  if(data_new_supplied){
    if(is.null(fit)) stop("fit is needed to impute new data.\nRun",
      " impute_net() with new_data=NULL to create fit", call.=FALSE)
    # convert data frames into data.table objects if needed
    if(!is.data.table(data_new))
      DT_new <- as.data.table(data_new)[, ..keep_cols]
    else
      DT_new <- data_new[, ..keep_cols]
    if(any(sapply(DT_new, is.character))){
      chr_cols <- names(DT_new)[sapply(DT_new, is.character)]
      DT_new[, (chr_cols) := lapply(.SD, as.factor), .SDcols = chr_cols]
    }
    # should have exactly the same names and types as reference data
    check_data_new_names(DT_ref, DT_new)
    check_data_new_types(DT_ref, DT_new)
  }
  DT <- DT_new %||% DT_ref
  # number of imputations implied by the degrees-of-freedom grid.
  # Computed here because the early return below needs it; previously
  # `n_impute` was referenced there without ever being defined, so the
  # "nothing to impute" branch errored instead of returning.
  n_impute <- length(unique(round(seq(df_min, df_max, by = df_stp))))
  # need to keep empty cells for check_missingness to run correctly
  miss_indx <- mindx(DT, drop_empty = FALSE)
  if(is_empty(miss_indx)){
    warning("There are no missing values to impute",
      call. = FALSE)
    return(data.table(
      impute = seq(n_impute), df = NA, fit = NA, imputed_values = list(NULL)
    ))
  }
  # check for missing rows/columns
  check_missingness(miss_indx, N = nrow(DT), P = ncol(DT),
    label = 'data', new_data = data_new_supplied)
  # drop empty cols from miss_indx
  miss_indx <- drop_empty(miss_indx)
  # don't need to fill in values where there are no missing values
  fillers <- fillers[names(miss_indx)]
  if(data_new_supplied){
    impute_net_fit(
      DT = DT,
      fit = fit,
      df_min = df_min,
      df_max = df_max,
      df_stp = df_stp,
      fillers = fillers,
      miss_indx = miss_indx,
      restore_data = restore_data
    )
  } else {
    impute_net_ref(
      DT = DT,
      df_min = df_min,
      df_max = df_max,
      df_stp = df_stp,
      fillers = fillers,
      miss_indx = miss_indx,
      restore_data = restore_data
    )
  }
}
# Impute missing values in (new) data using previously fitted glmnet models.
#
# `fit` is the named list of glmnet fits produced by impute_net_ref().
# Missing cells are first seeded with the supplied means/modes; then
# `niter` rounds of model predictions refine them, and the final round's
# predictions are collected as the imputed values (one set per entry of
# the degrees-of-freedom grid).
impute_net_fit <- function(
  DT,
  fit,
  df_min = df_min,
  df_max = df_max,
  df_stp = df_stp,
  fillers,
  miss_indx,
  restore_data,
  niter = 3
) {
  df_sequence <- unique(round(seq(df_min, df_max, by = df_stp)))
  n_impute <- length(df_sequence)
  # seed the missing cells so the design matrix is complete
  DT <- fill_na(DT, vals = fillers, miss_indx, make_copy = FALSE)
  fctr_info <- get_factor_info(DT)
  .DT <- as.matrix(one_hot(DT))
  # could this be optimized? make .miss_indx hold one hot factors
  # and make miss_indx hold continuous variables? no overlap?
  # .miss_indx mirrors miss_indx but is keyed by one-hot column names
  .miss_indx <- miss_indx
  for (f in names(fctr_info$keys)) {
    for (k in fctr_info$keys[[f]]) {
      .miss_indx[[k]] <- miss_indx[[f]]
    }
    .miss_indx[[f]] <- NULL
  }
  imputed_values <- vector(mode = 'list', length = n_impute)
  for(i in seq_along(imputed_values)) {
    imputed_values[[i]] <- .miss_indx
  }
  for(k in seq(niter)){
    for(i in seq_along(fit)){
      col <- .col <- names(fit)[i]
      vtype <- get_var_type(DT[[col]])
      if(col %in% fctr_info$cols){
        .col <- fctr_info$keys[[col]]
      }
      .xvars <- setdiff(colnames(.DT), .col)
      for(j in seq_along(imputed_values)){
        # lambda_ipa[j] is the lambda chosen for the j-th imputation
        # when the model was fit (see impute_net_ref)
        prd <- predict(fit[[col]],
          s = fit[[col]]$lambda_ipa[j],
          newx = .DT[miss_indx[[col]], .xvars],
          type = if(vtype %in% c('ctns', 'intg')) 'response' else 'class'
        )
        if(vtype %in% c('catg', 'bnry')){
          prd <- one_hot_chr(prd, fctr_info$keys[[col]])
        }
        if (k < niter) {
          # intermediate rounds: write predictions back so later models
          # see the refined values
          .DT[miss_indx[[col]], .col] <- prd
        } else {
          # final round: collect predictions as the imputed values
          if(vtype %in% c('catg', 'bnry')){
            for (h in seq_along(.col)) {
              imputed_values[[j]][[.col[h]]] <- prd[, h]
            }
          } else if (vtype == 'ctns') {
            imputed_values[[j]][[.col]] <- as.numeric(prd)
          } else if (vtype == 'intg') {
            imputed_values[[j]][[.col]] <- as.integer(round(prd))
          }
        }
      }
    }
  }
  if(restore_data){
    # converting the imputed columns back into the format given in data.
    # this should leave us with a list that can be directly plugged in.
    imputed_values <- purrr::map(
      .x = imputed_values,
      .f = restore_vectypes,
      data = DT,
      impute_indx = miss_indx,
      fctr_info = fctr_info
    )
  }
  # Return the imputed values explicitly. Previously the function's value
  # was that of the trailing `if`, i.e. NULL whenever restore_data was
  # FALSE, silently discarding all of the work above.
  imputed_values
}
# Fit one glmnet imputation model per column with missing values in the
# reference data, and collect imputed values along a grid of model
# degrees of freedom (one imputed dataset per distinct grid value).
impute_net_ref <- function(
  DT,
  df_min,
  df_max,
  df_stp,
  fillers,
  miss_indx,
  restore_data
) {
  # degrees-of-freedom grid; one imputation per distinct df value
  df_sequence <- unique(round(seq(df_min, df_max, by = df_stp)))
  n_impute <- length(df_sequence)
  # seed missing cells with means/modes so the design matrix is complete
  DT <- fill_na(DT, vals = fillers, miss_indx, make_copy = FALSE)
  # names with . in front indicate one-hot encoded data
  # both needed - don't try to optimize
  .DT <- as.matrix(one_hot(DT))
  fctr_info <- get_factor_info(DT)
  imputed_values <- vector(mode = 'list', length = n_impute)
  fits <- vector(mode = 'list', length = length(miss_indx))
  names(fits) <- names(miss_indx)
  for(i in seq_along(imputed_values)) {
    imputed_values[[i]] <- miss_indx
  }
  #impute_formulas <- imputed_values
  for(impute_col in names(miss_indx)){
    # map the column's variable type to a glmnet family
    family <- switch (get_var_type(DT[[impute_col]]),
      'intg' = 'gaussian',
      'ctns' = 'gaussian',
      'catg' = 'multinomial',
      'bnry' = 'binomial'
    )
    # factors are modeled through their one-hot columns
    .outcome <- if(impute_col %in% fctr_info$cols){
      fctr_info$keys[[impute_col]]
    } else {
      impute_col
    }
    .predictors <- setdiff(colnames(.DT), .outcome)
    # fit on the rows where the outcome column is observed
    fit <- glmnet::glmnet(
      x = .DT[-miss_indx[[impute_col]], .predictors, drop = FALSE],
      y = .DT[-miss_indx[[impute_col]], .outcome, drop = FALSE],
      family = family,
      nlambda = 100,
      dfmax = min(df_max+10, length(.predictors))
    )
    # butchering
    # fit$call <- NULL
    # fit$dim <- NULL
    # fit$dev.ratio <- NULL
    # fit$nulldev <- NULL
    # fit$npasses <- NULL
    # fit$jerr <- NULL
    # fit$nobs <- NULL
    # pick one lambda per distinct df value within [df_min, df_max]
    df_indx <- df_unique_indx(fit$df)
    df_vals <- fit$df[df_indx]
    df_keep <- between(df_vals, lower = df_min, upper = df_max)
    df_indx_subset <- df_indx[df_keep]
    lambda <- fit$lambda[df_indx_subset]
    # pad (repeating the last lambda) or truncate so that exactly
    # n_impute lambdas are used
    if(length(lambda) < n_impute){
      ntimes <- n_impute - length(lambda)
      lambda <- c(lambda, rep(lambda[length(lambda)], ntimes))
    } else if(length(lambda) > n_impute){
      lambda <- lambda[1:n_impute]
    }
    # stash the chosen lambdas on the fit; impute_net_fit() reads these
    fit$lambda_ipa <- lambda
    yh <- stats::predict(fit,
      newx = .DT[miss_indx[[impute_col]], .predictors],
      s = lambda,
      type = 'response')
    #cf <- stats::coef(fit, s = fit$lambda[df_indx])
    fits[[impute_col]] <- fit
    if(family == 'multinomial'){
      # yh is indexed (row, class, lambda); spread class probabilities
      # over the factor's one-hot columns
      #cfs <- lapply(cf, deparse_net_coef)
      for(j in seq(n_impute)){
        for(k in seq_along(fctr_info$lvls[[impute_col]])){
          fctr_term <- fctr_info$keys[[impute_col]][k]
          imputed_values[[j]][[fctr_term]] <- yh[, k, j]
          #impute_formulas[[j]][[fctr_term]] <- cfs[[k]][[j]]
        }
      }
    } else {
      #cf <- deparse_net_coef(cf)
      if(family == 'binomial'){
        # second one-hot level gets the predicted probability, the
        # first gets its complement
        fctr_terms <- fctr_info$keys[[impute_col]]
        for(j in seq(n_impute)){
          imputed_values[[j]][[fctr_terms[1]]] <- 1 - yh[, j]
          imputed_values[[j]][[fctr_terms[2]]] <- yh[, j]
          # impute_formulas[[j]][[fctr_terms[1]]] <- paste("-1*(",cf[[j]],")")
          # impute_formulas[[j]][[fctr_terms[2]]] <- cf[[j]]
        }
      } else {
        for(j in seq(n_impute)){
          imputed_values[[j]][[impute_col]] <- yh[, j]
          #impute_formulas[[j]][[impute_col]] <- cf[[j]]
        }
      }
    }
  }
  if(restore_data){
    # converting the imputed columns back into the format given in data.
    # this should leave us with a list that can be directly plugged in.
    imputed_values <- purrr::map(
      .x = imputed_values,
      .f = restore_vectypes,
      data = DT,
      impute_indx = miss_indx,
      fctr_info = fctr_info
    )
  }
  # one row per imputation: df grid value, fitted models, imputed values
  data.table(
    impute = seq(n_impute),
    df = df_sequence,
    fit = list(fits),
    imputed_values = imputed_values
  )
}
# Turn a glmnet coefficient matrix (terms x imputations) into one
# human-readable linear-formula string per imputation, keeping only the
# non-zero coefficients. The intercept (if non-zero) leads the formula;
# remaining terms are rendered as "name * coef" joined by " + ".
deparse_net_coef <- function(net_prd){
  coef_mat <- as.matrix(net_prd)
  term_names <- rownames(coef_mat)
  out <- lapply(seq_len(ncol(coef_mat)), function(j){
    # which() drops NAs as well as zeros, matching the previous filter
    nz <- which(coef_mat[, j] != 0)
    nms <- term_names[nz]
    cfs <- coef_mat[nz, j]
    icpt_at <- which(nms == '(Intercept)')
    icpt_txt <- NULL
    if(length(icpt_at) > 0){
      icpt_txt <- paste0(cfs[icpt_at])
      nms <- nms[-icpt_at]
      cfs <- cfs[-icpt_at]
      if(length(nms) > 0) icpt_txt <- paste0(icpt_txt, ' + ')
    }
    term_txt <- NULL
    if(length(nms) > 0){
      term_txt <- paste(nms, "*", cfs)
      term_txt <- paste(term_txt, collapse = " + ")
    }
    # character(0) when the column had no non-zero coefficients at all
    paste0(icpt_txt, term_txt)
  })
  # one formula per imputation, named by the coefficient matrix columns
  names(out) <- colnames(coef_mat)
  out
}
| /R/impute_net.R | permissive | bcjaeger/ipa | R | false | false | 11,372 | r |
#' Elastic net imputation
#'
#' `glmnet` is used to impute missing values.
#' For more details, see [glmnet][glmnet::glmnet()]
#'
#' @inheritParams impute_soft
#'
#' @param fit a list of lists of strings that contain formulas
#'   from previously fitted imputation models. This input variable
#'   is created using `data_ref` when `data_new = NULL`.
#'
#' @param df_min,df_max integer value designating the minimum
#'   and maximum degrees of freedom in penalized regression models.
#'
#' @param df_stp integer value indicating step size for model
#'   degrees of freedom between successive imputations.
#'
#' @return a `data.table` with one row per imputation (see
#'   `impute_net_ref()`) when `data_new = NULL`; otherwise the output of
#'   `impute_net_fit()` applied to the new data.
#'
#' @export
#'
impute_net <- function(
  data_ref,
  data_new = NULL,
  fit = NULL,
  cols = dplyr::everything(),
  df_min = 1,
  df_max = 10,
  df_stp = 1,
  restore_data = TRUE,
  verbose = 1
){
  # keep_cols = columns to be imputed
  keep_cols <- names(data_ref) %>%
    tidyselect::vars_select(!!rlang::enquo(cols))
  if(length(keep_cols) == 1) stop("1 column was selected (",
    keep_cols, ") but 2+ are needed", call. = FALSE)
  # if data_ref is given and nothing else
  # --> create fits for data_ref, return fits + imputed data refs
  # if data_ref/data_new are given, but no fits
  # --> create fits for rbind(data_ref, data_new), return imputed data_news
  # if data_ref/data_new + fits are given,
  # --> same as above but use warm starts
  # convert data frames into data.table objects if needed
  if(!is.data.table(data_ref))
    DT_ref <- as.data.table(data_ref)[, ..keep_cols]
  else
    DT_ref <- data_ref[, ..keep_cols]
  # convert characters to factors
  # modifying in place rather than copying data
  # the code is less readable but more performant
  if(any(sapply(DT_ref, is.character))){
    chr_cols <- names(DT_ref)[sapply(DT_ref, is.character)]
    DT_ref[, (chr_cols) := lapply(.SD, as.factor), .SDcols = chr_cols]
  }
  # variable types should be...
  check_var_types(DT_ref, valid_types = c('numeric', 'integer', 'factor'))
  # fill in missing data with means/modes for first iteration
  fillers <- vector(mode = 'list', length = ncol(DT_ref))
  names(fillers) <- names(DT_ref)
  for(f in names(fillers)){
    fillers[[f]] <- switch (get_var_type(DT_ref[[f]]),
      'intg' = as.integer(round(mean(DT_ref[[f]], na.rm = TRUE))),
      'ctns' = mean(DT_ref[[f]], na.rm = TRUE),
      'catg' = mode_est(DT_ref[[f]]),
      'bnry' = mode_est(DT_ref[[f]])
    )
  }
  # initialize a null DT_new object in case there isn't any new data
  DT_new <- NULL
  data_new_supplied <- !is.null(data_new)
  # repeat the code above for the testing data if it exists.
  if(data_new_supplied){
    if(is.null(fit)) stop("fit is needed to impute new data.\nRun",
      " impute_net() with new_data=NULL to create fit", call.=FALSE)
    # convert data frames into data.table objects if needed
    if(!is.data.table(data_new))
      DT_new <- as.data.table(data_new)[, ..keep_cols]
    else
      DT_new <- data_new[, ..keep_cols]
    if(any(sapply(DT_new, is.character))){
      chr_cols <- names(DT_new)[sapply(DT_new, is.character)]
      DT_new[, (chr_cols) := lapply(.SD, as.factor), .SDcols = chr_cols]
    }
    # should have exactly the same names and types as reference data
    check_data_new_names(DT_ref, DT_new)
    check_data_new_types(DT_ref, DT_new)
  }
  DT <- DT_new %||% DT_ref
  # number of imputations implied by the degrees-of-freedom grid.
  # Computed here because the early return below needs it; previously
  # `n_impute` was referenced there without ever being defined, so the
  # "nothing to impute" branch errored instead of returning.
  n_impute <- length(unique(round(seq(df_min, df_max, by = df_stp))))
  # need to keep empty cells for check_missingness to run correctly
  miss_indx <- mindx(DT, drop_empty = FALSE)
  if(is_empty(miss_indx)){
    warning("There are no missing values to impute",
      call. = FALSE)
    return(data.table(
      impute = seq(n_impute), df = NA, fit = NA, imputed_values = list(NULL)
    ))
  }
  # check for missing rows/columns
  check_missingness(miss_indx, N = nrow(DT), P = ncol(DT),
    label = 'data', new_data = data_new_supplied)
  # drop empty cols from miss_indx
  miss_indx <- drop_empty(miss_indx)
  # don't need to fill in values where there are no missing values
  fillers <- fillers[names(miss_indx)]
  if(data_new_supplied){
    impute_net_fit(
      DT = DT,
      fit = fit,
      df_min = df_min,
      df_max = df_max,
      df_stp = df_stp,
      fillers = fillers,
      miss_indx = miss_indx,
      restore_data = restore_data
    )
  } else {
    impute_net_ref(
      DT = DT,
      df_min = df_min,
      df_max = df_max,
      df_stp = df_stp,
      fillers = fillers,
      miss_indx = miss_indx,
      restore_data = restore_data
    )
  }
}
# Impute missing values in (new) data using previously fitted glmnet models.
#
# `fit` is the named list of glmnet fits produced by impute_net_ref().
# Missing cells are first seeded with the supplied means/modes; then
# `niter` rounds of model predictions refine them, and the final round's
# predictions are collected as the imputed values (one set per entry of
# the degrees-of-freedom grid).
impute_net_fit <- function(
  DT,
  fit,
  df_min = df_min,
  df_max = df_max,
  df_stp = df_stp,
  fillers,
  miss_indx,
  restore_data,
  niter = 3
) {
  df_sequence <- unique(round(seq(df_min, df_max, by = df_stp)))
  n_impute <- length(df_sequence)
  # seed the missing cells so the design matrix is complete
  DT <- fill_na(DT, vals = fillers, miss_indx, make_copy = FALSE)
  fctr_info <- get_factor_info(DT)
  .DT <- as.matrix(one_hot(DT))
  # could this be optimized? make .miss_indx hold one hot factors
  # and make miss_indx hold continuous variables? no overlap?
  # .miss_indx mirrors miss_indx but is keyed by one-hot column names
  .miss_indx <- miss_indx
  for (f in names(fctr_info$keys)) {
    for (k in fctr_info$keys[[f]]) {
      .miss_indx[[k]] <- miss_indx[[f]]
    }
    .miss_indx[[f]] <- NULL
  }
  imputed_values <- vector(mode = 'list', length = n_impute)
  for(i in seq_along(imputed_values)) {
    imputed_values[[i]] <- .miss_indx
  }
  for(k in seq(niter)){
    for(i in seq_along(fit)){
      col <- .col <- names(fit)[i]
      vtype <- get_var_type(DT[[col]])
      if(col %in% fctr_info$cols){
        .col <- fctr_info$keys[[col]]
      }
      .xvars <- setdiff(colnames(.DT), .col)
      for(j in seq_along(imputed_values)){
        # lambda_ipa[j] is the lambda chosen for the j-th imputation
        # when the model was fit (see impute_net_ref)
        prd <- predict(fit[[col]],
          s = fit[[col]]$lambda_ipa[j],
          newx = .DT[miss_indx[[col]], .xvars],
          type = if(vtype %in% c('ctns', 'intg')) 'response' else 'class'
        )
        if(vtype %in% c('catg', 'bnry')){
          prd <- one_hot_chr(prd, fctr_info$keys[[col]])
        }
        if (k < niter) {
          # intermediate rounds: write predictions back so later models
          # see the refined values
          .DT[miss_indx[[col]], .col] <- prd
        } else {
          # final round: collect predictions as the imputed values
          if(vtype %in% c('catg', 'bnry')){
            for (h in seq_along(.col)) {
              imputed_values[[j]][[.col[h]]] <- prd[, h]
            }
          } else if (vtype == 'ctns') {
            imputed_values[[j]][[.col]] <- as.numeric(prd)
          } else if (vtype == 'intg') {
            imputed_values[[j]][[.col]] <- as.integer(round(prd))
          }
        }
      }
    }
  }
  if(restore_data){
    # converting the imputed columns back into the format given in data.
    # this should leave us with a list that can be directly plugged in.
    imputed_values <- purrr::map(
      .x = imputed_values,
      .f = restore_vectypes,
      data = DT,
      impute_indx = miss_indx,
      fctr_info = fctr_info
    )
  }
  # Return the imputed values explicitly. Previously the function's value
  # was that of the trailing `if`, i.e. NULL whenever restore_data was
  # FALSE, silently discarding all of the work above.
  imputed_values
}
# Fit one glmnet imputation model per column with missing values in the
# reference data, and collect imputed values along a grid of model
# degrees of freedom (one imputed dataset per distinct grid value).
impute_net_ref <- function(
  DT,
  df_min,
  df_max,
  df_stp,
  fillers,
  miss_indx,
  restore_data
) {
  # degrees-of-freedom grid; one imputation per distinct df value
  df_sequence <- unique(round(seq(df_min, df_max, by = df_stp)))
  n_impute <- length(df_sequence)
  # seed missing cells with means/modes so the design matrix is complete
  DT <- fill_na(DT, vals = fillers, miss_indx, make_copy = FALSE)
  # names with . in front indicate one-hot encoded data
  # both needed - don't try to optimize
  .DT <- as.matrix(one_hot(DT))
  fctr_info <- get_factor_info(DT)
  imputed_values <- vector(mode = 'list', length = n_impute)
  fits <- vector(mode = 'list', length = length(miss_indx))
  names(fits) <- names(miss_indx)
  for(i in seq_along(imputed_values)) {
    imputed_values[[i]] <- miss_indx
  }
  #impute_formulas <- imputed_values
  for(impute_col in names(miss_indx)){
    # map the column's variable type to a glmnet family
    family <- switch (get_var_type(DT[[impute_col]]),
      'intg' = 'gaussian',
      'ctns' = 'gaussian',
      'catg' = 'multinomial',
      'bnry' = 'binomial'
    )
    # factors are modeled through their one-hot columns
    .outcome <- if(impute_col %in% fctr_info$cols){
      fctr_info$keys[[impute_col]]
    } else {
      impute_col
    }
    .predictors <- setdiff(colnames(.DT), .outcome)
    # fit on the rows where the outcome column is observed
    fit <- glmnet::glmnet(
      x = .DT[-miss_indx[[impute_col]], .predictors, drop = FALSE],
      y = .DT[-miss_indx[[impute_col]], .outcome, drop = FALSE],
      family = family,
      nlambda = 100,
      dfmax = min(df_max+10, length(.predictors))
    )
    # butchering
    # fit$call <- NULL
    # fit$dim <- NULL
    # fit$dev.ratio <- NULL
    # fit$nulldev <- NULL
    # fit$npasses <- NULL
    # fit$jerr <- NULL
    # fit$nobs <- NULL
    # pick one lambda per distinct df value within [df_min, df_max]
    df_indx <- df_unique_indx(fit$df)
    df_vals <- fit$df[df_indx]
    df_keep <- between(df_vals, lower = df_min, upper = df_max)
    df_indx_subset <- df_indx[df_keep]
    lambda <- fit$lambda[df_indx_subset]
    # pad (repeating the last lambda) or truncate so that exactly
    # n_impute lambdas are used
    if(length(lambda) < n_impute){
      ntimes <- n_impute - length(lambda)
      lambda <- c(lambda, rep(lambda[length(lambda)], ntimes))
    } else if(length(lambda) > n_impute){
      lambda <- lambda[1:n_impute]
    }
    # stash the chosen lambdas on the fit; impute_net_fit() reads these
    fit$lambda_ipa <- lambda
    yh <- stats::predict(fit,
      newx = .DT[miss_indx[[impute_col]], .predictors],
      s = lambda,
      type = 'response')
    #cf <- stats::coef(fit, s = fit$lambda[df_indx])
    fits[[impute_col]] <- fit
    if(family == 'multinomial'){
      # yh is indexed (row, class, lambda); spread class probabilities
      # over the factor's one-hot columns
      #cfs <- lapply(cf, deparse_net_coef)
      for(j in seq(n_impute)){
        for(k in seq_along(fctr_info$lvls[[impute_col]])){
          fctr_term <- fctr_info$keys[[impute_col]][k]
          imputed_values[[j]][[fctr_term]] <- yh[, k, j]
          #impute_formulas[[j]][[fctr_term]] <- cfs[[k]][[j]]
        }
      }
    } else {
      #cf <- deparse_net_coef(cf)
      if(family == 'binomial'){
        # second one-hot level gets the predicted probability, the
        # first gets its complement
        fctr_terms <- fctr_info$keys[[impute_col]]
        for(j in seq(n_impute)){
          imputed_values[[j]][[fctr_terms[1]]] <- 1 - yh[, j]
          imputed_values[[j]][[fctr_terms[2]]] <- yh[, j]
          # impute_formulas[[j]][[fctr_terms[1]]] <- paste("-1*(",cf[[j]],")")
          # impute_formulas[[j]][[fctr_terms[2]]] <- cf[[j]]
        }
      } else {
        for(j in seq(n_impute)){
          imputed_values[[j]][[impute_col]] <- yh[, j]
          #impute_formulas[[j]][[impute_col]] <- cf[[j]]
        }
      }
    }
  }
  if(restore_data){
    # converting the imputed columns back into the format given in data.
    # this should leave us with a list that can be directly plugged in.
    imputed_values <- purrr::map(
      .x = imputed_values,
      .f = restore_vectypes,
      data = DT,
      impute_indx = miss_indx,
      fctr_info = fctr_info
    )
  }
  # one row per imputation: df grid value, fitted models, imputed values
  data.table(
    impute = seq(n_impute),
    df = df_sequence,
    fit = list(fits),
    imputed_values = imputed_values
  )
}
# Turn a glmnet coefficient matrix (terms x imputations) into one
# human-readable linear-formula string per imputation, keeping only the
# non-zero coefficients. The intercept (if non-zero) leads the formula;
# remaining terms are rendered as "name * coef" joined by " + ".
deparse_net_coef <- function(net_prd){
  coef_mat <- as.matrix(net_prd)
  term_names <- rownames(coef_mat)
  out <- lapply(seq_len(ncol(coef_mat)), function(j){
    # which() drops NAs as well as zeros, matching the previous filter
    nz <- which(coef_mat[, j] != 0)
    nms <- term_names[nz]
    cfs <- coef_mat[nz, j]
    icpt_at <- which(nms == '(Intercept)')
    icpt_txt <- NULL
    if(length(icpt_at) > 0){
      icpt_txt <- paste0(cfs[icpt_at])
      nms <- nms[-icpt_at]
      cfs <- cfs[-icpt_at]
      if(length(nms) > 0) icpt_txt <- paste0(icpt_txt, ' + ')
    }
    term_txt <- NULL
    if(length(nms) > 0){
      term_txt <- paste(nms, "*", cfs)
      term_txt <- paste(term_txt, collapse = " + ")
    }
    # character(0) when the column had no non-zero coefficients at all
    paste0(icpt_txt, term_txt)
  })
  # one formula per imputation, named by the coefficient matrix columns
  names(out) <- colnames(coef_mat)
  out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cropImage.R
\name{cropImage}
\alias{cropImage}
\title{crop a sub-image via a mask}
\usage{
cropImage(image, labelImage, label = 1)
}
\arguments{
\item{image}{antsImage to crop}
\item{labelImage}{antsImage with label values.
If not supplied, estimated from data.}
\item{label}{the label value to use}
}
\value{
subimage
}
\description{
uses a label image to crop a smaller image from within a larger image
}
\examples{
fi <- antsImageRead(getANTsRData("r16"))
cropped <- cropImage(fi)
cropped <- cropImage(fi, fi, 250)
}
\author{
Brian B. Avants, Nicholas J. Tustison
}
\keyword{crop}
\keyword{extract}
\keyword{sub-image}
| /man/cropImage.Rd | permissive | alainlompo/ANTsR | R | false | true | 706 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cropImage.R
\name{cropImage}
\alias{cropImage}
\title{crop a sub-image via a mask}
\usage{
cropImage(image, labelImage, label = 1)
}
\arguments{
\item{image}{antsImage to crop}
\item{labelImage}{antsImage with label values.
If not supplied, estimated from data.}
\item{label}{the label value to use}
}
\value{
subimage
}
\description{
uses a label image to crop a smaller image from within a larger image
}
\examples{
fi <- antsImageRead(getANTsRData("r16"))
cropped <- cropImage(fi)
cropped <- cropImage(fi, fi, 250)
}
\author{
Brian B. Avants, Nicholas J. Tustison
}
\keyword{crop,}
\keyword{extract}
\keyword{sub-image}
|
#' Filter out rows whose key appears in another data frame
#'
#' When you are looking to filter out items that appear in another data
#' frame, negating an `%in%` filter silently returns *all* records if you
#' mistakenly refer to a column that does not exist in the comparison data
#' frame. This function protects against that mistake by throwing an error
#' when the comparison column is missing.
#'
#' @param .data your dataframe
#' @param OtherDf another data frame that you are comparing records with
#' @param columnInData the column in your original dataframe that will be the basis of the filter
#' @param columnInOtherDf the corresponding column found in the dataframe that you are comparing to
#'
#' @return `.data` without the rows whose `columnInData` value appears in
#'   `OtherDf[[columnInOtherDf]]`.
#' @export
#'
#' @examples
#' # Suppose `otherdata` lists students you want omitted, but its ID column
#' # is actually named Student_ID. A plain filter with the wrong name runs
#' # without complaint and filters nothing:
#' # data %>% filter(!(ID %in% otherdata$ID))
#' # With this function, the wrong name fails loudly:
#' # data %>% filter_not_in(otherdata, ID, ID)         # error
#' # data %>% filter_not_in(otherdata, ID, Student_ID) # works
filter_not_in <- function(.data, OtherDf, columnInData, columnInOtherDf){
  columnInData <- rlang::ensym(columnInData)
  other_name <- rlang::as_name(rlang::ensym(columnInOtherDf))
  # Fail loudly when the comparison column does not exist -- the whole
  # point of this helper -- instead of checking inside filter().
  if (!other_name %in% names(OtherDf)) {
    stop("Column '", other_name, "' not found in the comparison data frame.",
         call. = FALSE)
  }
  # [[ extraction returns a plain vector for both data.frames and tibbles;
  # the previous `OtherDf[, names(OtherDf) %in% c(sym)]` returned a
  # one-column tibble (not a vector) when OtherDf was a tibble, which
  # breaks the %in% comparison.
  .data %>%
    filter(!(!!columnInData %in% OtherDf[[other_name]]))
}
| /R/filter_not_in.R | permissive | patdkeller/instR | R | false | false | 1,694 | r | #' Title
#' Filter out rows whose key appears in another data frame
#'
#' When you are looking to filter out items that appear in another data
#' frame, negating an `%in%` filter silently returns *all* records if you
#' mistakenly refer to a column that does not exist in the comparison data
#' frame. This function protects against that mistake by throwing an error
#' when the comparison column is missing.
#'
#' @param .data your dataframe
#' @param OtherDf another data frame that you are comparing records with
#' @param columnInData the column in your original dataframe that will be the basis of the filter
#' @param columnInOtherDf the corresponding column found in the dataframe that you are comparing to
#'
#' @return `.data` without the rows whose `columnInData` value appears in
#'   `OtherDf[[columnInOtherDf]]`.
#' @export
#'
#' @examples
#' # Suppose `otherdata` lists students you want omitted, but its ID column
#' # is actually named Student_ID. A plain filter with the wrong name runs
#' # without complaint and filters nothing:
#' # data %>% filter(!(ID %in% otherdata$ID))
#' # With this function, the wrong name fails loudly:
#' # data %>% filter_not_in(otherdata, ID, ID)         # error
#' # data %>% filter_not_in(otherdata, ID, Student_ID) # works
filter_not_in <- function(.data, OtherDf, columnInData, columnInOtherDf){
  columnInData <- rlang::ensym(columnInData)
  other_name <- rlang::as_name(rlang::ensym(columnInOtherDf))
  # Fail loudly when the comparison column does not exist -- the whole
  # point of this helper -- instead of checking inside filter().
  if (!other_name %in% names(OtherDf)) {
    stop("Column '", other_name, "' not found in the comparison data frame.",
         call. = FALSE)
  }
  # [[ extraction returns a plain vector for both data.frames and tibbles;
  # the previous `OtherDf[, names(OtherDf) %in% c(sym)]` returned a
  # one-column tibble (not a vector) when OtherDf was a tibble, which
  # breaks the %in% comparison.
  .data %>%
    filter(!(!!columnInData %in% OtherDf[[other_name]]))
}
|
## Generating Plot 3
setwd(choose.dir())
datafile <- file("household_power_consumption.txt")
# Keep only observations for 1-2 Feb 2007. header = FALSE because the grep
# already stripped the header line (header = TRUE silently consumed the
# first observation as column names); names come from col.names instead.
HPC <- read.table(text = grep("^[1,2]/2/2007", readLines(datafile), value = TRUE),col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), sep = ";", header = FALSE)
# Build the Datetime column used on the x-axis below; it was referenced in
# the plot() formula but never created, so the script errored out.
HPC$Datetime <- as.POSIXct(strptime(paste(HPC$Date, HPC$Time), "%d/%m/%Y %H:%M:%S"))
png(file="plot3.png",width = 480, height = 480, units = "px")
# y-axis relabeled: this panel plots the sub-metering series, not global
# active power (the old label was copied from a different plot).
with(HPC, {plot(Sub_metering_1 ~ Datetime, type = "l",ylab = "Energy sub metering", xlab = "")
lines(Sub_metering_2 ~ Datetime, col = 'Red')
lines(Sub_metering_3 ~ Datetime, col = 'Blue')
})
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /project1-plot3.R | no_license | MonHab/ExData_Plotting1 | R | false | false | 836 | r |
## Generating Plot 3
setwd(choose.dir())
datafile <- file("household_power_consumption.txt")
# Keep only observations for 1-2 Feb 2007. header = FALSE because the grep
# already stripped the header line (header = TRUE silently consumed the
# first observation as column names); names come from col.names instead.
HPC <- read.table(text = grep("^[1,2]/2/2007", readLines(datafile), value = TRUE),col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), sep = ";", header = FALSE)
# Build the Datetime column used on the x-axis below; it was referenced in
# the plot() formula but never created, so the script errored out.
HPC$Datetime <- as.POSIXct(strptime(paste(HPC$Date, HPC$Time), "%d/%m/%Y %H:%M:%S"))
png(file="plot3.png",width = 480, height = 480, units = "px")
# y-axis relabeled: this panel plots the sub-metering series, not global
# active power (the old label was copied from a different plot).
with(HPC, {plot(Sub_metering_1 ~ Datetime, type = "l",ylab = "Energy sub metering", xlab = "")
lines(Sub_metering_2 ~ Datetime, col = 'Red')
lines(Sub_metering_3 ~ Datetime, col = 'Blue')
})
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
# Overlay Var(W1) time series from five simulation samples on one plot.
V_W1 <- read.table("V_W1.txt")
# fixed axes: 500 rounds on x, Var(W1) range on y
xrange <- range(0,500)
yrange <- range(0.12,0.4)
# empty canvas (type = "n"); the series are added with lines() below
plot(xrange,yrange,type="n",ann=FALSE)
lines(V_W1, type="l", col="red")
# samples 2-5 live in the sibling directories ../2 .. ../5
V_W1b <-read.table("../2/V_W1.txt")
lines(V_W1b, type="l", col="blue")
V_W1c <-read.table("../3/V_W1.txt")
lines(V_W1c, type="l", col="black")
V_W1d <-read.table("../4/V_W1.txt")
lines(V_W1d, type="l", col="green")
V_W1e <-read.table("../5/V_W1.txt")
lines(V_W1e, type="l", col="purple")
# labels are Portuguese rendered output (Taxa = rate, Rodadas = rounds,
# Amostras = samples) and are deliberately left untranslated
title(main="Taxa = 0,1", col.main="black", font.main=4)
title(xlab="Rodadas", col.main="black")
title(ylab="Var(W1)", col.main="black")
legend(max(xrange)/5, max(yrange), c("1","2","3","4","5"), cex=0.8, col=c("red","blue","black","green","purple"),title="Amostras", lty=1)
| /dados e graficos/taxa0.1/300 clientes, 500 rodadas/gerar graficos(V_W1) .R | no_license | rocanaan/proj-aguiar-2011 | R | false | false | 731 | r | V_W1 <- read.table("V_W1.txt")
# fixed axes: 500 rounds on x, Var(W1) range on y
xrange <- range(0,500)
yrange <- range(0.12,0.4)
# empty canvas (type = "n"); the series are added with lines() below
plot(xrange,yrange,type="n",ann=FALSE)
lines(V_W1, type="l", col="red")
# samples 2-5 live in the sibling directories ../2 .. ../5
V_W1b <-read.table("../2/V_W1.txt")
lines(V_W1b, type="l", col="blue")
V_W1c <-read.table("../3/V_W1.txt")
lines(V_W1c, type="l", col="black")
V_W1d <-read.table("../4/V_W1.txt")
lines(V_W1d, type="l", col="green")
V_W1e <-read.table("../5/V_W1.txt")
lines(V_W1e, type="l", col="purple")
# labels are Portuguese rendered output (Taxa = rate, Rodadas = rounds,
# Amostras = samples) and are deliberately left untranslated
title(main="Taxa = 0,1", col.main="black", font.main=4)
title(xlab="Rodadas", col.main="black")
title(ylab="Var(W1)", col.main="black")
legend(max(xrange)/5, max(yrange), c("1","2","3","4","5"), cex=0.8, col=c("red","blue","black","green","purple"),title="Amostras", lty=1)
|
#!/usr/local/bin/Rscript
# Interactive cost-of-living comparison between two cities, scraped from
# numbeo.com. Prompts on stdin for a source city, an amount, and a target
# city, then reports the equivalent amount and an earning-power ratio.
system("clear");
cat("Please wait! The process is ongoing...\n");
invisible(Sys.setlocale("LC_ALL", 'en_US.UTF-8'))
# Silence warnings globally for the interactive session.
options(warn=-1)
# Install any missing dependencies, then attach them all.
pck_ <- c("rvest","stringr","data.table","corpus","qdapRegex","countrycode","priceR")
pck <- pck_[!(pck_ %in% installed.packages()[,"Package"])]
if(length(pck)){
cat(paste0("Installing: ", pck, "\n"))
install.packages(pck, repos = 'https://cran.rstudio.com/')
}
suppressWarnings(suppressMessages(invisible(lapply(pck_, require, character.only = TRUE))))
# Lookup table of supported cities; the code below reads its city,
# alternative, country and state columns.
all_cities <- fread("data/cities.csv") %>% as.data.frame()
# --- Resolve city 1 (source city) ------------------------------------------
# Prompt for the source city and validate it against the known-cities table.
system("clear");
cat("Enter the city: ");
city_1 <- readLines("stdin",n=1);
# NOTE(review): `||` on grep() results errors for a length-0 operand (no
# match); any(grepl(...)) would be safer -- confirm intended behaviour.
if(!is.na(grep(str_to_title(city_1), all_cities$city) || grep(str_to_title(city_1), all_cities$alternative))){
# Case 1: match on the canonical name with no alternative spelling recorded.
if(length(grep(str_to_title(city_1), all_cities$city)) && is.na(all_cities[grep(str_to_title(city_1), all_cities$city), "alternative"])){
city_link_1 <- str_to_title(city_1)
country_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$city),"country"] %>% trimws()
# Case 2: canonical match that also carries an alternative name; build the
# URL fragment "Name+%28Alternative%29&" expected by Numbeo.
}else if(length(grep(str_to_title(city_1),all_cities$city)) > 0 && !is.na(all_cities[grep(str_to_title(city_1),all_cities$city), "alternative"])){
city_link_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$city),]
city_link_1 <- paste0(trimws(city_link_1[,1]),"+%28",trimws(city_link_1[,2]),"%29&")
country_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$city),"country"] %>% trimws()
# Case 3: the input matched only an alternative spelling.
}else if(length(grep(str_to_title(city_1),all_cities$city)) == 0 && length(grep(str_to_title(city_1),all_cities$alternative)) > 0){
city_link_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$alternative),]
city_link_1 <- paste0(trimws(city_link_1[,1]),"+%28",trimws(city_link_1[,2]),"%29&")
country_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$alternative),"country"] %>% trimws()
}else{
stop("An error occurred. Try again!")
}
}else{
stop("This city is not available. Please enter another city!")
}
# Special case: force the country to the UK for London.
country_1 <- ifelse(city_link_1 == "London", "United Kingdom", country_1)
# ISO 4217 currency code of country 1 (used as the display currency).
currency_ <- countrycode(country_1,origin = 'country.name',destination = "iso4217c")
# URL-encode spaces in multi-word city/country names with "+".
if(length(strsplit(city_link_1, " ")[[1]]) > 1){
city_link_1 <- paste0(strsplit(city_link_1, " ")[[1]], collapse = "+")
}else{
city_link_1 <- city_link_1
}
if(length(strsplit(country_1, " ")[[1]]) > 1){
country_1 <- paste0(strsplit(country_1, " ")[[1]], collapse = "+")
}else{
country_1 <- country_1
}
# US cities need their state appended ("City%2C+ST") to disambiguate.
if(country_1 == "United+States"){
city_link_1 <- paste0(city_link_1,"%2C+",all_cities[grep(pattern=str_to_title(city_1), all_cities$city),"state"] %>% trimws())
}else{
city_link_1 <- city_link_1
}
# --- Amount and city 2 (target city) ---------------------------------------
# Prompt for the amount (in city 1's currency) and the comparison city, then
# resolve city 2 exactly the same way city 1 was resolved above.
cat(paste0("Enter the amount to compare (in ", currency_, "): "));
amount_ <- readLines("stdin",n=1);
cat("Enter the city to compare: ");
city_2 <- readLines("stdin",n=1);
# NOTE(review): `||` on grep() results errors for a length-0 operand (no
# match); any(grepl(...)) would be safer -- confirm intended behaviour.
if(!is.na(grep(str_to_title(city_2), all_cities$city) || grep(str_to_title(city_2), all_cities$alternative))){
# Case 1: canonical name, no alternative spelling recorded.
if(length(grep(str_to_title(city_2), all_cities$city)) && is.na(all_cities[grep(str_to_title(city_2), all_cities$city), "alternative"])){
city_link_2 <- str_to_title(city_2)
country_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$city),"country"] %>% trimws()
# Case 2: canonical name plus alternative -> "Name+%28Alternative%29&".
}else if(length(grep(str_to_title(city_2),all_cities$city)) > 0 && !is.na(all_cities[grep(str_to_title(city_2),all_cities$city), "alternative"])){
city_link_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$city),]
city_link_2 <- paste0(trimws(city_link_2[,1]),"+%28",trimws(city_link_2[,2]),"%29&")
country_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$city),"country"] %>% trimws()
# Case 3: matched only an alternative spelling.
}else if(length(grep(str_to_title(city_2),all_cities$city)) == 0 && length(grep(str_to_title(city_2),all_cities$alternative)) > 0){
city_link_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$alternative),]
city_link_2 <- paste0(trimws(city_link_2[,1]),"+%28",trimws(city_link_2[,2]),"%29&")
country_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$alternative),"country"] %>% trimws()
}else{
stop("An error occurred. Try again!")
}
}else{
stop("This city is not available. Please enter another city!")
}
# Special case: force the country to the UK for London.
country_2 <- ifelse(city_link_2 == "London", "United Kingdom", country_2)
# URL-encode spaces in multi-word names; append the state for US cities.
if(length(strsplit(city_link_2, " ")[[1]]) > 1){
city_link_2 <- paste0(strsplit(city_link_2, " ")[[1]], collapse = "+")
}else{
city_link_2 <- city_link_2
}
if(country_2 == "United+States"){
city_link_2 <- paste0(city_link_2,"%2C+",all_cities[grep(pattern=str_to_title(city_2), all_cities$city),"state"] %>% trimws())
}else{
city_link_2 <- city_link_2
}
if(length(strsplit(country_2, " ")[[1]]) > 1){
country_2 <- paste0(strsplit(country_2, " ")[[1]], collapse = "+")
}else{
country_2 <- country_2
}
# --- Scrape the comparison and report the result ---------------------------
start_ <- Sys.time()
# Numbeo comparison page for the two cities, displayed in city 1's currency.
url_ <- paste0("https://www.numbeo.com/cost-of-living/compare_cities.jsp?country1=",country_1,"&city1=",city_link_1,"&country2=",country_2,"&city2=",city_link_2,"&amount=",amount_,"&displayCurrency=",currency_)
html <- read_html(url_)
# Third sentence of the page summary holds the "would need around X" text.
sum_ <- as.character(text_split(html %>% html_nodes('.summary_big') %>% html_text() %>% str_trim() %>% unlist(), "sentences")[1,3])
if(country_1 != country_2){
# Cross-country comparisons quote the local-currency amount in parentheses.
sum_price <- rm_between(rm_between(sum_, 'would need around', 'in', extract=TRUE)[[1]], '(', ')', extract=TRUE)[[1]]
sum_price <- gsub(",","",sum_price)
}else{
sum_price <- rm_between(sum_, 'would need around', 'in', extract=TRUE)[[1]]
sum_price <- gsub(",","",sum_price)
}
# Strip a trailing sentence period, if any, before parsing the number.
sum_price <- ifelse(substr(sum_price,nchar(sum_price),nchar(sum_price)) == ".",
substr(sum_price,1,nchar(sum_price)-1),
sum_price)
price_2 <- as.numeric(gsub('[^-0-9.]', '', sum_price))
currency_2 <- countrycode(country_2,origin = 'country.name',destination = "iso4217c")
# BUGFIX: the exchange-rate table for currency_2 was fetched twice in a row;
# the second, unsuppressed call was redundant network traffic that simply
# overwrote the first result. A single suppressed fetch is kept.
invisible(capture.output(price2_inbase <- suppressWarnings(suppressMessages(invisible(exchange_rate_latest(currency = currency_2))))))
# Convert the scraped city-2 price back into city 1's currency.
price2_inbase <- round(price_2*(price2_inbase[which(price2_inbase$currency == currency_),2]),2)
# USD rates let us compare earning power across the two currencies.
invisible(capture.output(fx_usd <- exchange_rate_latest()))
price1_usd <- as.numeric(fx_usd[which(fx_usd$currency == currency_),2])
price2_usd <- as.numeric(fx_usd[which(fx_usd$currency == currency_2),2])
system("clear");
# Same-country comparisons need no currency conversion in the report.
if(country_1 == country_2){
cat(" Date: ", format(as.Date(Sys.Date(),origin="1970-01-01")),"\n",
"Source city: ", str_to_title(city_1),"\n",
"Compared city: ", str_to_title(city_2),"\n",
"Amount: ", amount_, currency_,"in",str_to_title(city_1),"\n",
paste0("\nYou need to have ", paste(price_2, currency_2), " in ", str_to_title(city_2)," to maintain the same life in ",str_to_title(city_1)," with ",amount_," ",currency_,"."),"\n",
"\nEarning rate (City 2 / City 1): ", round((price_2/price2_usd)/(as.numeric(amount_)/as.numeric(price1_usd)),2),"\n")
}else{
cat(" Date: ", format(as.Date(Sys.Date(),origin="1970-01-01")),"\n",
"Source city: ", str_to_title(city_1),"\n",
"Compared city: ", str_to_title(city_2),"\n",
"Amount: ", amount_, currency_,"in",str_to_title(city_1),"\n",
"\nYou need to have", paste0(paste(price_2, currency_2), " (",paste(price2_inbase, currency_), ") in ", str_to_title(city_2)," to maintain the same life in ",str_to_title(city_1)," with ",amount_," ",currency_),"\n",
"\nEarning rate (City 2 / City 1): ", round((price_2/price2_usd)/(as.numeric(amount_)/as.numeric(price1_usd)),2),"\n")
}
end_ <- Sys.time()
time_ <- end_ - start_
cat("\n\nProcess time: ",round(time_,2), " seconds.\n\n")
| /cost.R | no_license | egeor90/cost_of_living | R | false | false | 7,697 | r | #!/usr/local/bin/Rscript
# Setup for the interactive Numbeo cost-of-living comparison: install and
# attach dependencies, then load the supported-cities lookup table.
system("clear");
cat("Please wait! The process is ongoing...\n");
invisible(Sys.setlocale("LC_ALL", 'en_US.UTF-8'))
# Silence warnings globally for the interactive session.
options(warn=-1)
pck_ <- c("rvest","stringr","data.table","corpus","qdapRegex","countrycode","priceR")
pck <- pck_[!(pck_ %in% installed.packages()[,"Package"])]
if(length(pck)){
cat(paste0("Installing: ", pck, "\n"))
install.packages(pck, repos = 'https://cran.rstudio.com/')
}
suppressWarnings(suppressMessages(invisible(lapply(pck_, require, character.only = TRUE))))
# City table with city / alternative / country / state columns.
all_cities <- fread("data/cities.csv") %>% as.data.frame()
# Prompt for the source city, resolve it to a Numbeo URL fragment
# (city_link_1) and a country (country_1) via the all_cities table.
system("clear");
cat("Enter the city: ");
city_1 <- readLines("stdin",n=1);
if(!is.na(grep(str_to_title(city_1), all_cities$city) || grep(str_to_title(city_1), all_cities$alternative))){
# Canonical name, no alternative spelling recorded.
if(length(grep(str_to_title(city_1), all_cities$city)) && is.na(all_cities[grep(str_to_title(city_1), all_cities$city), "alternative"])){
city_link_1 <- str_to_title(city_1)
country_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$city),"country"] %>% trimws()
# Canonical name with an alternative -> "Name+%28Alternative%29&".
}else if(length(grep(str_to_title(city_1),all_cities$city)) > 0 && !is.na(all_cities[grep(str_to_title(city_1),all_cities$city), "alternative"])){
city_link_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$city),]
city_link_1 <- paste0(trimws(city_link_1[,1]),"+%28",trimws(city_link_1[,2]),"%29&")
country_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$city),"country"] %>% trimws()
# Matched only an alternative spelling.
}else if(length(grep(str_to_title(city_1),all_cities$city)) == 0 && length(grep(str_to_title(city_1),all_cities$alternative)) > 0){
city_link_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$alternative),]
city_link_1 <- paste0(trimws(city_link_1[,1]),"+%28",trimws(city_link_1[,2]),"%29&")
country_1 <- all_cities[grep(pattern=str_to_title(city_1), all_cities$alternative),"country"] %>% trimws()
}else{
stop("An error occurred. Try again!")
}
}else{
stop("This city is not available. Please enter another city!")
}
# Special case: force the country to the UK for London.
country_1 <- ifelse(city_link_1 == "London", "United Kingdom", country_1)
# Display currency = ISO 4217 code of country 1.
currency_ <- countrycode(country_1,origin = 'country.name',destination = "iso4217c")
# URL-encode spaces with "+"; append the state for US cities.
if(length(strsplit(city_link_1, " ")[[1]]) > 1){
city_link_1 <- paste0(strsplit(city_link_1, " ")[[1]], collapse = "+")
}else{
city_link_1 <- city_link_1
}
if(length(strsplit(country_1, " ")[[1]]) > 1){
country_1 <- paste0(strsplit(country_1, " ")[[1]], collapse = "+")
}else{
country_1 <- country_1
}
if(country_1 == "United+States"){
city_link_1 <- paste0(city_link_1,"%2C+",all_cities[grep(pattern=str_to_title(city_1), all_cities$city),"state"] %>% trimws())
}else{
city_link_1 <- city_link_1
}
# Prompt for the amount (in city 1's currency) and for the comparison city,
# then resolve city 2 exactly as city 1 was resolved above.
cat(paste0("Enter the amount to compare (in ", currency_, "): "));
amount_ <- readLines("stdin",n=1);
cat("Enter the city to compare: ");
city_2 <- readLines("stdin",n=1);
if(!is.na(grep(str_to_title(city_2), all_cities$city) || grep(str_to_title(city_2), all_cities$alternative))){
# Canonical name, no alternative spelling recorded.
if(length(grep(str_to_title(city_2), all_cities$city)) && is.na(all_cities[grep(str_to_title(city_2), all_cities$city), "alternative"])){
city_link_2 <- str_to_title(city_2)
country_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$city),"country"] %>% trimws()
# Canonical name with an alternative -> "Name+%28Alternative%29&".
}else if(length(grep(str_to_title(city_2),all_cities$city)) > 0 && !is.na(all_cities[grep(str_to_title(city_2),all_cities$city), "alternative"])){
city_link_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$city),]
city_link_2 <- paste0(trimws(city_link_2[,1]),"+%28",trimws(city_link_2[,2]),"%29&")
country_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$city),"country"] %>% trimws()
# Matched only an alternative spelling.
}else if(length(grep(str_to_title(city_2),all_cities$city)) == 0 && length(grep(str_to_title(city_2),all_cities$alternative)) > 0){
city_link_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$alternative),]
city_link_2 <- paste0(trimws(city_link_2[,1]),"+%28",trimws(city_link_2[,2]),"%29&")
country_2 <- all_cities[grep(pattern=str_to_title(city_2), all_cities$alternative),"country"] %>% trimws()
}else{
stop("An error occurred. Try again!")
}
}else{
stop("This city is not available. Please enter another city!")
}
# Special case: force the country to the UK for London.
country_2 <- ifelse(city_link_2 == "London", "United Kingdom", country_2)
# URL-encode spaces with "+"; append the state for US cities.
if(length(strsplit(city_link_2, " ")[[1]]) > 1){
city_link_2 <- paste0(strsplit(city_link_2, " ")[[1]], collapse = "+")
}else{
city_link_2 <- city_link_2
}
if(country_2 == "United+States"){
city_link_2 <- paste0(city_link_2,"%2C+",all_cities[grep(pattern=str_to_title(city_2), all_cities$city),"state"] %>% trimws())
}else{
city_link_2 <- city_link_2
}
if(length(strsplit(country_2, " ")[[1]]) > 1){
country_2 <- paste0(strsplit(country_2, " ")[[1]], collapse = "+")
}else{
country_2 <- country_2
}
# Scrape the Numbeo comparison page, parse the equivalent amount, and print
# the report together with an earning-power ratio via USD rates.
start_ <- Sys.time()
url_ <- paste0("https://www.numbeo.com/cost-of-living/compare_cities.jsp?country1=",country_1,"&city1=",city_link_1,"&country2=",country_2,"&city2=",city_link_2,"&amount=",amount_,"&displayCurrency=",currency_)
html <- read_html(url_)
# Third sentence of the summary holds the "would need around X" text.
sum_ <- as.character(text_split(html %>% html_nodes('.summary_big') %>% html_text() %>% str_trim() %>% unlist(), "sentences")[1,3])
if(country_1 != country_2){
# Cross-country comparisons quote the local-currency amount in parentheses.
sum_price <- rm_between(rm_between(sum_, 'would need around', 'in', extract=TRUE)[[1]], '(', ')', extract=TRUE)[[1]]
sum_price <- gsub(",","",sum_price)
}else{
sum_price <- rm_between(sum_, 'would need around', 'in', extract=TRUE)[[1]]
sum_price <- gsub(",","",sum_price)
}
# Strip a trailing sentence period, if any, before parsing the number.
sum_price <- ifelse(substr(sum_price,nchar(sum_price),nchar(sum_price)) == ".",
substr(sum_price,1,nchar(sum_price)-1),
sum_price)
price_2 <- as.numeric(gsub('[^-0-9.]', '', sum_price))
currency_2 <- countrycode(country_2,origin = 'country.name',destination = "iso4217c")
# BUGFIX: the exchange-rate table for currency_2 was fetched twice in a row;
# the redundant second call has been removed.
invisible(capture.output(price2_inbase <- suppressWarnings(suppressMessages(invisible(exchange_rate_latest(currency = currency_2))))))
price2_inbase <- round(price_2*(price2_inbase[which(price2_inbase$currency == currency_),2]),2)
invisible(capture.output(fx_usd <- exchange_rate_latest()))
price1_usd <- as.numeric(fx_usd[which(fx_usd$currency == currency_),2])
price2_usd <- as.numeric(fx_usd[which(fx_usd$currency == currency_2),2])
system("clear");
if(country_1 == country_2){
cat(" Date: ", format(as.Date(Sys.Date(),origin="1970-01-01")),"\n",
"Source city: ", str_to_title(city_1),"\n",
"Compared city: ", str_to_title(city_2),"\n",
"Amount: ", amount_, currency_,"in",str_to_title(city_1),"\n",
paste0("\nYou need to have ", paste(price_2, currency_2), " in ", str_to_title(city_2)," to maintain the same life in ",str_to_title(city_1)," with ",amount_," ",currency_,"."),"\n",
"\nEarning rate (City 2 / City 1): ", round((price_2/price2_usd)/(as.numeric(amount_)/as.numeric(price1_usd)),2),"\n")
}else{
cat(" Date: ", format(as.Date(Sys.Date(),origin="1970-01-01")),"\n",
"Source city: ", str_to_title(city_1),"\n",
"Compared city: ", str_to_title(city_2),"\n",
"Amount: ", amount_, currency_,"in",str_to_title(city_1),"\n",
"\nYou need to have", paste0(paste(price_2, currency_2), " (",paste(price2_inbase, currency_), ") in ", str_to_title(city_2)," to maintain the same life in ",str_to_title(city_1)," with ",amount_," ",currency_),"\n",
"\nEarning rate (City 2 / City 1): ", round((price_2/price2_usd)/(as.numeric(amount_)/as.numeric(price1_usd)),2),"\n")
}
end_ <- Sys.time()
time_ <- end_ - start_
cat("\n\nProcess time: ",round(time_,2), " seconds.\n\n")
|
#' Load Tennessee county registration totals
#'
#' Reads the raw six-month summary CSV (all seven columns as character),
#' strips thousands separators, and reshapes the result into the common
#' per-county schema: party columns D/G/L/R/O are set to NA (the source
#' file has no per-party breakdown), N is the sum of columns X3 and X4,
#' and county names are replaced by FIPS codes.
#'
#' @return A data frame with integer columns D, G, L, R, N, O, Year, Month
#'   plus the county FIPS identifier, one row per matched county.
#' @importFrom readr read_csv
#' @import dplyr
#' @export
loadTennessee <- function() {
# Map upper-cased county names to FIPS codes for state 47 (Tennessee).
countyNameFIPSMapping <- getCountyNameFIPSMapping('47') %>%
mutate(CountyName=toupper(CountyName))
# NOTE(review): mutate_each()/funs() are superseded dplyr verbs; across()
# is the modern equivalent if the package's dplyr floor is raised.
df <- read_csv("data-raw/tn/RptSixMonthSumJune2016.csv", col_names=paste0('X', 1:7), col_types=paste0(rep('c', 7), collapse="")) %>%
mutate(Year = 2016, Month = 11) %>% # Hardcode until we add historical data
# Remove "," thousands separators so the counts parse as integers.
mutate_each(funs(gsub(x=., pattern=",", replacement=""))) %>%
mutate_each("as.integer", X3, X4) %>%
mutate(D=NA, G=NA, L=NA, R=NA, O=NA, CountyName=X1, N=X3+X4) %>%
select(CountyName, D, G, L, R, N, O, Year, Month) %>%
mutate_each("as.integer", -CountyName) %>%
# Swap the county name for its FIPS code; unmatched counties are dropped.
inner_join(countyNameFIPSMapping, by=c("CountyName"="CountyName")) %>% select(-CountyName)
df
}
| /r-packages/uselections/R/tn.R | no_license | jilmun/voter-fraud | R | false | false | 796 | r | #' @importFrom readr read_csv
#' @import dplyr
#' @export
# Load the Tennessee six-month summary CSV and reshape it into the common
# per-county schema (party columns NA, N = X3 + X4, keyed by county FIPS).
loadTennessee <- function() {
# Map upper-cased county names to FIPS codes for state 47 (Tennessee).
countyNameFIPSMapping <- getCountyNameFIPSMapping('47') %>%
mutate(CountyName=toupper(CountyName))
df <- read_csv("data-raw/tn/RptSixMonthSumJune2016.csv", col_names=paste0('X', 1:7), col_types=paste0(rep('c', 7), collapse="")) %>%
mutate(Year = 2016, Month = 11) %>% # Hardcode until we add historical data
# Strip "," thousands separators, then coerce the count columns.
mutate_each(funs(gsub(x=., pattern=",", replacement=""))) %>%
mutate_each("as.integer", X3, X4) %>%
mutate(D=NA, G=NA, L=NA, R=NA, O=NA, CountyName=X1, N=X3+X4) %>%
select(CountyName, D, G, L, R, N, O, Year, Month) %>%
mutate_each("as.integer", -CountyName) %>%
# Replace the county name with its FIPS code via an inner join.
inner_join(countyNameFIPSMapping, by=c("CountyName"="CountyName")) %>% select(-CountyName)
df
}
|
#Load required libraries
library(ggplot2)
library(e1071)
library(randomForest)
library(MASS)
library(caret)
library(dplyr)
library(class)
library(FNN)
library(tree)
library(gbm)
library(knitr)
library(tidyr)
#Helper functions
#KNN classification
# Fit a k-nearest-neighbours classifier (class::knn) on the training data
# and return the test-set misclassification RATE -- not the predictions,
# despite the function's name.
make_knn_pred = function(k = 1, train_X, test_X, train_Y, test_Y) {
pred = knn(train_X, test_X, train_Y, k = k)
mean(test_Y!=pred)}
#KNN Regression
# Root-mean-squared error between observed and predicted numeric vectors.
rmse <- function(actual, predicted) {
  err <- actual - predicted
  sqrt(mean(err * err))
}
# Fit k-nearest-neighbours regression (FNN::knn.reg) on the training data
# and return the test-set RMSE, computed with the rmse() helper.
make_knn_pred_Reg = function(k = 1, train_X, test_X, train_Y, test_Y) {
pred = knn.reg(train = train_X,
test = test_X,
y = train_Y, k = k)$pred
act = test_Y
rmse(predicted = pred, actual = act)
}
library(ggplot2)
library(e1071)
library(randomForest)
library(MASS)
library(caret)
library(dplyr)
library(class)
library(FNN)
library(tree)
library(gbm)
library(knitr)
library(tidyr)
#Helper functions
#KNN classification
# k-NN classification error rate on the test set (class::knn); returns the
# misclassification rate, not the predictions.
make_knn_pred = function(k = 1, train_X, test_X, train_Y, test_Y) {
pred = knn(train_X, test_X, train_Y, k = k)
mean(test_Y!=pred)}
#KNN Regression
# Root-mean-squared error between observed and predicted values.
rmse = function(actual, predicted) {
sqrt(mean((actual - predicted) ^ 2))
}
# k-NN regression (FNN::knn.reg) evaluated as test-set RMSE via rmse().
make_knn_pred_Reg = function(k = 1, train_X, test_X, train_Y, test_Y) {
pred = knn.reg(train = train_X,
test = test_X,
y = train_Y, k = k)$pred
act = test_Y
rmse(predicted = pred, actual = act)
}
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AddEdge.R
\name{updateImagesOnEdgeAdded}
\alias{updateImagesOnEdgeAdded}
\title{Update images after adding an edge}
\usage{
updateImagesOnEdgeAdded(cn, net, dropImages, from, to)
}
\arguments{
\item{cn}{An open RODBC connection.}
\item{net}{The network.}
\item{dropImages}{Whether images should be dropped or updated.}
\item{from}{The index of the parent node}
\item{to}{The index of the child node}
}
\description{
Update the images after adding an edge.
Note that to be able to update the saved network images, the child must be
a binary noisy-or node.
}
\keyword{internal}
| /man/updateImagesOnEdgeAdded.Rd | permissive | mickash/Adaptive-Bayesian-Networks | R | false | false | 667 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AddEdge.R
\name{updateImagesOnEdgeAdded}
\alias{updateImagesOnEdgeAdded}
\title{Update images after adding an edge}
\usage{
updateImagesOnEdgeAdded(cn, net, dropImages, from, to)
}
\arguments{
\item{cn}{An open RODBC connection.}
\item{net}{The network.}
\item{dropImages}{Whether images should be dropped or updated.}
\item{from}{The index of the parent node}
\item{to}{The index of the child node}
}
\description{
Update the images after adding an edge.
Note that to be able to update the saved network images, the child must be
a binary noisy-or node.
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.